Loggers package#

Submodules#

File Logger module#

class ablator.modules.loggers.file.FileLogger(path: Path | str | None = None, verbose: bool = True, prefix: str | None = None)[source]#

Bases: object

A logger that writes messages to a file and prints them to the console.

Attributes:
WARNING : str

ANSI escape code for the warning text color.

FAIL : str

ANSI escape code for the error text color.

ENDC : str

ANSI escape code for resetting the text color.

__init__(path: Path | str | None = None, verbose: bool = True, prefix: str | None = None)[source]#

Initialize a FileLogger.

Parameters:
path : str | Path | None, optional

Path to the log file, by default None.

verbose : bool, optional

Whether to print messages to the console, by default True.

prefix : str | None, optional

A prefix to add to each logged message, by default None.
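
A minimal usage sketch of the constructor together with the logging methods documented below; the file name "train.log" and the prefix "trial-0" are illustrative values, not defaults:

>>> from ablator.modules.loggers.file import FileLogger
>>> logger = FileLogger(path="train.log", verbose=True, prefix="trial-0")
>>> logger.info("Starting training", verbose=True)  # printed to the console and written to train.log
>>> logger.warn("Validation loss did not improve")  # warnings print by default (verbose=True)
>>> logger.error("Could not restore checkpoint")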

error(msg: str)[source]#

Log an error message.

Parameters:
msg : str

The message to log.

info(msg: str, verbose=False)[source]#

Log an info message.

Parameters:
msg : str

The message to log.

verbose : bool, optional

Whether to print messages to the console, by default False.

set_path(path: str | Path)[source]#

Set the path to the log file.

Parameters:
path : str | Path

The path to the log file.

set_prefix(prefix: str | None = None)[source]#

Set the prefix for the logger.

Parameters:
prefix : str | None, optional

The prefix to add to each logged message, by default None.
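
Continuing the sketch above, the two setters re-target an existing logger; the new path and prefix are illustrative:

>>> logger.set_path("experiment_2/train.log")  # later messages are written to the new file
>>> logger.set_prefix("trial-1")               # later messages carry the new prefix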

warn(msg: str, verbose=True)[source]#

Log a warning message.

Parameters:
msg : str

The message to log.

verbose : bool, optional

Whether to print messages to the console, by default True.

Main Logger module#

exception ablator.modules.loggers.main.DuplicateRunError[source]#

Bases: Exception

class ablator.modules.loggers.main.SummaryLogger(run_config: RunConfig, model_dir: Path | str | None = None, resume: bool = False, keep_n_checkpoints: int | None = None, verbose: bool = True)[source]#

Bases: object

A logger for training and evaluation summary.

Attributes:
SUMMARY_DIR_NAME : str

Name of the summary directory.

RESULTS_JSON_NAME : str

Name of the results JSON file.

LOG_FILE_NAME : str

Name of the log file.

CONFIG_FILE_NAME : str

Name of the configuration file.

METADATA_JSON : str

Name of the metadata JSON file.

CHKPT_DIR_NAMES : list[str]

List of checkpoint directory names.

CHKPT_DIR_VALUES : list[str]

List of checkpoint directory values.

CHKPT_DIRS : dict[str, Path]

Dictionary containing checkpoint directories.

keep_n_checkpoints : int

Number of checkpoints to keep.

log_iteration : int

Current log iteration.

checkpoint_iteration : dict[str, dict[str, int]]

A dictionary that keeps track of the checkpoint iterations for each directory. It is used in the checkpoint() method to determine the appropriate iteration number for the saved checkpoint.

log_file_path : Path | None

Path to the log file.

dashboard : LoggerBase | None

Dashboard logger.

model_dir : Path | None

Path to the model directory.

result_json_path : Path | None

Path to the results JSON file.

__init__(run_config: RunConfig, model_dir: Path | str | None = None, resume: bool = False, keep_n_checkpoints: int | None = None, verbose: bool = True)[source]#

Initialize a SummaryLogger.

Parameters:
run_config : RunConfig

The run configuration.

model_dir : str | Path | None, optional

Path to the model directory, by default None.

resume : bool, optional

Whether to resume from an existing model directory, by default False.

keep_n_checkpoints : int | None, optional

Number of checkpoints to keep, by default None.

verbose : bool, optional

Whether to print messages to the console, by default True.
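
A construction sketch, assuming a RunConfig instance named run_config is already defined; the model directory and keep_n_checkpoints value are illustrative:

>>> from ablator.modules.loggers.main import SummaryLogger
>>> summary = SummaryLogger(
...     run_config=run_config,          # assumed to exist already
...     model_dir="experiments/run_1",  # illustrative output directory
...     resume=False,
...     keep_n_checkpoints=3,
...     verbose=True,
... )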

checkpoint(save_dict: object, file_name: str, itr: int | None = None, is_best: bool = False)[source]#

Save a checkpoint and update the checkpoint iteration.

Saves the model checkpoint in the appropriate directory based on the is_best parameter. If is_best==True, the checkpoint is saved in the "best" directory, indicating the best performing model so far. Otherwise, the checkpoint is saved in the "recent" directory, representing the most recent checkpoint.

The file path for the checkpoint is constructed using the selected directory name ("best" or "recent"), and the file name with the format "{file_name}_{itr:010}.pt", where itr is the iteration number.

The checkpoint_iteration dictionary is updated with the current iteration number for each directory. If itr is not provided, the iteration number is increased by 1 each time a checkpoint is saved. Otherwise, the iteration number is set to the provided itr.

Parameters:
save_dict : object

The object to save.

file_name : str

The file name.

itr : int | None, optional

The iteration, by default None. If not provided, the current iteration is incremented by 1.

is_best : bool, optional

Whether this is the best checkpoint, by default False.

Raises:
AssertionError

If the provided itr is not larger than the current iteration associated with the checkpoint.
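
A sketch of the behavior described above, continuing with the summary logger from the previous example; the state dictionary and file name are illustrative:

>>> state = {"epoch": 10, "step": 1000}
>>> summary.checkpoint(state, "checkpoint")                # saved under the "recent" directory
>>> summary.checkpoint(state, "checkpoint", is_best=True)  # saved under the "best" directory
>>> summary.checkpoint(state, "checkpoint", itr=100)       # explicit iteration; must exceed the current one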

clean_checkpoints(keep_n_checkpoints: int)[source]#

Clean up checkpoints and keep only the specified number of checkpoints.

Parameters:
keep_n_checkpoints : int

Number of checkpoints to keep.

error(*args, **kwargs)[source]#

Log an error message to files and to the console using the logger.

Parameters:
*args

Positional arguments passed to the logger’s error method.

**kwargs

Keyword arguments passed to the logger’s error method.

info(*args, **kwargs)[source]#

Log an info message to files and to the console using the logger.

Parameters:
*args

Positional arguments passed to the logger’s info method.

**kwargs

Keyword arguments passed to the logger’s info method.

update(metrics: TrainMetrics | dict, itr: int | None = None)[source]#

Update the dashboard with the given metrics, write the metrics to JSON files, and update the current metadata (log_iteration).

Parameters:
metrics : TrainMetrics | dict

The metrics to update.

itr : int | None, optional

The iteration, by default None.

Raises:
AssertionError

If the iteration is not greater than the current iteration.

Notes

The log_iteration attribute is increased by 1 every time update() is called during training.
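
A sketch of logging a plain dictionary of metrics; the metric names and values are illustrative, and a TrainMetrics object can be passed in the same way:

>>> summary.update({"train_loss": 0.42, "val_accuracy": 0.91})           # uses the internal log_iteration
>>> summary.update({"train_loss": 0.40, "val_accuracy": 0.93}, itr=200)  # explicit, strictly increasing iteration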

warn(*args, **kwargs)[source]#

Log a warning message to files and to the console using the logger.

Parameters:
*args

Positional arguments passed to the logger’s warn method.

**kwargs

Keyword arguments passed to the logger’s warn method.

Tensorboard Logger module#

class ablator.modules.loggers.tensor.TensorboardLogger(summary_dir: str | Path)[source]#

Bases: LoggerBase

A logger class for Tensorboard visualization.

Attributes:
summary_dir : str | Path

The directory to store the Tensorboard summary files.

backend_logger : SummaryWriter

The PyTorch Tensorboard SummaryWriter object used to log data.

__init__(summary_dir: str | Path)[source]#

Initialize the TensorboardLogger with a summary directory.

Parameters:
summary_dir : str | Path

The directory to store the Tensorboard summary files.
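
A minimal sketch of constructing the logger and writing a scalar with the add_scalar method documented below; the directory and tag are illustrative:

>>> from ablator.modules.loggers.tensor import TensorboardLogger
>>> tb = TensorboardLogger("experiments/run_1/tensorboard")
>>> tb.add_scalar("loss/train", 0.42, 100)  # tag, value, iteration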

add_image(k, v, itr, dataformats='CHW')[source]#

Add an image to the TensorBoard dashboard.

Parameters:
k : str

The tag associated with the image.

v : np.ndarray

The image data.

itr : int

The iteration number.

dataformats : str, optional

The format of the image data, by default "CHW".

add_scalar(k, v, itr)[source]#

Add a scalar to the TensorBoard dashboard.

Parameters:
k : str

The tag associated with the scalar.

v : float | int

The scalar value.

itr : int

The iteration number.

add_scalars(k, v: dict[str, float | int], itr)[source]#

Add multiple scalars to the TensorBoard dashboard.

Parameters:
k : str

The main tag associated with the scalars.

v : dict[str, float | int]

A dictionary of scalar tags and values.

itr : int

The iteration number.
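
For example, grouping related scalars under one main tag; the tags and values are illustrative:

>>> tb.add_scalars("loss", {"train": 0.42, "val": 0.47}, 100)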

add_table(k, v: DataFrame, itr)[source]#

Add a table to the TensorBoard dashboard.

Parameters:
k : str

The tag associated with the table.

v : pd.DataFrame

The table data.

itr : int

The iteration number.

add_text(k, v, itr)[source]#

Add text to the TensorBoard dashboard.

Parameters:
k : str

The tag associated with the text.

v : str

The text data.

itr : int

The iteration number.
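
A sketch combining the image, table, and text methods above; the array shape, table contents, and tags are illustrative:

>>> import numpy as np
>>> import pandas as pd
>>> tb.add_image("samples/input", np.zeros((3, 32, 32)), 100, dataformats="CHW")  # 3-channel CHW image
>>> tb.add_table("results/summary", pd.DataFrame({"metric": ["loss"], "value": [0.42]}), 100)
>>> tb.add_text("notes", "baseline configuration", 100)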

write_config(config: ConfigBase)[source]#

Write the configuration to the TensorBoard dashboard.

Parameters:
config : ConfigBase

The configuration object.

Module contents#

class ablator.modules.loggers.LoggerBase[source]#

Bases: ABC

class ablator.modules.loggers.LoggerConfig(*args, **kwargs)[source]#

Bases: ConfigBase

config_class#

alias of LoggerConfig