diff --git a/mmf/configs/defaults.yaml b/mmf/configs/defaults.yaml
index ab6c8625b..73d63fab9 100644
--- a/mmf/configs/defaults.yaml
+++ b/mmf/configs/defaults.yaml
@@ -45,11 +45,26 @@ training:
   wandb:
     # Whether to use Weights and Biases Logger, (Default: false)
     enabled: false
+    # An entity is a username or team name where you're sending runs.
+    # This is necessary if you want to log your metrics to a team account. By default
+    # it will log the run to your user account.
+    entity: null
     # Project name to be used while logging the experiment with wandb
-    wandb_projectname: mmf_${oc.env:USER,}
+    project: mmf
     # Experiment/ run name to be used while logging the experiment
     # under the project with wandb
-    wandb_runname: ${training.experiment_name}
+    name: ${training.experiment_name}
+    # You can save your model checkpoints as W&B Artifacts for model versioning.
+    # Set the value to `true` to enable this feature.
+    log_checkpoint: false
+    # Log the evaluation prediction report as W&B Tables.
+    log_tables: false
+    # Specify other argument values that you want to pass to wandb.init(). Check out the documentation
+    # at https://docs.wandb.ai/ref/python/init to see what arguments are available.
+    # job_type: 'train'
+    # tags: ['tag1', 'tag2']
+
+
 
   # Size of the batch globally. If distributed or data_parallel
   # is used, this will be divided equally among GPUs
diff --git a/mmf/trainers/callbacks/logistics.py b/mmf/trainers/callbacks/logistics.py
index 70b2ef05d..0804f5218 100644
--- a/mmf/trainers/callbacks/logistics.py
+++ b/mmf/trainers/callbacks/logistics.py
@@ -58,11 +58,10 @@ def __init__(self, config, trainer):
             if env_wandb_logdir:
                 log_dir = env_wandb_logdir
 
-            wandb_projectname = config.training.wandb.wandb_projectname
-            wandb_runname = config.training.wandb.wandb_runname
-
             self.wandb_logger = WandbLogger(
-                name=wandb_runname, save_dir=log_dir, project=wandb_projectname
+                entity=config.training.wandb.entity,
+                config=config,
+                project=config.training.wandb.project,
             )
 
     def on_train_start(self):
@@ -153,6 +152,7 @@ def on_test_end(self, **kwargs):
             meter=kwargs["meter"],
             should_print=prefix,
             tb_writer=self.tb_writer,
+            wandb_logger=self.wandb_logger,
         )
 
         logger.info(f"Finished run in {self.total_timer.get_time_since_start()}")
diff --git a/mmf/trainers/core/evaluation_loop.py b/mmf/trainers/core/evaluation_loop.py
index 17d3f554a..cfd063e80 100644
--- a/mmf/trainers/core/evaluation_loop.py
+++ b/mmf/trainers/core/evaluation_loop.py
@@ -144,6 +144,12 @@ def prediction_loop(self, dataset_type: str) -> None:
         reporter.postprocess_dataset_report()
 
+        # Log the prediction report as W&B Tables
+        if self.config.training.wandb.log_tables:
+            self.logistics_callback.wandb_logger.log_prediction_report(
+                reporter.report, reporter.current_datamodule.dataset_name
+            )
+
         logger.info(f"Finished predicting. Loaded {loaded_batches}")
         logger.info(f" -- skipped {skipped_batches} batches.")
         self.model.train()
diff --git a/mmf/utils/checkpoint.py b/mmf/utils/checkpoint.py
index 5c3dd3944..e583f8394 100644
--- a/mmf/utils/checkpoint.py
+++ b/mmf/utils/checkpoint.py
@@ -522,6 +522,7 @@ def save(self, update, iteration=None, update_best=False):
         best_metric = (
             self.trainer.early_stop_callback.early_stopping.best_monitored_value
         )
+        model = self.trainer.model
         data_parallel = registry.get("data_parallel") or registry.get("distributed")
         fp16_scaler = getattr(self.trainer, "scaler", None)
@@ -574,6 +575,15 @@ def save(self, update, iteration=None, update_best=False):
         with open_if_main(current_ckpt_filepath, "wb") as f:
             self.save_func(ckpt, f)
 
+        # Save the current checkpoint as W&B Artifacts for model versioning.
+        if self.config.training.wandb.log_checkpoint:
+            logger.info(
+                "Saving current checkpoint as W&B Artifacts for model versioning"
+            )
+            self.trainer.logistics_callback.wandb_logger.log_model_checkpoint(
+                current_ckpt_filepath
+            )
+
         # Remove old checkpoints if max_to_keep is set
         # In XLA, only delete checkpoint files in main process
         if self.max_to_keep > 0 and is_main():
diff --git a/mmf/utils/logger.py b/mmf/utils/logger.py
index a82696f97..100174c75 100644
--- a/mmf/utils/logger.py
+++ b/mmf/utils/logger.py
@@ -225,6 +225,12 @@ def summarize_report(
     if not is_main() and not is_xla():
         return
 
+    # Log the learning rate if available
+    if wandb_logger and "lr" in extra:
+        wandb_logger.log_metrics(
+            {"train/learning_rate": float(extra["lr"])}, commit=False
+        )
+
     if tb_writer:
         scalar_dict = meter.get_scalar_dict()
         tb_writer.add_scalars(scalar_dict, current_iteration)
@@ -395,10 +401,9 @@ class WandbLogger:
     Log using `Weights and Biases`.
 
     Args:
-        name: Display name for the run.
-        save_dir: Path where data is saved (./save/logs/wandb/ by default).
-        project: Display name for the project.
-        **init_kwargs: Arguments passed to :func:`wandb.init`.
+        entity: An entity is a username or team name where you're sending runs.
+        config: Configuration for the run.
+        project: Name of the W&B project.
 
     Raises:
         ImportError: If wandb package is not installed.
@@ -406,10 +411,9 @@ def __init__(
         self,
-        name: Optional[str] = None,
-        save_dir: Optional[str] = None,
+        entity: Optional[str] = None,
+        config: Optional[Dict] = None,
         project: Optional[str] = None,
-        **init_kwargs,
     ):
         try:
             import wandb
@@ -420,10 +424,14 @@ def __init__(
             )
 
         self._wandb = wandb
-
-        self._wandb_init = dict(name=name, project=project, dir=save_dir)
-
-        self._wandb_init.update(**init_kwargs)
+        self._wandb_init = dict(entity=entity, config=config, project=project)
+        wandb_kwargs = dict(config.training.wandb)
+        wandb_kwargs.pop("enabled")
+        wandb_kwargs.pop("entity")
+        wandb_kwargs.pop("project")
+        wandb_kwargs.pop("log_checkpoint")
+        wandb_kwargs.pop("log_tables")
+        self._wandb_init.update(**wandb_kwargs)
 
         self.setup()
@@ -453,14 +461,51 @@ def _should_log_wandb(self):
         else:
             return True
 
-    def log_metrics(self, metrics: Dict[str, float]):
+    def log_metrics(self, metrics: Dict[str, float], commit=True):
         """
-        Log the monitored metrics to the wand dashboard.
+        Log the monitored metrics to the wandb dashboard.
 
         Args:
-            metrics (Dict[str, float]): [description]
+            metrics (Dict[str, float]): A dictionary of metrics to log.
+            commit (bool): Save the metrics dict to the wandb server and
+                increment the step. (default: True)
+        """
+        if not self._should_log_wandb():
+            return
+
+        self._wandb.log(metrics, commit=commit)
+
+    def log_model_checkpoint(self, model_path):
+        """
+        Log the model checkpoint to the wandb dashboard.
+
+        Args:
+            model_path (str): Path to the model file.
         """
         if not self._should_log_wandb():
             return
 
-        self._wandb.log(metrics)
+        model_artifact = self._wandb.Artifact(
+            "run_" + self._wandb.run.id + "_model", type="model"
+        )
+
+        model_artifact.add_file(model_path, name="current.pt")
+        self._wandb.log_artifact(model_artifact, aliases=["latest"])
+
+    def log_prediction_report(self, report, dataset_name):
+        """
+        Log the prediction report as W&B Tables for better comparison.
+
+        Args:
+            report: Prediction report to log.
+            dataset_name: Name of the dataset.
+        """
+        if not self._should_log_wandb():
+            return
+
+        columns = list(report[0].keys())
+        data_at = self._wandb.Table(columns=columns)
+
+        for item in report:
+            data_at.add_data(*item.values())
+
+        self._wandb.log({f"pred_table_{dataset_name}": data_at})
diff --git a/website/docs/notes/logging.md b/website/docs/notes/logging.md
index ebec33902..dee7f2828 100644
--- a/website/docs/notes/logging.md
+++ b/website/docs/notes/logging.md
@@ -1,42 +1,75 @@
 ---
-id: concepts
-title: Terminology and Concepts
-sidebar_label: Terminology and Concepts
+id: logger
+title: Weights and Biases Logging
+sidebar_label: Weights and Biases Logging
 ---
 
 ## Weights and Biases Logger
 
-MMF has a `WandbLogger` class which lets the user to log their model's progress using [Weights and Biases](https://gitbook-docs.wandb.ai/).
+MMF now has a `WandbLogger` class which lets the user log their model's progress using [Weights and Biases](https://wandb.ai/site). Enable this logger to automatically log the training/validation metrics, system (GPU and CPU) metrics, and configuration parameters.
+
+## First time setup
 
 To set up wandb, run the following:
 
 ```
 pip install wandb
+```
+
+In order to log anything to the W&B server, you need to authenticate the machine with your W&B **API key**. You can create a new account by going to https://wandb.ai/signup, which will generate an API key. If you are an existing user, you can retrieve your key from https://wandb.ai/authorize. You only need to supply your key once; it is then remembered on the same device.
+
+```
 wandb login
 ```
 
+## W&B config parameters
+
 The following options are available in config to enable and customize the wandb logging:
 
 ```yaml
 training:
   # Weights and Biases control, by default Weights and Biases (wandb) is disabled
   wandb:
     # Whether to use Weights and Biases Logger, (Default: false)
-    enabled: false
+    enabled: true
+    # An entity is a username or team name where you're sending runs.
+    # This is necessary if you want to log your metrics to a team account. By default
+    # it will log the run to your user account.
+    entity: null
     # Project name to be used while logging the experiment with wandb
-    wandb_projectname: mmf_${oc.env:USER}
+    project: mmf
     # Experiment/ run name to be used while logging the experiment
     # under the project with wandb
-    wandb_runname: ${training.experiment_name}
+    name: ${training.experiment_name}
+    # Specify other argument values that you want to pass to wandb.init(). Check out the documentation
+    # at https://docs.wandb.ai/ref/python/init to see what arguments are available.
+    # job_type: 'train'
+    # tags: ['tag1', 'tag2']
 
 env:
   wandb_logdir: ${env:MMF_WANDB_LOGDIR,}
 ```
 
-To enable wandb logger the user needs to change the following option in the config.
-`training.wandb.enabled=True`
+* To enable the wandb logger, the user needs to change the following option in the config:
+
+  `training.wandb.enabled=True`
+
+* To set the `entity`, which is the name of the team or the username, the user needs to change the following option in the config. If no `entity` is provided, the run will be logged to the `entity` set as default in the user's settings:
+
+  `training.wandb.entity=<teamname/username>`
+
+* To give the current experiment a project and run name, the user should add these config options. The default project name is `mmf` and the default run name is `${training.experiment_name}`:
+
+  `training.wandb.project=<ProjectName>`
+  `training.wandb.name=<RunName>`
+
+* To change the path to the directory where wandb metadata will be stored (Default: `env.log_dir`):
+
+  `env.wandb_logdir=<dir_name>`
 
-To give the current experiment a project and run name, user should add these config options.
+* To provide extra arguments to `wandb.init()`, the user just needs to define them in the config file. Check out the documentation at https://docs.wandb.ai/ref/python/init to see what arguments are available. An example is shown in the config parameters above. Make sure to use the same key names in the config file as defined in the documentation.
 
-`training.wandb.wandb_projectname=<ProjectName> training.wandb.wandb_runname=<RunName>`
+## Current features
 
-To change the path to the directory where wandb metadata would be stored (Default: `env.log_dir`):
+The following features are currently supported by the `WandbLogger`:
 
-`env.wandb_logdir=<dir_name>`
+* Training & validation metrics
+* Learning rate over time
+* GPU: type, utilization, power, temperature, CUDA memory usage
+* Configuration parameters
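
Reviewer note (not part of the patch): the `WandbLogger` methods added above are thin wrappers over the raw `wandb` API, so they can be sanity-checked outside MMF. Below is a minimal standalone Python sketch that mirrors the three code paths in this diff — metric logging with `commit`, checkpoint versioning via Artifacts, and prediction reports via Tables. The entity/project/run names, the checkpoint path, and the report rows are made-up illustrations, not MMF defaults.

```python
import wandb
from pathlib import Path

# Stand-in for a real checkpoint file so the Artifact upload has something to add.
Path("current.ckpt").write_bytes(b"fake checkpoint bytes")

# Mirrors WandbLogger.setup()/log_metrics(): commit=False stages a value (here,
# the learning rate logged by summarize_report) so it is merged into the next
# committed step instead of advancing the step counter on its own.
run = wandb.init(entity=None, project="mmf", name="demo_experiment")
wandb.log({"train/learning_rate": 1e-4}, commit=False)
wandb.log({"train/total_loss": 0.73})  # commits both values as one step

# Mirrors log_model_checkpoint(): version the checkpoint file as a W&B Artifact.
model_artifact = wandb.Artifact("run_" + run.id + "_model", type="model")
model_artifact.add_file("current.ckpt", name="current.pt")
wandb.log_artifact(model_artifact, aliases=["latest"])

# Mirrors log_prediction_report(): a report is a list of per-example dicts,
# turned into a W&B Table with one column per key.
report = [
    {"id": 1, "answer": "yes", "confidence": 0.91},
    {"id": 2, "answer": "no", "confidence": 0.84},
]
table = wandb.Table(columns=list(report[0].keys()))
for item in report:
    table.add_data(*item.values())
wandb.log({"pred_table_demo_dataset": table})

run.finish()
```

End to end in MMF itself, the same paths are exercised through the usual dotlist overrides, e.g. appending `training.wandb.enabled=True training.wandb.log_checkpoint=True training.wandb.log_tables=True` to an `mmf_run` command.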