Skip to content

Commit f704bc5

Browse files
amyreese authored and facebook-github-bot committed
[codemod][pyfmt] apply Black 2024 style in fbcode (9/16)
Summary: Formats the covered files with pyfmt. paintitblack Reviewed By: aleivag Differential Revision: D54447729 fbshipit-source-id: fc781322b254f7027c24888cdadd5f1e90325ba8
1 parent f0f0845 commit f704bc5

File tree

9 files changed

+18
-24
lines changed

9 files changed

+18
-24
lines changed

mmf/common/registry.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ class Registry:
3535
r"""Class for registry object which acts as central source of truth
3636
for MMF
3737
"""
38+
3839
mapping = {
3940
# Mappings of builder name to their respective classes
4041
# Use `registry.register_builder` to register a builder class

mmf/datasets/builders/charades/_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,9 @@ def make_charades_df(csv_path, video_dir, classes_file):
2121

2222
# parse action labels
2323
df["action_labels"] = df["actions"].map(
24-
lambda x: [label.split(" ")[0] for label in x.split(";")]
25-
if pd.notnull(x)
26-
else []
24+
lambda x: (
25+
[label.split(" ")[0] for label in x.split(";")] if pd.notnull(x) else []
26+
)
2727
)
2828

2929
# load id to class map

mmf/datasets/databases/readers/feature_readers.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -275,9 +275,7 @@ def read(self, image_feat_path):
275275
tmp_image_feat = image_feat_bbox.item().get("image_feature")
276276
image_loc, image_dim = tmp_image_feat.shape
277277
tmp_image_feat_2 = np.zeros((self.max_loc, image_dim), dtype=np.float32)
278-
tmp_image_feat_2[
279-
0:image_loc,
280-
] = tmp_image_feat # noqa
278+
tmp_image_feat_2[0:image_loc,] = tmp_image_feat # noqa
281279
tmp_image_feat_2 = torch.from_numpy(tmp_image_feat_2)
282280
tmp_image_box = np.zeros((self.max_loc, 4), dtype=np.int32)
283281
tmp_image_box[0:image_loc] = image_boxes

mmf/datasets/multi_datamodule.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,9 @@ def __init__(self, config: DictConfig):
2929
self.batch_size = get_batch_size()
3030

3131
self.dataset_list: List[str] = dataset_list_from_config(self.config)
32-
self.datamodules: Dict[
33-
str, pl.LightningDataModule
34-
] = build_multiple_datamodules(self.dataset_list, self.config.dataset_config)
32+
self.datamodules: Dict[str, pl.LightningDataModule] = (
33+
build_multiple_datamodules(self.dataset_list, self.config.dataset_config)
34+
)
3535
self.train_loader: Optional[MultiDataLoader] = None
3636
self.val_loader: Optional[MultiDataLoader] = None
3737
self.test_loader: Optional[MultiDataLoader] = None

mmf/models/albef/vit.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -660,9 +660,7 @@ def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
660660
)
661661
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
662662
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
663-
print(
664-
"reshape position embedding from %d to %d" % (orig_size**2, new_size**2)
665-
)
663+
print("reshape position embedding from %d to %d" % (orig_size**2, new_size**2))
666664

667665
return new_pos_embed
668666
else:

mmf/models/krisp.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -123,9 +123,9 @@ def build(self):
123123
# Answer indices not in graph
124124
if self.config.output_combine == "add":
125125
self.missing_ans_inds = torch.LongTensor(self.config.num_labels).fill_(1)
126-
self.missing_ans_inds[
127-
self.graph_module.index_in_ans
128-
] = 0 # Now any index stil set to 1 is missing from graph
126+
self.missing_ans_inds[self.graph_module.index_in_ans] = (
127+
0 # Now any index stil set to 1 is missing from graph
128+
)
129129

130130
# Each model in MMF gets a dict called sample_list which contains
131131
# all of the necessary information returned from the image

mmf/models/visual_bert.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -459,9 +459,9 @@ def add_post_flatten_params(
459459
new_lm_labels = torch.ones_like(attention_mask) * -1
460460
size_masked_lm_labels = sample_list["masked_lm_labels"].size()
461461
assert len(size_masked_lm_labels) == 2
462-
new_lm_labels[
463-
: size_masked_lm_labels[0], : size_masked_lm_labels[1]
464-
] = sample_list["masked_lm_labels"]
462+
new_lm_labels[: size_masked_lm_labels[0], : size_masked_lm_labels[1]] = (
463+
sample_list["masked_lm_labels"]
464+
)
465465
sample_list["masked_lm_labels"] = new_lm_labels
466466

467467
return sample_list

mmf/modules/losses.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -880,7 +880,6 @@ def calc_ms_loss(pair, base, param, multiplier):
880880

881881
@registry.register_loss("refiner_ms")
882882
class RefinerMSLoss(nn.Module):
883-
884883
"""
885884
A Multi-Similarity loss between the decoder outputs of a given embedding size
886885
and its targets
@@ -949,7 +948,6 @@ def forward(self, sample_list, model_output):
949948

950949
@registry.register_loss("ms_loss")
951950
class MSLoss(nn.Module):
952-
953951
"""
954952
A Multi-Similarity loss between embeddings of similar and dissimilar
955953
labels is implemented here.
@@ -1049,7 +1047,6 @@ def forward(self, sample_list, model_output):
10491047

10501048
@registry.register_loss("refiner_contrastive_loss")
10511049
class RefinerContrastiveLoss(nn.Module):
1052-
10531050
"""
10541051
A contrastive loss between the decoder outputs of a given embedding size
10551052
and its targets

tests/models/transformers/test_heads.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -247,9 +247,9 @@ def setUp(self):
247247
)
248248
self.processed_sample_list = Sample()
249249
feat_targets = torch.zeros((bs, num_feat, img_dim))
250-
self.processed_sample_list[
251-
"mrfr_region_target"
252-
] = feat_targets.contiguous().view(-1, img_dim)
250+
self.processed_sample_list["mrfr_region_target"] = (
251+
feat_targets.contiguous().view(-1, img_dim)
252+
)
253253
self.processed_sample_list["mrfr_region_mask"] = torch.ones(
254254
(bs, num_feat)
255255
).bool()

0 commit comments

Comments (0)