We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 431819d commit fa188f6 — Copy full SHA for fa188f6
2 files changed
tests/helpers.py
@@ -22,7 +22,7 @@ def torch_save_to_buffer(obj):
22
23
def torch_load_from_buffer(buffer):
24
buffer.seek(0)
25
- obj = torch.load(buffer)
+ obj = torch.load(buffer, weights_only=False)
26
27
return obj
28
tests/test_linear8bitlt.py
@@ -118,7 +118,7 @@ def test_linear_serialization(
118
if not has_fp16_weights:
119
assert os.path.getsize(state_path_8bit) < 0.5 * os.path.getsize(state_path)
120
121
- new_state_dict = torch.load(state_path_8bit)
+ new_state_dict = torch.load(state_path_8bit, weights_only=False)
122
123
new_linear_custom = Linear8bitLt(
124
linear.in_features,
0 commit comments