We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 3159e60 · commit 31010ec — Copy full SHA for 31010ec
tests/lora/test_lora_layers_flux.py
@@ -169,7 +169,11 @@ def test_modify_padding_mode(self):
169
@unittest.skip("We cannot run inference on this model with the current CI hardware")
170
# TODO (DN6, sayakpaul): move these tests to a beefier GPU
171
class FluxLoRAIntegrationTests(unittest.TestCase):
172
- """internal note: The integration slices were obtained on audace."""
+ """internal note: The integration slices were obtained on audace.
173
+
174
+ torch: 2.6.0.dev20241006+cu124 with CUDA 12.5. Need the same setup for the
175
+ assertions to pass.
176
+ """
177
178
num_inference_steps = 10
179
seed = 0
0 commit comments