2 files changed: +2 -2 lines changed, under tests/integration/test_lists

@@ -35,7 +35,7 @@ examples/test_exaone.py::test_llm_exaone_1gpu[disable_weight_only-exaone_3.0_7.8
 examples/test_exaone.py::test_llm_exaone_1gpu[enable_weight_only-exaone_deep_2.4b-float16-nb:1]
 examples/test_exaone.py::test_llm_exaone_2gpu[exaone_3.0_7.8b_instruct-float16-nb:1]
 examples/test_gemma.py::test_llm_gemma_1gpu_summary[gemma-2-27b-it-other-bfloat16-8]
-examples/test_gemma.py::test_llm_gemma_1gpu_summary[gemma-3-1b-it-other-bfloat16-8]
+examples/test_gemma.py::test_llm_gemma_1gpu_summary_vswa[gemma-3-1b-it-other-bfloat16-8]
 examples/test_gemma.py::test_llm_hf_gemma_quantization_1gpu[gemma-2-27b-it-fp8-bfloat16-8]
 examples/test_gemma.py::test_hf_gemma_fp8_base_bf16_multi_lora[gemma-2-9b-it]
 examples/test_gemma.py::test_hf_gemma_fp8_base_bf16_multi_lora[gemma-2-27b-it]

@@ -113,7 +113,7 @@ l0_h100:
 - examples/test_llama.py::test_llama_3_x_fp8_with_bf16_lora[llama-3.2-1b]
 - examples/test_qwen.py::test_llm_hf_qwen_multi_lora_1gpu[qwen2.5_1.5b_instruct]
 - examples/test_gemma.py::test_hf_gemma_fp8_base_bf16_multi_lora[gemma-2-9b-it]
-- examples/test_gemma.py::test_llm_gemma_1gpu_summary[gemma-3-1b-it-other-bfloat16-8]
+- examples/test_gemma.py::test_llm_gemma_1gpu_summary_vswa[gemma-3-1b-it-other-bfloat16-8]
 - examples/test_phi.py::test_llm_phi_quantization_1gpu[Phi-4-mini-instruct-fp8-bfloat16]
 - unittest/trt/model_api/test_model_level_api.py # 9 mins on H100
 - unittest/trt/model_api/test_model_api_multi_gpu.py # 0.5 mins on H100
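
Both hunks are the same rename: test_llm_gemma_1gpu_summary becomes test_llm_gemma_1gpu_summary_vswa for the gemma-3-1b-it parametrization, once in each of the two test lists (the second inside the l0_h100 stage). As a minimal sketch for checking the renamed entry locally, the node ID from the updated lists can be passed straight to pytest; the working directory, environment, and any extra flags the CI applies are assumptions not shown in this diff:

    # hypothetical local invocation; CI resolves this ID through the test-list files above
    pytest "examples/test_gemma.py::test_llm_gemma_1gpu_summary_vswa[gemma-3-1b-it-other-bfloat16-8]"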