@@ -119,7 +119,7 @@ class ModelTrainer:
dtype=self.dtype,
fast_inference = False, # Enable vLLM fast inference
max_lora_rank = lora_rank,
- gpu_memory_utilization=0.1,# Reduce if out of memory
+ gpu_memory_utilization=0.1, # 0.6 # Reduce if out of memory
)
# Add LoRA adapter
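For context, here is a minimal sketch of how these keyword arguments typically fit together. The hunk only shows the inside of the call, so the enclosing `FastLanguageModel.from_pretrained` call, the `get_peft_model` step hinted at by the "Add LoRA adapter" comment, and the concrete values for `model_name`, `max_seq_length`, and `lora_rank` are assumptions based on Unsloth's API, not code taken from this repository.

```python
# Sketch under assumptions: Unsloth's FastLanguageModel API; the model name,
# sequence length, and LoRA rank below are placeholders, not values from the diff.
from unsloth import FastLanguageModel

lora_rank = 32  # placeholder; the hunk only shows lora_rank being passed through

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Qwen2.5-3B-Instruct",  # placeholder model
    max_seq_length=1024,                       # placeholder context length
    dtype=None,                  # auto-detect; the diff passes self.dtype
    fast_inference=False,        # vLLM fast inference disabled, as in the diff
    max_lora_rank=lora_rank,
    gpu_memory_utilization=0.1,  # keep low to avoid OOM; raise (e.g. toward 0.6) if memory allows
)

# "Add LoRA adapter": attach LoRA weights so only the adapter is trained
model = FastLanguageModel.get_peft_model(
    model,
    r=lora_rank,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_alpha=lora_rank,
    use_gradient_checkpointing="unsloth",
)
```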