Don't split the layers in 8-bit mode by default
oobabooga committed Mar 16, 2023
1 parent 0a2aa79 commit ee164d1
Showing 1 changed file with 4 additions and 2 deletions.
modules/models.py (4 additions, 2 deletions)
@@ -105,8 +105,10 @@ def load_model(model_name):
             params["torch_dtype"] = torch.float32
         else:
             params["device_map"] = 'auto'
-            if shared.args.load_in_8bit:
+            if shared.args.load_in_8bit and any((shared.args.auto_devices, shared.args.gpu_memory)):
                 params['quantization_config'] = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
+            elif shared.args.load_in_8bit:
+                params['quantization_config'] = BitsAndBytesConfig(load_in_8bit=True)
             elif shared.args.bf16:
                 params["torch_dtype"] = torch.bfloat16
             else:
@@ -119,7 +121,7 @@ def load_model(model_name):
                     max_memory[i] = f'{memory_map[i]}GiB'
                 max_memory['cpu'] = f'{shared.args.cpu_memory or 99}GiB'
                 params['max_memory'] = max_memory
-            else:
+            elif shared.args.auto_devices:
                 total_mem = (torch.cuda.get_device_properties(0).total_memory / (1024*1024))
                 suggestion = round((total_mem-1000) / 1000) * 1000
                 if total_mem - suggestion < 800:
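
In effect, --load-in-8bit alone now produces a plain 8-bit quantization config, keeping every layer on the GPU; the fp32 CPU-offload path, which splits layers between GPU and CPU, is only taken when --auto-devices or --gpu-memory is also passed. A minimal standalone sketch of the new branching, using transformers' BitsAndBytesConfig and a SimpleNamespace standing in for the webui's shared.args (the stand-in and default flag values are assumptions for illustration, not the repository's actual argparse setup):

    from types import SimpleNamespace

    from transformers import BitsAndBytesConfig

    # Hypothetical stand-in for modules.shared.args; the real object is built by argparse.
    args = SimpleNamespace(load_in_8bit=True, auto_devices=False, gpu_memory=None)

    params = {"device_map": "auto"}
    if args.load_in_8bit and any((args.auto_devices, args.gpu_memory)):
        # Memory limits were requested: let int8 layers spill to the CPU in fp32.
        params["quantization_config"] = BitsAndBytesConfig(
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )
    elif args.load_in_8bit:
        # New default: 8-bit weights with all layers kept on the GPU.
        params["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)

    # With the default flags above, no CPU offload is enabled:
    print(params["quantization_config"].llm_int8_enable_fp32_cpu_offload)  # False

The params dict is later forwarded to the model's from_pretrained() call in load_model, so the two branches differ only in whether bitsandbytes is allowed to keep offloaded modules in fp32 on the CPU.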
