# Load the Llama 3.2 Vision (Mllama) checkpoint once.
# - device_map="auto" lets accelerate shard the weights across available devices.
# - bfloat16 halves memory vs. float32 while keeping float32's exponent range.
# - token=hf_token authenticates against the gated Hugging Face repo.
# NOTE(review): the original ran this statement twice back-to-back; the second
# call only repeated the expensive load and overwrote the first — removed.
model = MllamaForConditionalGeneration.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    token=hf_token,
)
# Load the matching processor (tokenizer + image preprocessor) for the model.
# NOTE(review): the original ran this statement twice back-to-back; the second
# call was redundant and overwrote the first — removed.
processor = MllamaProcessor.from_pretrained(model_name, token=hf_token)