@@ -52,16 +52,16 @@ to run with each of the datasets set the `dataset` flag in the command as shown

 ```bash
 # grammar_dataset
-python -m llama_cookbook.finetuning --use_peft --peft_method lora --quantization 8bit --dataset grammar_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model
+python -m llama_recipes.finetuning --use_peft --peft_method lora --quantization 8bit --dataset grammar_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model

 # alpaca_dataset
-python -m llama_cookbook.finetuning --use_peft --peft_method lora --quantization 8bit --dataset alpaca_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model
+python -m llama_recipes.finetuning --use_peft --peft_method lora --quantization 8bit --dataset alpaca_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model

 # samsum_dataset
-python -m llama_cookbook.finetuning --use_peft --peft_method lora --quantization 8bit --dataset samsum_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model
+python -m llama_recipes.finetuning --use_peft --peft_method lora --quantization 8bit --dataset samsum_dataset --model_name /path_of_model_folder/8B --output_dir Path/to/save/PEFT/model
 ```
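The three commands in this hunk differ only in the value passed to `--dataset`, so all of them can be exercised with a shell loop. A minimal sketch, assuming the post-change module path `llama_recipes.finetuning` from the `+` lines above; the per-dataset suffix on `--output_dir` is an addition here (not in the original commands) so the runs don't overwrite each other:

```bash
# Sketch: run the same LoRA fine-tuning command once per dataset.
# All flags are taken from the commands above; the per-dataset
# output subdirectory is an assumption to keep the runs separate.
for DATASET in grammar_dataset alpaca_dataset samsum_dataset; do
  python -m llama_recipes.finetuning \
    --use_peft --peft_method lora --quantization 8bit \
    --dataset "$DATASET" \
    --model_name /path_of_model_folder/8B \
    --output_dir "Path/to/save/PEFT/model/${DATASET}"
done
```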