# evaluate_zeroshot_gpt.sh

# Zero-shot LAMBADA evaluation of a GPT-2 345M checkpoint (Megatron-LM tasks/main.py).
  1. #!/bin/bash
  2. WORLD_SIZE=8
  3. DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
  4. --nnodes 1 \
  5. --node_rank 0 \
  6. --master_addr localhost \
  7. --master_port 6000"
  8. TASK="LAMBADA"
  9. VALID_DATA=<lambada path>
  10. VOCAB_FILE=gpt2-vocab.json
  11. MERGE_FILE=gpt2-merges.txt
  12. CHECKPOINT=checkpoints/gpt2_345m
  13. python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
  14. --task $TASK \
  15. --valid-data $VALID_DATA \
  16. --tokenizer-type GPT2BPETokenizer \
  17. --strict-lambada \
  18. --vocab-file $VOCAB_FILE \
  19. --merge-file $MERGE_FILE \
  20. --load $CHECKPOINT \
  21. --tensor-model-parallel-size 1 \
  22. --num-layers 24 \
  23. --hidden-size 1024 \
  24. --num-attention-heads 16 \
  25. --batch-size 8 \
  26. --checkpoint-activations \
  27. --seq-length 1024 \
  28. --max-position-embeddings 1024 \
  29. --log-interval 10 \
  30. --fp16 \
  31. --no-load-optim \
  32. --no-load-rng