#!/bin/bash
#
# This script performs the following operations:
# 1. Downloads the Flowers dataset.
# 2. Fine-tunes an InceptionV3 model on the Flowers training set.
# 3. Evaluates the model on the Flowers validation set.
#
# Usage:
# cd slim
# ./slim/scripts/finetune_inception_v3_on_flowers.sh

# Where the pre-trained InceptionV3 checkpoint is saved to.
PRETRAINED_CHECKPOINT_DIR=/tmp/checkpoints

# Where the training (fine-tuned) checkpoint and logs will be saved to.
TRAIN_DIR=/tmp/flowers-models/inception_v3

# Where the dataset is saved to.
DATASET_DIR=/tmp/flowers

# Download the pre-trained checkpoint.
if [ ! -d "$PRETRAINED_CHECKPOINT_DIR" ]; then
  mkdir ${PRETRAINED_CHECKPOINT_DIR}
fi

if [ ! -f ${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt ]; then
  wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz
  tar -xvf inception_v3_2016_08_28.tar.gz
  mv inception_v3.ckpt ${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt
  rm inception_v3_2016_08_28.tar.gz
fi

# Download the dataset.
python download_and_convert_data.py \
  --dataset_name=flowers \
  --dataset_dir=${DATASET_DIR}
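
# Note: besides fetching the images, this converts them into TFRecord shards
# (plus a labels file) under ${DATASET_DIR}, which is the format the
# train_image_classifier.py and eval_image_classifier.py calls below expect.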

# Fine-tune only the new layers for 1000 steps.
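# --checkpoint_exclude_scopes keeps the ImageNet-trained Logits/AuxLogits
# variables (whose shapes depend on the number of classes) out of the restore,
# and --trainable_scopes limits training to those same scopes, so only the
# freshly initialized classification layers are updated in this pass.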
python train_image_classifier.py \
  --train_dir=${TRAIN_DIR} \
  --dataset_name=flowers \
  --dataset_split_name=train \
  --dataset_dir=${DATASET_DIR} \
  --model_name=inception_v3 \
  --checkpoint_path=${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt \
  --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
  --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
  --max_number_of_steps=1000 \
  --batch_size=32 \
  --learning_rate=0.01 \
  --learning_rate_decay_type=fixed \
  --save_interval_secs=60 \
  --save_summaries_secs=60 \
  --log_every_n_steps=100 \
  --optimizer=rmsprop \
  --weight_decay=0.00004
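
# The summaries written every --save_summaries_secs can be monitored while
# training runs, e.g. with: tensorboard --logdir=${TRAIN_DIR}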

# Run evaluation.
python eval_image_classifier.py \
  --checkpoint_path=${TRAIN_DIR} \
  --eval_dir=${TRAIN_DIR} \
  --dataset_name=flowers \
  --dataset_split_name=validation \
  --dataset_dir=${DATASET_DIR} \
  --model_name=inception_v3
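
# eval_image_classifier.py logs its streaming eval metrics (accuracy and
# recall@5 by default) for the validation split.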

# Fine-tune all layers for 500 steps.
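# This pass warm-starts from the checkpoint produced above (--checkpoint_path
# now points at ${TRAIN_DIR}); with no --trainable_scopes or
# --checkpoint_exclude_scopes set, every variable is restored and trained,
# at a much lower learning rate.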
python train_image_classifier.py \
  --train_dir=${TRAIN_DIR}/all \
  --dataset_name=flowers \
  --dataset_split_name=train \
  --dataset_dir=${DATASET_DIR} \
  --model_name=inception_v3 \
  --checkpoint_path=${TRAIN_DIR} \
  --max_number_of_steps=500 \
  --batch_size=32 \
  --learning_rate=0.0001 \
  --learning_rate_decay_type=fixed \
  --save_interval_secs=60 \
  --save_summaries_secs=60 \
  --log_every_n_steps=10 \
  --optimizer=rmsprop \
  --weight_decay=0.00004

# Run evaluation.
python eval_image_classifier.py \
  --checkpoint_path=${TRAIN_DIR}/all \
  --eval_dir=${TRAIN_DIR}/all \
  --dataset_name=flowers \
  --dataset_split_name=validation \
  --dataset_dir=${DATASET_DIR} \
  --model_name=inception_v3