@@ -296,8 +296,12 @@ Your trained *Show and Tell* model can generate captions for any JPEG image! The
 following command line will generate captions for an image from the test set.
 
 ```shell
-# Directory containing model checkpoints.
-CHECKPOINT_DIR="${HOME}/im2txt/model/train"
+# Path to checkpoint file or a directory containing checkpoint files. Passing
+# a directory will only work if there is also a file named 'checkpoint' which
+# lists the available checkpoints in the directory. It will not work if you
+# point to a directory with just a copy of a model checkpoint: in that case,
+# you will need to pass the checkpoint path explicitly.
+CHECKPOINT_PATH="${HOME}/im2txt/model/train"
 
 # Vocabulary file generated by the preprocessing script.
 VOCAB_FILE="${HOME}/im2txt/data/mscoco/word_counts.txt"
@@ -314,7 +318,7 @@ export CUDA_VISIBLE_DEVICES=""
 
 # Run inference to generate captions.
 bazel-bin/im2txt/run_inference \
-  --checkpoint_path=${CHECKPOINT_DIR} \
+  --checkpoint_path=${CHECKPOINT_PATH} \
   --vocab_file=${VOCAB_FILE} \
   --input_files=${IMAGE_FILE}
 ```
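
For reference, a minimal sketch of the explicit-path case described in the new comment: when the directory holds only a copy of a checkpoint (no 'checkpoint' index file), `--checkpoint_path` can point at the checkpoint prefix itself. The checkpoint number below is illustrative, not a path from this repository; the flags are the same ones used in the diff above.

```shell
# Sketch only: pass an explicit checkpoint prefix instead of a directory when
# no 'checkpoint' index file is present. The step number here is illustrative.
CHECKPOINT_PATH="${HOME}/im2txt/model/train/model.ckpt-1000000"

bazel-bin/im2txt/run_inference \
  --checkpoint_path=${CHECKPOINT_PATH} \
  --vocab_file=${VOCAB_FILE} \
  --input_files=${IMAGE_FILE}
```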