mnist_m.py

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the MNIST-M dataset.

The dataset scripts used to create the dataset can be found at:
tensorflow_models/domain_adaptation/datasets/download_and_convert_mnist_m_dataset.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf

from slim.datasets import dataset_utils

slim = tf.contrib.slim

_FILE_PATTERN = 'mnist_m_%s.tfrecord'

_SPLITS_TO_SIZES = {'train': 58001, 'valid': 1000, 'test': 9001}

_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
    'image': 'A [32 x 32 x 3] RGB image.',
    'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
  """Gets a dataset tuple with instructions for reading MNIST-M.

  Args:
    split_name: A train/test split name.
    dataset_dir: The base directory of the dataset sources.
    file_pattern: The file pattern to use for matching the dataset sources. It
      is assumed that the pattern contains a '%s' string so that the split name
      can be inserted.
    reader: The TensorFlow reader type.

  Returns:
    A `Dataset` namedtuple.

  Raises:
    ValueError: if `split_name` is not a valid train/test split.
  """
  if split_name not in _SPLITS_TO_SIZES:
    raise ValueError('split name %s was not recognized.' % split_name)

  if not file_pattern:
    file_pattern = _FILE_PATTERN
  file_pattern = os.path.join(dataset_dir, file_pattern % split_name)

  # Allowing None in the signature so that dataset_factory can use the default.
  if reader is None:
    reader = tf.TFRecordReader

  keys_to_features = {
      'image/encoded':
          tf.FixedLenFeature((), tf.string, default_value=''),
      'image/format':
          tf.FixedLenFeature((), tf.string, default_value='png'),
      'image/class/label':
          tf.FixedLenFeature(
              [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
  }

  items_to_handlers = {
      'image': slim.tfexample_decoder.Image(shape=[32, 32, 3], channels=3),
      'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
  }

  decoder = slim.tfexample_decoder.TFExampleDecoder(
      keys_to_features, items_to_handlers)

  labels_to_names = None
  if dataset_utils.has_labels(dataset_dir):
    labels_to_names = dataset_utils.read_label_file(dataset_dir)

  return slim.dataset.Dataset(
      data_sources=file_pattern,
      reader=reader,
      decoder=decoder,
      num_samples=_SPLITS_TO_SIZES[split_name],
      num_classes=_NUM_CLASSES,
      items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
      labels_to_names=labels_to_names)
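
A minimal usage sketch (not part of the original file): assuming this module is importable as mnist_m and that dataset_dir points at a directory containing the converted mnist_m_*.tfrecord files, the Dataset returned by get_split can be fed to slim's DatasetDataProvider to produce decoded image/label tensors roughly as follows.

import tensorflow as tf

from slim.datasets import mnist_m  # assumed import path for this module

slim = tf.contrib.slim

dataset_dir = '/tmp/mnist_m'  # hypothetical path to the converted TFRecords

# Build the Dataset description for the training split.
dataset = mnist_m.get_split('train', dataset_dir)

# DatasetDataProvider sets up the input queues and decodes single examples.
provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset, shuffle=True, common_queue_capacity=256, common_queue_min=128)
image, label = provider.get(['image', 'label'])

# Batch the decoded 32x32x3 images and integer labels for training.
images, labels = tf.train.batch([image, label], batch_size=32)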