Kaynağa Gözat

Merge pull request #377 from kaiix/textsum-multigpu

Fix running textsum on a single GPU
Xin Pan 9 yıl önce
ebeveyn
işleme
7498acc6e0

+ 2 - 1
textsum/seq2seq_attention.py

@@ -94,7 +94,8 @@ def _Train(model, data_batcher):
                              save_summaries_secs=60,
                              save_model_secs=FLAGS.checkpoint_secs,
                              global_step=model.global_step)
-    sess = sv.prepare_or_wait_for_session()
+    sess = sv.prepare_or_wait_for_session(config=tf.ConfigProto(
+        allow_soft_placement=True))
     running_avg_loss = 0
     step = 0
     while not sv.should_stop() and step < FLAGS.max_run_steps:

+ 2 - 1
textsum/seq2seq_attention_model.py

@@ -105,7 +105,8 @@ class Seq2SeqAttentionModel(object):
     if self._num_gpus == 0:
       return ''
     dev = '/gpu:%d' % self._cur_gpu
-    self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)
+    if self._num_gpus > 1:
+      self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)
     return dev
 
   def _get_gpu(self, gpu_id):