
I was following this tutorial on how to implement a convolutional neural network. With this code I am getting the error socket.gaierror: [Errno -2] Name or service not known, followed by urllib.error.URLError: <urlopen error [Errno -2] Name or service not known>.

from __future__ import absolute_import 
from __future__ import division 
from __future__ import print_function 
import numpy as np # linear algebra 
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)  
from subprocess import check_output 
print(check_output(["ls", "../input"]).decode("utf8")) 
import tensorflow as tf  
from tensorflow.contrib import learn 
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib 

tf.logging.set_verbosity(tf.logging.INFO) 

def cnn_model_fn(features, labels, mode): 
    """Model function for CNN.""" 
    # Input Layer 
    input_layer = tf.reshape(features, [-1, 28, 28, 1]) 

    # Convolutional Layer #1 
    conv1 = tf.layers.conv2d(
     inputs=input_layer, 
     filters=32, 
     kernel_size=[5, 5], 
     padding="same", 
     activation=tf.nn.relu) 

    # Pooling Layer #1 
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) 

    # Convolutional Layer #2 and Pooling Layer #2 
    conv2 = tf.layers.conv2d(
     inputs=pool1, 
     filters=64, 
     kernel_size=[5, 5], 
     padding="same", 
     activation=tf.nn.relu) 
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) 

    # Dense Layer 
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) 
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) 
    dropout = tf.layers.dropout(
     inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN) 

    # Logits Layer 
    logits = tf.layers.dense(inputs=dropout, units=10) 

    loss = None 
    train_op = None 

    # Calculate Loss (for both TRAIN and EVAL modes) 
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)

    # Configure the Training Op (for TRAIN mode) 
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")

    # Generate Predictions 
    predictions = { 
     "classes": tf.argmax(
      input=logits, axis=1), 
     "probabilities": tf.nn.softmax(
      logits, name="softmax_tensor") 
    } 

    # Return a ModelFnOps object 
    return model_fn_lib.ModelFnOps(
     mode=mode, predictions=predictions, loss=loss, train_op=train_op) 


def main(): 
    print("In main") 
    # Load training and eval data 
    mnist = learn.datasets.load_dataset("mnist") 
    train_data = tf.train.string_input_producer(tf.train.match_filenames_once("../inputs/train/*.jpg")) # Returns np.array 
    train_labels = np.asarray(train_labels.csv, dtype=np.float32) 
    test_data = tf.train.string_input_producer(tf.train.match_filenames_once("../inputs/test/*.jpg")) # Returns np.array 
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32) 


    # Create the Estimator 
    mnist_classifier = learn.Estimator(
      model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model") 

    # Set up logging for predictions 
    tensors_to_log = {"probabilities": "softmax_tensor"} 
    logging_hook = tf.train.LoggingTensorHook(
     tensors=tensors_to_log, every_n_iter=50) 


    mnist_classifier.fit(
     x=train_data, 
     y=train_labels, 
     batch_size=100, 
     steps=20000, 
     monitors=[logging_hook]) 

    # Configure the accuracy metric for evaluation 
    metrics = { 
     "accuracy": 
      learn.MetricSpec(
       metric_fn=tf.metrics.accuracy, prediction_key="classes"), 
    } 


    # Evaluate the model and print results 
    eval_results = mnist_classifier.evaluate(
     x=eval_data, y=eval_labels, metrics=metrics) 
    print(eval_results) 

main() 

Traceback (most recent call last): 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 1318, in do_open 
    encode_chunked=req.has_header('Transfer-encoding')) 
    File "/opt/conda/lib/python3.6/http/client.py", line 1239, in request 
    self._send_request(method, url, body, headers, encode_chunked) 
    File "/opt/conda/lib/python3.6/http/client.py", line 1285, in _send_request 
    self.endheaders(body, encode_chunked=encode_chunked) 
    File "/opt/conda/lib/python3.6/http/client.py", line 1234, in endheaders 
    self._send_output(message_body, encode_chunked=encode_chunked) 
    File "/opt/conda/lib/python3.6/http/client.py", line 1026, in _send_output 
    self.send(msg) 
    File "/opt/conda/lib/python3.6/http/client.py", line 964, in send 
    self.connect() 
    File "/opt/conda/lib/python3.6/http/client.py", line 1392, in connect 
    super().connect() 
    File "/opt/conda/lib/python3.6/http/client.py", line 936, in connect 
    (self.host,self.port), self.timeout, self.source_address) 
    File "/opt/conda/lib/python3.6/socket.py", line 704, in create_connection 
    for res in getaddrinfo(host, port, 0, SOCK_STREAM): 
    File "/opt/conda/lib/python3.6/socket.py", line 743, in getaddrinfo 
    for res in _socket.getaddrinfo(host, port, family, type, proto, flags): 
socket.gaierror: [Errno -2] Name or service not known 

During handling of the above exception, another exception occurred: 

Traceback (most recent call last): 
    File "../src/script.py", line 130, in <module> 
    main() 
    File "../src/script.py", line 93, in main 
    mnist = learn.datasets.load_dataset("mnist") 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/__init__.py", line 73, in load_dataset 
    return DATASETS[name]() 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py", line 279, in load_mnist 
    return read_data_sets(train_dir) 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py", line 235, in read_data_sets 
    SOURCE_URL + TRAIN_IMAGES) 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 208, in maybe_download 
    temp_file_name, _ = urlretrieve_with_retry(source_url) 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 165, in wrapped_fn 
    return fn(*args, **kwargs) 
    File "/opt/conda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 190, in urlretrieve_with_retry 
    return urllib.request.urlretrieve(url, filename) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 248, in urlretrieve 
    with contextlib.closing(urlopen(url, data)) as fp: 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 223, in urlopen 
    return opener.open(url, data, timeout) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 526, in open 
    response = self._open(req, data) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 544, in _open 
    '_open', req) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 504, in _call_chain 
    result = func(*args) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 1361, in https_open 
    context=self._context, check_hostname=self._check_hostname) 
    File "/opt/conda/lib/python3.6/urllib/request.py", line 1320, in do_open 
    raise URLError(err) 
urllib.error.URLError: <urlopen error [Errno -2] Name or service not known> 

This message seems very unclear to me.

I followed the instructions there, and this is the code I have now. Any ideas what could be causing this?

Answers


The line that tries to download the mnist dataset is the cause of the error: mnist = learn.datasets.load_dataset("mnist"). However, I don't see mnist actually being used anywhere in your code, so if you are not using the dataset you can simply comment that line out. If you do want to use it, download it from http://yann.lecun.com/exdb/mnist/ (all four files) and point the call at that directory instead: mnist = learn.datasets.load_dataset("/path/to/mnist").
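For the second option, here is a minimal sketch, assuming the four .gz files have already been copied into a local directory (mnist_data below is a hypothetical path). read_data_sets() only downloads files it cannot find in that directory, so no network access is needed:

from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

# Assumes train-images-idx3-ubyte.gz, train-labels-idx1-ubyte.gz,
# t10k-images-idx3-ubyte.gz and t10k-labels-idx1-ubyte.gz are already in ./mnist_data
mnist = read_data_sets("mnist_data")              # hypothetical local directory
train_data = mnist.train.images                   # float32 array, shape (55000, 784)
train_labels = mnist.train.labels.astype("int32")
eval_data = mnist.test.images
eval_labels = mnist.test.labels.astype("int32")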


The script seems to be failing on this line:

mnist = learn.datasets.load_dataset("mnist") 

Since it cannot find a local mnist dataset on disk, it tries to download it, but (for whatever reason) it can't.

Try putting the dataset into the MNIST-data directory (check the source of load_dataset(), which calls load_mnist() with the default train_dir='MNIST-data'). Eventually read_data_sets tries to load the train/test images and labels from disk, and only if they are not found in ./MNIST-data does it try to download them (train-images-idx3-ubyte.gz and so on). See the sketch below.
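A minimal sketch of that check, assuming the default train_dir='MNIST-data' and the file names from the contrib source (treat both as assumptions if your TensorFlow version differs):

import os
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

train_dir = "MNIST-data"   # default train_dir used by load_mnist()
expected = ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
            "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]
missing = [f for f in expected if not os.path.exists(os.path.join(train_dir, f))]
if missing:
    print("Missing files, read_data_sets() would try to download:", missing)
else:
    mnist = read_data_sets(train_dir)   # reads the local files, no download attempted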
