
I am going through the following code for a memory network on the bAbI dataset in Keras (the Keras memory network example implementation):

      '''Trains a memory network on the bAbI dataset. 
      References: 
      - Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush, 
       "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks", 
       http://arxiv.org/abs/1502.05698 
      - Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus, 
       "End-To-End Memory Networks", 
       http://arxiv.org/abs/1503.08895 
      Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs. 
      Time per epoch: 3s on CPU (core i7). 
      ''' 
      from __future__ import print_function 

      from keras.models import Sequential, Model 
      from keras.layers.embeddings import Embedding 
      from keras.layers import Input, Activation, Dense, Permute, Dropout, add, dot, concatenate 
      from keras.layers import LSTM 
      from keras.utils.data_utils import get_file 
      from keras.preprocessing.sequence import pad_sequences 
      from functools import reduce 
      import tarfile 
      import numpy as np 
      import re 


      def tokenize(sent): 
       '''Return the tokens of a sentence including punctuation. 
       >>> tokenize('Bob dropped the apple. Where is the apple?') 
       ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] 
       ''' 
       return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()] 


      def parse_stories(lines, only_supporting=False): 
       '''Parse stories provided in the bAbi tasks format 
       If only_supporting is true, only the sentences 
       that support the answer are kept. 
       ''' 
       data = [] 
       story = [] 
       for line in lines: 
        line = line.decode('utf-8').strip() 
        nid, line = line.split(' ', 1) 
        nid = int(nid) 
        if nid == 1: 
         story = [] 
        if '\t' in line: 
         q, a, supporting = line.split('\t') 
         q = tokenize(q) 
         substory = None 
         if only_supporting: 
          # Only select the related substory 
          supporting = map(int, supporting.split()) 
          substory = [story[i - 1] for i in supporting] 
         else: 
          # Provide all the substories 
          substory = [x for x in story if x] 
         data.append((substory, q, a)) 
         story.append('') 
        else: 
         sent = tokenize(line) 
         story.append(sent) 
       return data 


      def get_stories(f, only_supporting=False, max_length=None): 
       '''Given a file name, read the file, 
       retrieve the stories, 
       and then convert the sentences into a single story. 
       If max_length is supplied, 
       any stories longer than max_length tokens will be discarded. 
       ''' 
       data = parse_stories(f.readlines(), only_supporting=only_supporting) 
       flatten = lambda data: reduce(lambda x, y: x + y, data) 
       data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length] 
       return data 


      def vectorize_stories(data, word_idx, story_maxlen, query_maxlen): 
       X = [] 
       Xq = [] 
       Y = [] 
       for story, query, answer in data: 
        x = [word_idx[w] for w in story] 
        xq = [word_idx[w] for w in query] 
        # let's not forget that index 0 is reserved 
        y = np.zeros(len(word_idx) + 1) 
        y[word_idx[answer]] = 1 
        X.append(x) 
        Xq.append(xq) 
        Y.append(y) 
       return (pad_sequences(X, maxlen=story_maxlen), 
         pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)) 

      try: 
       path = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz') 
      except: 
       print('Error downloading dataset, please download it manually:\n' 
         '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n' 
         '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz') 
       raise 
      tar = tarfile.open(path) 

      challenges = { 
       # QA1 with 10,000 samples 
       'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt', 
       # QA2 with 10,000 samples 
       'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt', 
      } 
      challenge_type = 'single_supporting_fact_10k' 
      challenge = challenges[challenge_type] 

      print('Extracting stories for the challenge:', challenge_type) 
      train_stories = get_stories(tar.extractfile(challenge.format('train'))) 
      test_stories = get_stories(tar.extractfile(challenge.format('test'))) 

      vocab = set() 
      for story, q, answer in train_stories + test_stories: 
       vocab |= set(story + q + [answer]) 
      vocab = sorted(vocab) 

      # Reserve 0 for masking via pad_sequences 
      vocab_size = len(vocab) + 1 
      story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories))) 
      query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories))) 

      print('-') 
      print('Vocab size:', vocab_size, 'unique words') 
      print('Story max length:', story_maxlen, 'words') 
      print('Query max length:', query_maxlen, 'words') 
      print('Number of training stories:', len(train_stories)) 
      print('Number of test stories:', len(test_stories)) 
      print('-') 
      print('Here\'s what a "story" tuple looks like (input, query, answer):') 
      print(train_stories[0]) 
      print('-') 
      print('Vectorizing the word sequences...') 

      word_idx = dict((c, i + 1) for i, c in enumerate(vocab)) 
      inputs_train, queries_train, answers_train = vectorize_stories(train_stories, 
                      word_idx, 
                      story_maxlen, 
                      query_maxlen) 
      inputs_test, queries_test, answers_test = vectorize_stories(test_stories, 
                     word_idx, 
                     story_maxlen, 
                     query_maxlen) 

      print('-') 
      print('inputs: integer tensor of shape (samples, max_length)') 
      print('inputs_train shape:', inputs_train.shape) 
      print('inputs_test shape:', inputs_test.shape) 
      print('-') 
      print('queries: integer tensor of shape (samples, max_length)') 
      print('queries_train shape:', queries_train.shape) 
      print('queries_test shape:', queries_test.shape) 
      print('-') 
      print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)') 
      print('answers_train shape:', answers_train.shape) 
      print('answers_test shape:', answers_test.shape) 
      print('-') 
      print('Compiling...') 

      # placeholders 
      input_sequence = Input((story_maxlen,)) 
      question = Input((query_maxlen,)) 

      # encoders 
      # embed the input sequence into a sequence of vectors 
      input_encoder_m = Sequential() 
      input_encoder_m.add(Embedding(input_dim=vocab_size, 
              output_dim=64)) 
      input_encoder_m.add(Dropout(0.3)) 
      # output: (samples, story_maxlen, embedding_dim) 

      # embed the input into a sequence of vectors of size query_maxlen 
      input_encoder_c = Sequential() 
      input_encoder_c.add(Embedding(input_dim=vocab_size, 
              output_dim=query_maxlen)) 
      input_encoder_c.add(Dropout(0.3)) 
      # output: (samples, story_maxlen, query_maxlen) 

      # embed the question into a sequence of vectors 
      question_encoder = Sequential() 
      question_encoder.add(Embedding(input_dim=vocab_size, 
              output_dim=64, 
              input_length=query_maxlen)) 
      question_encoder.add(Dropout(0.3)) 
      # output: (samples, query_maxlen, embedding_dim) 

      # encode input sequence and questions (which are indices) 
      # to sequences of dense vectors 
      input_encoded_m = input_encoder_m(input_sequence) 
      input_encoded_c = input_encoder_c(input_sequence) 
      question_encoded = question_encoder(question) 

      # compute a 'match' between the first input vector sequence 
      # and the question vector sequence 
      # shape: `(samples, story_maxlen, query_maxlen)` 
      match = dot([input_encoded_m, question_encoded], axes=(2, 2)) 
      match = Activation('softmax')(match) 

      # add the match matrix with the second input vector sequence 
      response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen) 
      response = Permute((2, 1))(response) # (samples, query_maxlen, story_maxlen) 

      # concatenate the match matrix with the question vector sequence 
      answer = concatenate([response, question_encoded]) 

      # the original paper uses a matrix multiplication for this reduction step. 
      # we choose to use a RNN instead. 
      answer = LSTM(32)(answer) # (samples, 32) 

      # one regularization layer -- more would probably be needed. 
      answer = Dropout(0.3)(answer) 
      answer = Dense(vocab_size)(answer) # (samples, vocab_size) 
      # we output a probability distribution over the vocabulary 
      answer = Activation('softmax')(answer) 

      # build the final model 
      model = Model([input_sequence, question], answer) 
      model.compile(optimizer='rmsprop', loss='categorical_crossentropy', 
          metrics=['accuracy']) 

      # train 
      model.fit([inputs_train, queries_train], answers_train, 
         batch_size=32, 
         epochs=120, 
         validation_data=([inputs_test, queries_test], answers_test)) 

Here is my understanding of the model creation part.

The part of the code below creates the dense vectors for the story and the question:

      input_encoded_m = input_encoder_m(input_sequence) 
      input_encoded_c = input_encoder_c(input_sequence) 
      question_encoded = question_encoder(question) 

The outputs of these have the shapes below:

input_encoded_m has shape (samples, story_maxlen, embedding_dim)
input_encoded_c has shape (samples, story_maxlen, query_maxlen)
question_encoded has shape (samples, query_maxlen, embedding_dim)

So the same input sequence is embedded into two different output dimensions (64 and query_maxlen, which is 4 here), while question_encoded embeds the question. I am not clear on why the same input is embedded twice, with different dimensions.
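A quick way to check these shapes, assuming the tensors from the listing above are still in scope, is K.int_shape (the batch dimension shows up as None, and story_maxlen/query_maxlen appear as their concrete values):

      from keras import backend as K

      print(K.int_shape(input_encoded_m))   # (None, story_maxlen, 64)
      print(K.int_shape(input_encoded_c))   # (None, story_maxlen, query_maxlen)
      print(K.int_shape(question_encoded))  # (None, query_maxlen, 64)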

      match = dot([input_encoded_m, question_encoded], axes=(2, 2)) 
      match = Activation('softmax')(match) 

Now the part above matches the words of the story against the words of the question and applies a softmax activation to the output, which I take to mean that matching words are identified.

The next step adds the match matrix from the step above to input_encoded_c:

      # add the match matrix with the second input vector sequence
      response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)

The comment says "the second input vector sequence", but no second input has been processed anywhere. I cannot understand this.

Also, what does the Permute applied to the output of this step (response) accomplish?

Lastly, is the part below just concatenating the story from above with the question before the LSTM layer? Please correct me if my understanding here is wrong:

      # concatenate the match matrix with the question vector sequence 
      answer = concatenate([response, question_encoded]) 

I am posting here because I could not find an intuitive explanation of this anywhere.

Any help is appreciated. Thank you.

Answer


First, the match variable does not just identify matching words; it gives a probability distribution over the inputs. These values can be seen as weights for each input sentence.
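A toy NumPy sketch of that dot-then-softmax step, with made-up sizes (3 story words, 2 query words, embedding dimension 4); as in the paper, the softmax here is taken over the story positions, so each query word gets a distribution over the story:

      import numpy as np

      story_m = np.random.rand(3, 4)   # one sample of input_encoded_m (story_maxlen=3, dim=4)
      query_q = np.random.rand(2, 4)   # one sample of question_encoded (query_maxlen=2, dim=4)

      scores = story_m @ query_q.T     # per-sample version of dot(..., axes=(2, 2)) -> (3, 2)
      # softmax over the story axis: a probability distribution over story words per query word
      weights = np.exp(scores) / np.exp(scores).sum(axis=0, keepdims=True)
      print(weights.sum(axis=0))       # each column sums to 1.0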

In the code, the input sequence is embedded using two different matrices, which gives input_encoded_m and input_encoded_c. The first embedding is used to find the match weights. The weights are then applied to the second set of embedding vectors to find the answer. Applying the weights to the same vectors they were computed from would not make sense.
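In the paper (section 2.1), "applying the weights to the second embedding" is a weighted sum o = sum_i p_i * c_i over the C-embeddings; the Keras example above replaces that reduction with an element-wise add followed by an LSTM, as its own comments note. A minimal sketch of the paper's version, with made-up numbers:

      import numpy as np

      p = np.array([0.7, 0.2, 0.1])    # attention weights over 3 story words (one query)
      story_c = np.random.rand(3, 4)   # the second ("C") embedding of the same story words
      o = p @ story_c                  # o = sum_i p_i * c_i  -> shape (4,)
      print(o.shape)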

Next, the Permute. To generate the answer, the query is concatenated to the response, so the dimensions of the response are permuted first so that both tensors line up on the same dimension.
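Concretely, Permute((2, 1)) is there so that response and question_encoded share the query_maxlen axis and can be concatenated along the last axis. A shape-only sketch with made-up sizes (story_maxlen=3, query_maxlen=2, embedding dim 4):

      import numpy as np

      response = np.zeros((1, 3, 2))   # (samples, story_maxlen, query_maxlen)
      question = np.zeros((1, 2, 4))   # (samples, query_maxlen, embedding_dim)

      response = np.transpose(response, (0, 2, 1))            # Permute((2, 1)) -> (1, 2, 3)
      merged = np.concatenate([response, question], axis=-1)  # -> (1, 2, 3 + 4)
      print(merged.shape)              # (samples, query_maxlen, story_maxlen + embedding_dim)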

Reading section 2.1 of the End-To-End Memory Networks paper will help you understand this.
