Save and continue training the LSTM network












0














I try to make a LSTM model continue were its last run left off. All compiles fine till I try to fit the network. Then it gives an error:




ValueError: Error when checking target: expected dense_29 to have 3 dimensions, but got array with shape (672, 1)




I checked various articles such as this and this but I don't see what is wrong in my code.



from keras import Sequential
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from keras.models import Sequential,Model
from keras.layers import LSTM, Dense, Bidirectional, Input,Dropout,BatchNormalization
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints

from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import os.path
import os
filepath="Train-weights.best.hdf5"
act = 'relu'

model = Sequential()
model.add(BatchNormalization(input_shape=(10, 128)))
model.add(Bidirectional(LSTM(128, dropout=0.5, activation=act, return_sequences=True)))
model.add(Dense(1,activation='sigmoid'))

if (os.path.exists(filepath)):
print("extending training of previous run")
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
with open('model_architecture.json', 'r') as f:
model = model_from_json(f.read())
model.load_weights(filepath)
else:
print("First run")
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=2)
model.save_weights(filepath)
with open('model_architecture.json', 'w') as f:
f.write(model.to_json())

checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]

model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=0)









share|improve this question





























    0














    I try to make a LSTM model continue were its last run left off. All compiles fine till I try to fit the network. Then it gives an error:




    ValueError: Error when checking target: expected dense_29 to have 3 dimensions, but got array with shape (672, 1)




    I checked various articles such as this and this but I don't see what is wrong in my code.



    from keras import Sequential
    from keras.preprocessing.sequence import pad_sequences
    from sklearn.model_selection import train_test_split
    from keras.models import Sequential,Model
    from keras.layers import LSTM, Dense, Bidirectional, Input,Dropout,BatchNormalization
    from keras import backend as K
    from keras.engine.topology import Layer
    from keras import initializers, regularizers, constraints

    from keras.callbacks import ModelCheckpoint
    from keras.models import load_model
    import os.path
    import os
    filepath="Train-weights.best.hdf5"
    act = 'relu'

    model = Sequential()
    model.add(BatchNormalization(input_shape=(10, 128)))
    model.add(Bidirectional(LSTM(128, dropout=0.5, activation=act, return_sequences=True)))
    model.add(Dense(1,activation='sigmoid'))

    if (os.path.exists(filepath)):
    print("extending training of previous run")
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    with open('model_architecture.json', 'r') as f:
    model = model_from_json(f.read())
    model.load_weights(filepath)
    else:
    print("First run")
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=2)
    model.save_weights(filepath)
    with open('model_architecture.json', 'w') as f:
    f.write(model.to_json())

    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=0)









    share|improve this question



























      0












      0








      0







      I try to make a LSTM model continue were its last run left off. All compiles fine till I try to fit the network. Then it gives an error:




      ValueError: Error when checking target: expected dense_29 to have 3 dimensions, but got array with shape (672, 1)




      I checked various articles such as this and this but I don't see what is wrong in my code.



      from keras import Sequential
      from keras.preprocessing.sequence import pad_sequences
      from sklearn.model_selection import train_test_split
      from keras.models import Sequential,Model
      from keras.layers import LSTM, Dense, Bidirectional, Input,Dropout,BatchNormalization
      from keras import backend as K
      from keras.engine.topology import Layer
      from keras import initializers, regularizers, constraints

      from keras.callbacks import ModelCheckpoint
      from keras.models import load_model
      import os.path
      import os
      filepath="Train-weights.best.hdf5"
      act = 'relu'

      model = Sequential()
      model.add(BatchNormalization(input_shape=(10, 128)))
      model.add(Bidirectional(LSTM(128, dropout=0.5, activation=act, return_sequences=True)))
      model.add(Dense(1,activation='sigmoid'))

      if (os.path.exists(filepath)):
      print("extending training of previous run")
      model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
      with open('model_architecture.json', 'r') as f:
      model = model_from_json(f.read())
      model.load_weights(filepath)
      else:
      print("First run")
      model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
      model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=2)
      model.save_weights(filepath)
      with open('model_architecture.json', 'w') as f:
      f.write(model.to_json())

      checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
      callbacks_list = [checkpoint]

      model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=0)









      share|improve this question















      I try to make a LSTM model continue were its last run left off. All compiles fine till I try to fit the network. Then it gives an error:




      ValueError: Error when checking target: expected dense_29 to have 3 dimensions, but got array with shape (672, 1)




      I checked various articles such as this and this but I don't see what is wrong in my code.



      from keras import Sequential
      from keras.preprocessing.sequence import pad_sequences
      from sklearn.model_selection import train_test_split
      from keras.models import Sequential,Model
      from keras.layers import LSTM, Dense, Bidirectional, Input,Dropout,BatchNormalization
      from keras import backend as K
      from keras.engine.topology import Layer
      from keras import initializers, regularizers, constraints

      from keras.callbacks import ModelCheckpoint
      from keras.models import load_model
      import os.path
      import os
      filepath="Train-weights.best.hdf5"
      act = 'relu'

      model = Sequential()
      model.add(BatchNormalization(input_shape=(10, 128)))
      model.add(Bidirectional(LSTM(128, dropout=0.5, activation=act, return_sequences=True)))
      model.add(Dense(1,activation='sigmoid'))

      if (os.path.exists(filepath)):
      print("extending training of previous run")
      model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
      with open('model_architecture.json', 'r') as f:
      model = model_from_json(f.read())
      model.load_weights(filepath)
      else:
      print("First run")
      model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
      model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=2)
      model.save_weights(filepath)
      with open('model_architecture.json', 'w') as f:
      f.write(model.to_json())

      checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
      callbacks_list = [checkpoint]

      model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=100, batch_size=32, callbacks=callbacks_list, verbose=0)






      python machine-learning keras lstm recurrent-neural-network






      share|improve this question















      share|improve this question













      share|improve this question




      share|improve this question








      edited Nov 21 '18 at 13:59









      today

      9,97621536




      9,97621536










      asked Nov 21 '18 at 13:47









      user3800527

      757721




      757721
























          1 Answer
          1






          active

          oldest

          votes


















          1














          Try model.summary() and you would see that the output shape of last layer (i.e. the Dense layer) in the network is (None, 10, 1). Therefore, the labels you provide to the model (i.e. y_train) must also have a shape of (num_samples, 10, 1).



          If the output shape (None, 10, 1) is not what you wanted (e.g. you want (None, 1) as the output shape of your model) then you need to modify your model definition. One simple modification to achieve that is removing the return_sequences=True argument from the LSTM layer.






          share|improve this answer





















          • I missed it return_sequences... thanks.
            – user3800527
            Nov 21 '18 at 18:47











          Your Answer






          StackExchange.ifUsing("editor", function () {
          StackExchange.using("externalEditor", function () {
          StackExchange.using("snippets", function () {
          StackExchange.snippets.init();
          });
          });
          }, "code-snippets");

          StackExchange.ready(function() {
          var channelOptions = {
          tags: "".split(" "),
          id: "1"
          };
          initTagRenderer("".split(" "), "".split(" "), channelOptions);

          StackExchange.using("externalEditor", function() {
          // Have to fire editor after snippets, if snippets enabled
          if (StackExchange.settings.snippets.snippetsEnabled) {
          StackExchange.using("snippets", function() {
          createEditor();
          });
          }
          else {
          createEditor();
          }
          });

          function createEditor() {
          StackExchange.prepareEditor({
          heartbeatType: 'answer',
          autoActivateHeartbeat: false,
          convertImagesToLinks: true,
          noModals: true,
          showLowRepImageUploadWarning: true,
          reputationToPostImages: 10,
          bindNavPrevention: true,
          postfix: "",
          imageUploader: {
          brandingHtml: "Powered by u003ca class="icon-imgur-white" href="https://imgur.com/"u003eu003c/au003e",
          contentPolicyHtml: "User contributions licensed under u003ca href="https://creativecommons.org/licenses/by-sa/3.0/"u003ecc by-sa 3.0 with attribution requiredu003c/au003e u003ca href="https://stackoverflow.com/legal/content-policy"u003e(content policy)u003c/au003e",
          allowUrls: true
          },
          onDemand: true,
          discardSelector: ".discard-answer"
          ,immediatelyShowMarkdownHelp:true
          });


          }
          });














          draft saved

          draft discarded


















          StackExchange.ready(
          function () {
          StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fstackoverflow.com%2fquestions%2f53413505%2fsave-and-continue-training-the-lstm-network%23new-answer', 'question_page');
          }
          );

          Post as a guest















          Required, but never shown

























          1 Answer
          1






          active

          oldest

          votes








          1 Answer
          1






          active

          oldest

          votes









          active

          oldest

          votes






          active

          oldest

          votes









          1














          Try model.summary() and you would see that the output shape of last layer (i.e. the Dense layer) in the network is (None, 10, 1). Therefore, the labels you provide to the model (i.e. y_train) must also have a shape of (num_samples, 10, 1).



          If the output shape (None, 10, 1) is not what you wanted (e.g. you want (None, 1) as the output shape of your model) then you need to modify your model definition. One simple modification to achieve that is removing the return_sequences=True argument from the LSTM layer.






          share|improve this answer





















          • I missed it return_sequences... thanks.
            – user3800527
            Nov 21 '18 at 18:47
















          1














          Try model.summary() and you would see that the output shape of last layer (i.e. the Dense layer) in the network is (None, 10, 1). Therefore, the labels you provide to the model (i.e. y_train) must also have a shape of (num_samples, 10, 1).



          If the output shape (None, 10, 1) is not what you wanted (e.g. you want (None, 1) as the output shape of your model) then you need to modify your model definition. One simple modification to achieve that is removing the return_sequences=True argument from the LSTM layer.






          share|improve this answer





















          • I missed it return_sequences... thanks.
            – user3800527
            Nov 21 '18 at 18:47














          1












          1








          1






          Try model.summary() and you would see that the output shape of last layer (i.e. the Dense layer) in the network is (None, 10, 1). Therefore, the labels you provide to the model (i.e. y_train) must also have a shape of (num_samples, 10, 1).



          If the output shape (None, 10, 1) is not what you wanted (e.g. you want (None, 1) as the output shape of your model) then you need to modify your model definition. One simple modification to achieve that is removing the return_sequences=True argument from the LSTM layer.






          share|improve this answer












          Try model.summary() and you would see that the output shape of last layer (i.e. the Dense layer) in the network is (None, 10, 1). Therefore, the labels you provide to the model (i.e. y_train) must also have a shape of (num_samples, 10, 1).



          If the output shape (None, 10, 1) is not what you wanted (e.g. you want (None, 1) as the output shape of your model) then you need to modify your model definition. One simple modification to achieve that is removing the return_sequences=True argument from the LSTM layer.







          share|improve this answer












          share|improve this answer



          share|improve this answer










          answered Nov 21 '18 at 13:56









          today

          9,97621536




          9,97621536












          • I missed it return_sequences... thanks.
            – user3800527
            Nov 21 '18 at 18:47


















          • I missed it return_sequences... thanks.
            – user3800527
            Nov 21 '18 at 18:47
















          I missed it return_sequences... thanks.
          – user3800527
          Nov 21 '18 at 18:47




          I missed it return_sequences... thanks.
          – user3800527
          Nov 21 '18 at 18:47


















          draft saved

          draft discarded




















































          Thanks for contributing an answer to Stack Overflow!


          • Please be sure to answer the question. Provide details and share your research!

          But avoid



          • Asking for help, clarification, or responding to other answers.

          • Making statements based on opinion; back them up with references or personal experience.


          To learn more, see our tips on writing great answers.





          Some of your past answers have not been well-received, and you're in danger of being blocked from answering.


          Please pay close attention to the following guidance:


          • Please be sure to answer the question. Provide details and share your research!

          But avoid



          • Asking for help, clarification, or responding to other answers.

          • Making statements based on opinion; back them up with references or personal experience.


          To learn more, see our tips on writing great answers.




          draft saved


          draft discarded














          StackExchange.ready(
          function () {
          StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fstackoverflow.com%2fquestions%2f53413505%2fsave-and-continue-training-the-lstm-network%23new-answer', 'question_page');
          }
          );

          Post as a guest















          Required, but never shown





















































          Required, but never shown














          Required, but never shown












          Required, but never shown







          Required, but never shown

































          Required, but never shown














          Required, but never shown












          Required, but never shown







          Required, but never shown







          Popular posts from this blog

          404 Error Contact Form 7 ajax form submitting

          How to know if a Active Directory user can login interactively

          Refactoring coordinates for Minecraft Pi buildings written in Python