Slide 22
Slide 22 text
Combining the encoder and the decoder into a single sequence-to-sequence model
# Sequence-to-sequence model: an encoder LSTM compresses the English character
# sequence into a single 64-dim vector, which seeds a decoder LSTM that emits
# the katakana transliteration one character per timestep.
# (encoder_input / decoder_input and the training_* arrays are defined on
# earlier slides.)

# Encoder: embed English character ids (mask_zero=True skips padding) and keep
# only the LSTM's final hidden state (return_sequences=False).
encoder = Embedding(
    input_dict_size, 64, input_length=ENGLISH_LENGTH, mask_zero=True)(encoder_input)
encoder = LSTM(64, return_sequences=False)(encoder)

# Decoder: embed the teacher-forced katakana input and run an LSTM whose
# initial hidden state AND cell state are both set to the encoder's final
# hidden state. NOTE(review): reusing the hidden state as the cell state is a
# simplification — a fuller setup would take the encoder's [state_h, state_c]
# via return_state=True and pass those instead.
decoder = Embedding(
    output_dict_size, 64, input_length=KATAKANA_LENGTH, mask_zero=True)(decoder_input)
decoder = LSTM(units=64, return_sequences=True)(decoder, initial_state=[encoder, encoder])

# Per-timestep probability distribution over the katakana dictionary.
decoder = TimeDistributed(Dense(output_dict_size, activation="softmax"))(decoder)

model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder])

# FIX: the output is a softmax over output_dict_size mutually-exclusive
# classes, so the matching loss is categorical_crossentropy (assumes
# training_decoder_output is one-hot — confirm against the data-prep slide).
# binary_crossentropy scores each output unit as an independent binary label,
# which is the wrong objective for multiclass prediction and reports a
# misleadingly low loss.
model.compile(optimizer='adam', loss='categorical_crossentropy')

model.fit(
    x=[training_encoder_input, training_decoder_input],
    y=[training_decoder_output], batch_size=64, epochs=60)