seq2seq data_generator
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical

def data_generator(encoder_input, decoder_input, decoder_target, max_src_len, max_tar_len, src_vocab_size, tar_vocab_size, batch_size=64):
    n_batches = len(encoder_input) // batch_size
    cnt = 1
    while True:  # loop indefinitely so Keras can keep drawing batches across epochs
        # pad the current batch of variable-length integer sequences to fixed lengths
        encoder_input_data = pad_sequences(encoder_input[(cnt-1)*batch_size:cnt*batch_size], maxlen=max_src_len, padding='post')
        decoder_input_data = pad_sequences(decoder_input[(cnt-1)*batch_size:cnt*batch_size], maxlen=max_tar_len, padding='post')
        decoder_output_data = pad_sequences(decoder_target[(cnt-1)*batch_size:cnt*batch_size], maxlen=max_tar_len, padding='post')
        cnt += 1
        if cnt > n_batches:  # wrap around after the last full batch (the original reset one step early and skipped it)
            cnt = 1
        # one-hot encode per batch so the full dataset never has to sit in memory at once
        yield [to_categorical(encoder_input_data, num_classes=src_vocab_size),
               to_categorical(decoder_input_data, num_classes=tar_vocab_size)], \
              to_categorical(decoder_output_data, num_classes=tar_vocab_size)
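A quick way to sanity-check the generator is to pull a single batch with next() and inspect the shapes. The toy integer-encoded sequences and vocabulary sizes below are made-up placeholders for illustration, not values from the original data:

# toy integer-encoded sequences (normally produced by a tokenizer)
enc_in  = [[1, 2, 3], [4, 5], [2, 6, 7, 1], [3, 3]]
dec_in  = [[1, 8, 9], [1, 5, 4], [1, 2], [1, 7, 6]]
dec_out = [[8, 9, 2], [5, 4, 2], [2, 2], [7, 6, 2]]

gen = data_generator(enc_in, dec_in, dec_out,
                     max_src_len=4, max_tar_len=3,
                     src_vocab_size=10, tar_vocab_size=10, batch_size=2)

(enc_batch, dec_batch), target_batch = next(gen)
print(enc_batch.shape)     # (2, 4, 10): batch, padded source length, one-hot source vocab
print(dec_batch.shape)     # (2, 3, 10): batch, padded target length, one-hot target vocab
print(target_batch.shape)  # (2, 3, 10)

Because the generator yields ([encoder_batch, decoder_batch], target_batch) tuples forever, it can be passed straight to model.fit with steps_per_epoch set to len(encoder_input) // batch_size.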