PeerJ Comput Sci. 2024 Feb 29;10:e1895. doi: 10.7717/peerj-cs.1895
from tensorflow.keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# Hyperparameters
vocabulary_size = 10000        # size of the tokenizer vocabulary
max_sequence_length = 100      # padded/truncated input length (tokens)
embedding_size = 100           # dimensionality of the word embeddings
num_filters = 128              # number of 1D convolution filters
kernel_size = 3                # width of the convolution window
hidden_units = 64              # units in the fully connected layer
num_classes = 2                # two output classes
# Model definition: embedding -> 1D convolution -> global max pooling -> dense -> softmax
input_text = Input(shape=(max_sequence_length,), dtype='int32')
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_size,
                      input_length=max_sequence_length)(input_text)
convolution = Conv1D(filters=num_filters, kernel_size=kernel_size, activation='relu')(embedding)
pooling = GlobalMaxPooling1D()(convolution)
dense = Dense(hidden_units, activation='relu')(pooling)
output = Dense(num_classes, activation='softmax')(dense)

model = Model(inputs=input_text, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
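For completeness, the lines below are a minimal usage sketch, not part of the original listing: they assume the inputs are zero-padded integer token sequences and that labels are one-hot encoded, and use random stand-in data, a batch size of 32, and 5 epochs purely for illustration.

# Hypothetical usage sketch (assumed, not from the original article):
# fit the compiled model on stand-in data shaped like padded token sequences.
import numpy as np
from tensorflow.keras.utils import to_categorical

# Stand-in data: 1,000 random token-id sequences with one-hot labels for 2 classes.
x_train = np.random.randint(0, vocabulary_size, size=(1000, max_sequence_length))
y_train = to_categorical(np.random.randint(0, num_classes, size=(1000,)), num_classes)

model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.1)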