Model
Keras
get the output of a middle layer
Unlike eager execution in TF2 or the dynamic graph in PyTorch, a Keras model has to be built (and compiled) before it runs, so to read the output of a middle layer we build a new model around the existing graph. This is easier than in PyTorch: we can create a sub-graph from the original Keras model without modifying the original model at all.
> [layer.name for layer in tf_model.layers]
# ['Input-Token',
# 'Input-Segment',
# 'Embedding-Token',
# 'Embedding-Segment',
# .......
# 'Transformer-11-FeedForward-Dropout',
# 'Transformer-11-FeedForward-Add',
# 'Transformer-11-FeedForward-Norm',
# ******************************************************
# ** in case we want the output before the last layer **
# ******************************************************
# 'dense']
> layer_model = keras.models.Model(inputs=tf_model.input, outputs=tf_model.get_layer('Transformer-11-FeedForward-Norm').output)
> layer_output = layer_model(x_input)
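If we want both the hidden states and the final prediction, the same trick extends to a list of outputs so that only one forward pass is needed. A minimal sketch, reusing tf_model and x_input from above:
> multi_model = keras.models.Model(inputs=tf_model.input, outputs=[tf_model.get_layer('Transformer-11-FeedForward-Norm').output, tf_model.output])
> hidden, probs = multi_model(x_input)
# hidden: (batch, seq_len, 768), probs: (batch, seq_len, 22)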
print the attributes of the Model
> type(tf_model)
# keras.engine.functional.Functional
> tf_model.get_config()
# {'name': 'model_1',
# 'layers': [{'class_name': 'InputLayer',
# 'config': {'batch_input_shape': (None, None),
# 'dtype': 'float32',
# 'sparse': False,
# 'ragged': False,
# 'name': 'Input-Token'},
# 'name': 'Input-Token',
# 'inbound_nodes': []},
# {'class_name': 'InputLayer',
# 'config': {'batch_input_shape': (None, None),
# 'dtype': 'float32',
# 'sparse': False,
# 'ragged': False,
# 'name': 'Input-Segment'},
# 'name': 'Input-Segment',
# 'inbound_nodes': []},
# ...
# {'class_name': 'Dense',
# 'config': {'name': 'dense',
# 'trainable': True,
# 'dtype': 'float32',
# 'units': 22,
# 'activation': 'softmax',
# 'use_bias': True,
# 'kernel_initializer': {'class_name': 'GlorotUniform',
# 'config': {'seed': None}},
# 'bias_initializer': {'class_name': 'Zeros', 'config': {}},
# 'kernel_regularizer': None,
# 'bias_regularizer': None,
# 'activity_regularizer': None,
# 'kernel_constraint': None,
# 'bias_constraint': None},
# 'name': 'dense',
# 'inbound_nodes': [[['Transformer-11-FeedForward-Norm', 0, 0, {}]]]}],
# 'input_layers': [['Input-Token', 0, 0], ['Input-Segment', 0, 0]],
# 'output_layers': [['dense', 0, 0]]}
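get_config() describes the architecture only, not the weights. A structurally identical model can be rebuilt from it with Model.from_config; the sketch below assumes keras is already imported as above and that every bert4keras custom layer class appearing in the config is passed via custom_objects (only a few are shown here):
> from bert4keras.layers import Embedding, PositionEmbedding, LayerNormalization
> config = tf_model.get_config()
# rebuild an architecture-only clone; its weights are freshly initialized, not copied
> clone = keras.models.Model.from_config(config, custom_objects={'Embedding': Embedding, 'PositionEmbedding': PositionEmbedding, 'LayerNormalization': LayerNormalization})
> clone.set_weights(tf_model.get_weights())  # copy the weights over for an exact copy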
> tf_model.summary()
# Model: "model_1"
# __________________________________________________________________________________________________
# Layer (type) Output Shape Param # Connected to
# ==================================================================================================
# Input-Token (InputLayer) [(None, None)] 0 []
# Input-Segment (InputLayer) [(None, None)] 0 []
# Embedding-Token (Embedding) (None, None, 768) 384885504 ['Input-Token[0][0]']
# Embedding-Segment (Embedding) (None, None, 768) 1536 ['Input-Segment[0][0]']
# ................
# dense (Dense) (None, None, 22) 16918 ['Transformer-11-FeedForward-Norm[0][0]']
# ==================================================================================================
# Total params: 470,353,174
# Trainable params: 470,353,174
# Non-trainable params: 0
# __________________________________________________________________________________________________
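The totals at the bottom of summary() can also be computed programmatically, which helps to see where the ~470M parameters come from. A small sketch:
> [(layer.name, layer.count_params()) for layer in tf_model.layers][:3]
# [('Input-Token', 0), ('Input-Segment', 0), ('Embedding-Token', 384885504)]
> tf_model.count_params()
# 470353174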
> tf_model.layers
# [<keras.engine.input_layer.InputLayer at 0x7ff158495370>,
# <keras.engine.input_layer.InputLayer at 0x7fef2a285fd0>,
# <bert4keras.layers.Embedding at 0x7fef2a2b35e0>,
# <bert4keras.layers.Embedding at 0x7fef2a2b3df0>,
# <keras.layers.merge.Add at 0x7fef2a2b3820>,
# <bert4keras.layers.PositionEmbedding at 0x7fef289d2be0>,
# <bert4keras.layers.LayerNormalization at 0x7fe
# ................
# <keras.layers.core.dense.Dense at 0x7ff158495520>]
> [layer.name for layer in tf_model.layers]
# ['Input-Token',
# 'Input-Segment',
# 'Embedding-Token',
# 'Embedding-Segment',
# 'Embedding-Token-Segment',
# 'Embedding-Position',
# 'Embedding-Norm',
# ................
# 'dense']
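get_layer also accepts an index instead of a name, and the listing above shows the Dense head is the last entry of tf_model.layers, so the same layer can be fetched positionally (sketch):
> tf_model.layers[-1] is tf_model.get_layer('dense')
# True
> tf_model.get_layer(index=len(tf_model.layers) - 1) is tf_model.get_layer('dense')
# True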
> type(tf_model.get_layer('dense'))
# keras.layers.core.dense.Dense
> tf_model.get_layer('dense').weights
# [<tf.Variable 'dense/kernel:0' shape=(768, 22) dtype=float32, numpy=
# array([[-0.00010732, -0.0852981 , -0.04779567, ..., -0.04220317,
# ...,
# [-0.05151889, -0.01136582, 0.03139671, ..., 0.00703724,
# <tf.Variable 'dense/bias:0' shape=(22,) dtype=float32, numpy=
# array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
# 0., 0., 0., 0., 0.], dtype=float32)>]
> tf_model.get_layer('dense').variables
# (Returns the same list as .weights here; .weights and .variables are different properties, hence different ids, but they yield the same tf.Variable objects)
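A quick check of that claim (sketch): the two properties are different attribute lookups, but they return the same tf.Variable objects; get_weights() is the call that returns plain NumPy copies instead.
> w = tf_model.get_layer('dense').weights
> v = tf_model.get_layer('dense').variables
> all(a is b for a, b in zip(w, v))
# True -- same underlying tf.Variable objects
> [arr.shape for arr in tf_model.get_layer('dense').get_weights()]
# [(768, 22), (22,)] -- NumPy copies of the kernel and bias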