ValueError: The layer sequential_2 has never been called and thus has no defined input

Posted by apollo_gang (Guest)
I have been running different versions of the code below to get the activation maps produced by a specified layer for the input frames at prediction time. As I said, I have tried several variations, but I keep getting the same error. I also tried calling model(tf.keras.Input((20, 64, 64, 3))) after loading the model, but that raised a different error saying the expected shape and the received shape do not match. I apologise in advance if the way I have presented the issue is confusing.
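To be specific, this is roughly the call I tried right after load_model, hoping that a symbolic input would count as the model having been called (Input adds the batch dimension itself):

Code:
# my attempt: call the model on a symbolic input so it counts as "called";
# Input((20, 64, 64, 3)) produces a tensor of shape (None, 20, 64, 64, 3)
model(tf.keras.Input((20, 64, 64, 3)))
# this raised the second error about expected vs. received shapes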

Traceback (most recent call last):
  File "AI_Powered_Deepfake_Detection\activation_map.py", line 93, in <module>
    plot_activation_maps(model, video_frames, layer_name)
  File "AI_Powered_Deepfake_Detection\activation_map.py", line 63, in plot_activation_maps
    activation_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
                                    ^^^^^^^^^^^
  File "AI_Powered_Deepfake_Detection\env\Lib\site-packages\keras\src\ops\operation.py", line 228, in input
    return self._get_node_attribute_at_index(0, "input_tensors", "input")
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "AI_Powered_Deepfake_Detection\env\Lib\site-packages\keras\src\ops\operation.py", line 259, in _get_node_attribute_at_index
    raise ValueError(
ValueError: The layer sequential_2 has never been called and thus has no defined input.

Below is the code I tried to run. Basically, I wanted to get the activation maps of the input frames, as used for predictions, from a specified layer. Here is the model architecture for reference.

#################################################################

Code:
# imports needed for this block (not shown in my original script)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (TimeDistributed, Conv2D, MaxPooling2D,
                                     Dropout, Flatten, LSTM, Dense)

# values inferred from the rest of this post: 20 frames of 64x64 RGB;
# the class names below are placeholders for my actual labels
SEQUENCE_LENGTH, IMAGE_HEIGHT, IMAGE_WIDTH = 20, 64, 64
CLASSES_LIST = ['real', 'fake']

model = Sequential()
model.add(TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu'),
                          input_shape=(SEQUENCE_LENGTH, IMAGE_HEIGHT, IMAGE_WIDTH, 3)))
model.add(TimeDistributed(MaxPooling2D((4, 4))))
model.add(TimeDistributed(Dropout(0.25)))

model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu')))
model.add(TimeDistributed(MaxPooling2D((4, 4))))
model.add(TimeDistributed(Dropout(0.25)))

model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu')))
model.add(TimeDistributed(MaxPooling2D((2, 2))))
model.add(TimeDistributed(Dropout(0.25)))

model.add(TimeDistributed(Flatten()))

model.add(LSTM(32))  # changed to 64 from 32

model.add(Dense(len(CLASSES_LIST), activation='softmax'))
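A quick check I have been using to see whether a given model object has a defined input (this snippet is my own addition for debugging, not part of the original script); it only relies on the fact, visible in the traceback above, that model.input raises a ValueError when the input is undefined:

Code:
def has_defined_input(m):
    # model.input raises a ValueError until the model has a defined input
    try:
        print('input shape:', m.input.shape)
        return True
    except ValueError as e:
        print('no defined input yet:', e)
        return False

has_defined_input(model)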

######################################################################

Code:
# imports needed for this block (not shown in my original script)
import cv2
import face_recognition
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Model

model = tf.keras.models.load_model('path_to_model')

def frames_extraction(input_video_file_path, model):

    # Initialize the VideoCapture object to read from the video file.
    video_reader = cv2.VideoCapture(input_video_file_path)

    # Declare a list to store the video frames we extract.
    frames_list = []

    # Iterate a number of times equal to the fixed sequence length.
    for _ in range(20):

        # Read a frame.
        success, frame = video_reader.read()

        # If the frame was not read properly, break out of the loop.
        if not success:
            break

        face_locations = face_recognition.face_locations(frame)

        # Check whether any face was detected.
        if face_locations:
            # Assuming there is only one face per frame.
            top, right, bottom, left = face_locations[0]

            # Crop the face from the image.
            face_frame = frame[top:bottom, left:right]

            # Convert BGR to RGB.
            face_frame_rgb = cv2.cvtColor(face_frame, cv2.COLOR_BGR2RGB)

            # Resize the frame to a fixed height and width.
            resized_face_frame = cv2.resize(face_frame_rgb, (64, 64))

            # Normalize the resized frame by dividing by 255 so that each
            # pixel value lies between 0 and 1.
            normalized_face_frame = resized_face_frame / 255

            # Append the normalized frame to the frames list.
            frames_list.append(normalized_face_frame)

    video_reader.release()

    # Add a batch dimension: the result has shape (1, num_frames, 64, 64, 3).
    return np.expand_dims(frames_list, axis=0)
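
# Quick shape check (my own addition, not in the original script):
#   frames = frames_extraction("path_to_video", model)
#   print(frames.shape)  # expect (1, 20, 64, 64, 3) if every frame had a face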


def plot_activation_maps(model, video_frames, layer_name):
    '''
    This function will plot the activation maps of the specified layer
    for the given video frames.
    Args:
        model:        The trained LRCN model.
        video_frames: An array containing the extracted frames from a
                      video, including the batch dimension.
        layer_name:   The name of the layer for which the activation
                      maps are to be plotted.
    '''
    # Create a model that outputs the activations of the specified layer.
    # This is the line that raises the ValueError.
    activation_model = Model(inputs=model.input,
                             outputs=model.get_layer(layer_name).output)

    # Get the activations for the video frames. frames_extraction already
    # added the batch dimension, so the frames are passed through as-is
    # (my original code mistakenly expanded the dimensions a second time here).
    activations = activation_model.predict(video_frames)[0]

    # Determine the number of filters in the specified layer.
    num_filters = activations.shape[-1]

    # Plot the activations of each filter, one figure per frame.
    for frame_index, frame_activations in enumerate(activations):
        fig, axes = plt.subplots(1, num_filters, figsize=(20, 20))
        fig.suptitle(f'Frame {frame_index + 1} Activations', fontsize=16)

        for filter_index in range(num_filters):
            ax = axes[filter_index]
            ax.imshow(frame_activations[:, :, filter_index], cmap='viridis')
            ax.axis('off')

        plt.show()


layer_name = 'time_distributed_20'

input_video_file_path = "path_to_video"

video_frames = frames_extraction(input_video_file_path, model)

plot_activation_maps(model, video_frames, layer_name)
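For completeness, the other variant I have been considering is to push a dummy batch through the loaded model before building the activation model, in the hope that this counts as the model having been called. The dummy shape is assumed from my preprocessing (1 video of 20 frames, each 64x64 RGB); I am not sure whether an eager call like this actually gives the model a defined symbolic input, which is part of what I am asking:

Code:
# variant I have been considering (unverified): run one dummy batch through
# the loaded model before building the activation model, so it has been called.
# shape assumed from my preprocessing: 1 video x 20 frames x 64x64 RGB
dummy_batch = np.zeros((1, 20, 64, 64, 3), dtype=np.float32)
model.predict(dummy_batch)

activation_model = Model(inputs=model.input,
                         outputs=model.get_layer(layer_name).output)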