Coverage for source/model/model_blue_prints/rnn_blue_print.py: 18% (39 statements)

# model/model_blue_prints/rnn_blue_print.py

# global imports
import math
from tensorflow.keras import layers, Model
from typing import Optional

# local imports
from source.model import BluePrintBase, ModelAdapterBase, TFModelAdapter

class RnnBluePrint(BluePrintBase):
    """
    Blueprint for creating an RNN model.

    This class implements a model blueprint that constructs a recurrent neural
    network (RNN) using an LSTM layer. It is designed to process sequential
    data with temporal dependencies.
    """

    def __init__(self, dense_squeezing_coeff: int = 2, dense_repetition_coeff: int = 1) -> None:
        """
        Initializes the RnnBluePrint with the specified configuration parameters.

        Parameters:
            dense_squeezing_coeff (int): Factor by which dense layer sizes are reduced.
            dense_repetition_coeff (int): Number of dense layers of the same size to use.
        """

        self.__dense_squeezing_coeff = dense_squeezing_coeff
        self.__dense_repetition_coeff = dense_repetition_coeff

    def instantiate_model(self, input_shape: tuple[int, int], output_length: int, spatial_data_shape: tuple[int, int],
                          dense_squeezing_coeff: Optional[int] = None, dense_repetition_coeff: Optional[int] = None) -> ModelAdapterBase:
        """
        Creates and returns an RNN model according to the specified parameters.

        The method constructs a neural network that:
        1. Separates the input into spatial and non-spatial components
        2. Processes the spatial data through an LSTM layer
        3. Concatenates the LSTM output with the non-spatial features
        4. Passes the combined features through a series of dense layers
        5. Produces a softmax output for classification

        Parameters:
            input_shape (tuple[int, int]): Shape of the input tensor (only the first
                element, the total feature length, is used)
            output_length (int): Number of output classes/actions
            spatial_data_shape (tuple[int, int]): Rows and columns to reshape the spatial data into
            dense_squeezing_coeff (Optional[int]): Factor by which dense layer sizes are reduced;
                defaults to the value passed to the constructor
            dense_repetition_coeff (Optional[int]): Number of dense layers of the same size to use;
                defaults to the value passed to the constructor

        Returns:
            ModelAdapterBase: A TFModelAdapter wrapping the Keras model, which still
                needs to be compiled.
        """

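        # Fall back to the constructor-level coefficients when no per-call
        # overrides are supplied.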
        if dense_squeezing_coeff is None:
            dense_squeezing_coeff = self.__dense_squeezing_coeff
        if dense_repetition_coeff is None:
            dense_repetition_coeff = self.__dense_repetition_coeff

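        # Total number of values at the front of the input that encode the
        # spatial data.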
        spatial_data_rows, spatial_data_cols = spatial_data_shape
        spatial_data_length = spatial_data_rows * spatial_data_cols

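        # Split the flat input vector into its spatial slice (the first
        # spatial_data_length values) and the remaining non-spatial features,
        # then reshape the spatial slice into a (rows, cols) sequence for the LSTM.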
        input_vector = layers.Input((1, input_shape[0]))
        reshaped_input_vector = layers.Reshape((input_shape[0],))(input_vector)
        spatial_part = layers.Lambda(lambda x: x[:, :spatial_data_length])(reshaped_input_vector)
        non_spatial_part = layers.Lambda(lambda x: x[:, spatial_data_length:])(reshaped_input_vector)
        reshaped_spatial_part = layers.Reshape((spatial_data_rows, spatial_data_cols))(spatial_part)

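        # Summarize the spatial sequence with a single LSTM layer that returns
        # only its final hidden state, then normalize the activations.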
        rnn_part = layers.LSTM(2 * spatial_data_cols, return_sequences = False)(reshaped_spatial_part)
        rnn_part = layers.BatchNormalization()(rnn_part)

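        # Rejoin the temporal summary with the untouched non-spatial features.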
        concatenated_parts = layers.Concatenate()([rnn_part, non_spatial_part])

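        # Start the dense funnel at the largest power of dense_squeezing_coeff
        # that does not exceed the width of the concatenated feature vector.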
        closest_smaller_power_of_coeff = int(math.pow(dense_squeezing_coeff,
                                                      int(math.log(concatenated_parts.shape[-1],
                                                                   dense_squeezing_coeff))))
        dense = layers.Dense(closest_smaller_power_of_coeff, activation = 'relu')(concatenated_parts)
        dense = layers.BatchNormalization()(dense)

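        # Shrink the layer width by dense_squeezing_coeff after every group of
        # dense_repetition_coeff equally sized layers; add dropout when the
        # width is one order of magnitude above output_length, and stop once
        # both share the same order of magnitude.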
        number_of_nodes = closest_smaller_power_of_coeff // dense_squeezing_coeff
        nr_of_dense_layers = int(math.log(closest_smaller_power_of_coeff, dense_squeezing_coeff))
        for _ in range(nr_of_dense_layers):
            for _ in range(dense_repetition_coeff):
                dense = layers.Dense(number_of_nodes, activation = 'relu')(dense)
                dense = layers.BatchNormalization()(dense)
            number_of_nodes //= dense_squeezing_coeff
            if int(math.log(number_of_nodes, 10)) == int(math.log(output_length, 10)) + 1:
                dense = layers.Dropout(0.3)(dense)
            elif int(math.log(number_of_nodes, 10)) == int(math.log(output_length, 10)):
                break

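        # Classification head; compiling the model is left to the caller.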
        output = layers.Dense(output_length, activation = 'softmax')(dense)

        return TFModelAdapter(Model(inputs = input_vector, outputs = output))
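
# A minimal usage sketch, illustrative rather than part of the covered module.
# The shape values below are assumptions chosen to fit the signature above:
# a 10x8 spatial grid plus 5 extra features (85 inputs in total) and 4 output
# classes. The returned adapter wraps an uncompiled Keras model.
if __name__ == '__main__':
    blue_print = RnnBluePrint(dense_squeezing_coeff = 2, dense_repetition_coeff = 1)
    # input_shape[0] must equal rows * cols + the number of non-spatial features
    adapter = blue_print.instantiate_model(input_shape = (85, 1),
                                           output_length = 4,
                                           spatial_data_shape = (10, 8))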