diff --git a/firmware/firmware.c b/firmware/firmware.c
index 8b66b95f27ed131ecf8f704da5707f422fb29068..bc8835f75d84d9fceeea684ca319d981013091c5 100644
--- a/firmware/firmware.c
+++ b/firmware/firmware.c
@@ -67,8 +67,8 @@ void main()
       INIT,
       // Read mode
       READ,
-      READ_LAYER,
-      READ_LAYER_WEIGHTS,
+      READ_LAYER_NEURON,
+      READ_LAYER_NEURON_VALUE,
       // Write mode
       WRITE,
       WRITE_LAYER,
@@ -83,8 +83,8 @@ void main()
     } command_mode;
     command_mode = START;
 
-    enum layer_type current_layer;
-    enum layer_value_type current_layer_value;
+    enum layer_type current_layer = LAYER_TYPE_NONE;
+    enum layer_value_type current_layer_value = LAYER_VALUE_TYPE_NONE;
     uint32_t current_max_num_values;
     uint32_t current_num_neurons;
 
@@ -140,6 +140,7 @@ void main()
                 }
                 else if(!strcmp(msg,"RESET")) {
                     reset_network();
+                    command_mode = START;
                 }
                 else if(!strcmp(msg,"TRAIN")) {
                     command_mode = TRAIN;
@@ -152,9 +153,6 @@ void main()
                 if(!strcmp(msg,"WEIGHTS")) {
                     init_random_weights();
                 }
-                else if(!strcmp(msg,"RWEIGHTS")) {
-                    init_random_rnn_weights();
-                }
                 else if(!strcmp(msg,"BIAS")) {
                     set_bias_values(1);
                 }
@@ -162,50 +160,51 @@ void main()
             
             case READ:
                 response = "OK";
-                command_mode = READ_LAYER;
+                command_mode = READ_LAYER_NEURON;
                 if(!strcmp(msg,"ENCODER")) {
+                    response = "ENCODER";
                     current_layer = LAYER_TYPE_ENCODER;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_ENCODER);
+                    current_num_neurons = ENCODER_NEURON_COUNT;
+                    current_max_num_values = ENCODER_WEIGHT_COUNT;
                 }
                 else if(!strcmp(msg,"HIDDEN")) {
+                    response = "HIDDEN";
                     current_layer = LAYER_TYPE_HIDDEN;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_HIDDEN);
+                    current_num_neurons = HIDDEN_NEURON_COUNT;
+                    current_max_num_values = HIDDEN_WEIGHT_COUNT;
                 }
                 else if(!strcmp(msg,"DECODER")) {
+                    response = "DECODER";
                     current_layer = LAYER_TYPE_DECODER;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_DECODER);
+                    current_num_neurons = DECODER_NEURON_COUNT;
+                    current_max_num_values = DECODER_WEIGHT_COUNT;
                 }
                 break;
 
-            case READ_LAYER:
-                response = "OK";
-                if(!strcmp(msg,"WEIGHTS")) {
-                    command_mode = READ_LAYER_WEIGHTS;
-                    current_layer_value = LAYER_VALUE_TYPE_WEIGHTS;
-                    current_max_num_values = get_num_values(current_layer, current_layer_value);
-                }
-                else if(!strcmp(msg,"RWEIGHTS")) {
-                    current_layer_value = LAYER_VALUE_TYPE_RNN_WEIGHTS;
-                    command_mode = READ_LAYER_WEIGHTS;
-                    current_max_num_values = get_num_values(current_layer, current_layer_value);
-                }
-                else if(!strcmp(msg,"BIAS")) {
-                    weight_transferred = get_layer_weight(current_layer, LAYER_VALUE_TYPE_BIAS, neuron_idx, 0);
-                    response = itoa(weight_transferred, numstr, 10);
-                }
-                else {
-                    neuron_idx = atoi(numstr);
-                    response = (neuron_idx<current_num_neurons)?msg:"END";
+            case READ_LAYER_NEURON:
+                neuron_idx = atoi(numstr);
+                response = "END";
+                command_mode = START;
+                if(neuron_idx<current_num_neurons) {
+                    response = "OK";
+                    command_mode = READ_LAYER_NEURON_VALUE;
                 }
                 break;
 
-            case READ_LAYER_WEIGHTS:
-                weight_idx = atoi(numstr);
-                response = "END";
-                if(weight_idx<current_max_num_values) {
-                    weight_transferred = get_layer_weight(current_layer, current_layer_value, neuron_idx, weight_idx);
+            case READ_LAYER_NEURON_VALUE:
+                response = "ERROR";
+                if(!strcmp(msg,"BIAS")) {
+                    weight_transferred = get_layer_bias(current_layer, neuron_idx);
                     response = itoa(weight_transferred, numstr, 10);
                 }
+                else {
+                    weight_idx = atoi(numstr);
+                    response = "END";
+                    if(weight_idx<current_max_num_values) {
+                        weight_transferred = get_layer_weight(current_layer, neuron_idx, weight_idx);
+                        response = itoa(weight_transferred, numstr, 10);
+                    }
+                }
                 break;
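+            /*
+             * Host read protocol, as implemented above (a sketch, not a
+             * normative spec): "READ", then a layer name ("ENCODER",
+             * "HIDDEN" or "DECODER"), then a neuron index, then either
+             * "BIAS" or a weight index; the firmware replies with the
+             * value, or "END" once an index runs past the layer.
+             */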
 
             case WRITE:
@@ -213,15 +212,18 @@ void main()
                 command_mode = WRITE_LAYER;
                 if(!strcmp(msg,"ENCODER")) {
                     current_layer = LAYER_TYPE_ENCODER;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_ENCODER);
+                    current_num_neurons = ENCODER_NEURON_COUNT;
+                    current_max_num_values = ENCODER_WEIGHT_COUNT;
                 }
                 else if(!strcmp(msg,"HIDDEN")) {
                     current_layer = LAYER_TYPE_HIDDEN;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_HIDDEN);
+                    current_num_neurons = HIDDEN_NEURON_COUNT;
+                    current_max_num_values = HIDDEN_WEIGHT_COUNT;
                 }
                 else if(!strcmp(msg,"DECODER")) {
                     current_layer = LAYER_TYPE_DECODER;
-                    current_num_neurons = get_num_neurons(LAYER_TYPE_DECODER);
+                    current_num_neurons = DECODER_NEURON_COUNT;
+                    current_max_num_values = DECODER_WEIGHT_COUNT;
                 }
                 break;
 
@@ -230,13 +232,6 @@ void main()
                 if(!strcmp(msg,"WEIGHTS")) {
                     command_mode = WRITE_LAYER_WEIGHTS;
                     current_layer_value = LAYER_VALUE_TYPE_WEIGHTS;
-                    current_max_num_values = get_num_values(current_layer, current_layer_value);
-                    value_write_counter = 0;
-                }
-                else if(!strcmp(msg,"RWEIGHTS")) {
-                    command_mode = WRITE_LAYER_WEIGHTS;
-                    current_layer_value = LAYER_VALUE_TYPE_RNN_WEIGHTS;
-                    current_max_num_values = get_num_values(current_layer, current_layer_value);
                     value_write_counter = 0;
                 }
                 else if(!strcmp(msg,"BIAS")) {
@@ -257,7 +252,7 @@ void main()
                 if(value_write_counter<current_max_num_values) {
                     response = numstr;
                     new_value = atoi(numstr);
-                    set_layer_weight(current_layer, current_layer_value, neuron_idx, value_write_counter, new_value);
+                    set_layer_weight(current_layer, neuron_idx, value_write_counter, new_value);
                     value_write_counter++;
                 }
                 break;
@@ -318,7 +313,7 @@ void main()
             case TRAIN_RUN_SINGLE_EPOCH:
                 epoch_value = atoi(numstr);
                 new_value = run_training_single_epoch(epoch_value, learning_rate, decay_rate, token_series, token_counter);
-                response = new_value?"SUCCESS":"FAILURE";
+                response = itoa(new_value, numstr, 10);
                 break;
 
         }
diff --git a/firmware/include/rnn.h b/firmware/include/rnn.h
index 9fc064b653072342c5ccfc9c427da9e4173c8cf2..9f8fa6d1939980beaef7950c2fbf7e89e1db1eec 100644
--- a/firmware/include/rnn.h
+++ b/firmware/include/rnn.h
@@ -13,26 +13,39 @@
 #define GENERATE_RANDOM_NUMBER 4
 #define RESET_NETWORK 5
 
-#define ENCODER 1<<8;
-#define HIDDEN 2<<8;
-#define DECODER 3<<8;
+#define ENCODER (1<<8)
+#define HIDDEN (2<<8)
+#define DECODER (3<<8)
 
-#define INFERENCE_START 1<<8;
-#define INFERENCE_FETCH 2<<8;
+#define INFERENCE_START (1<<8)
+#define INFERENCE_FETCH (2<<8)
 
-#define TRAINING_SINGLE_SHOT 1<<8;
+#define TRAINING_SINGLE_SHOT (1<<8)
+
+#define ENCODER_WEIGHT_COUNT (NUM_INPUT_SYNAPSES+NUM_INPUT_NEURONS)
+#define HIDDEN_WEIGHT_COUNT (NUM_INPUT_NEURONS+NUM_HIDDEN_NEURONS_H)
+#define DECODER_WEIGHT_COUNT (NUM_HIDDEN_NEURONS_H)
+
+#define ENCODER_MEM_SIZE (ENCODER_WEIGHT_COUNT+2)
+#define HIDDEN_MEM_SIZE (HIDDEN_WEIGHT_COUNT+2)
+#define DECODER_MEM_SIZE (DECODER_WEIGHT_COUNT+2)
+
+#define ENCODER_NEURON_COUNT NUM_INPUT_NEURONS
+#define HIDDEN_NEURON_COUNT (NUM_HIDDEN_NEURONS_W*NUM_HIDDEN_NEURONS_H)
+#define DECODER_NEURON_COUNT NUM_OUTPUT_NEURONS
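+
+/*
+ * Assumed per-neuron memory map behind these constants: words
+ * 0..WEIGHT_COUNT-1 hold the synapse weights, word WEIGHT_COUNT the
+ * bias, and word WEIGHT_COUNT+1 the learning rate (dw), giving a
+ * stride of MEM_SIZE = WEIGHT_COUNT+2 words per neuron.
+ */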
 
 enum layer_type {
     LAYER_TYPE_ENCODER,
     LAYER_TYPE_HIDDEN,
-    LAYER_TYPE_DECODER
+    LAYER_TYPE_DECODER,
+    LAYER_TYPE_NONE
 };
 
 enum layer_value_type {
     LAYER_VALUE_TYPE_WEIGHTS,
-    LAYER_VALUE_TYPE_RNN_WEIGHTS,
     LAYER_VALUE_TYPE_BIAS,
-    LAYER_VALUE_TYPE_DELTA_W
+    LAYER_VALUE_TYPE_DELTA_W,
+    LAYER_VALUE_TYPE_NONE
 };
 
 /* 
@@ -43,11 +56,26 @@ enum layer_value_type {
  */
 int get_layer_weight(
     enum layer_type layer,
-    enum layer_value_type value_type,
     uint32_t nidx,
     uint32_t vidx
 );
 
+/*
+ * Function for reading bias
+ */
+int get_layer_bias(
+    enum layer_type layer,
+    uint32_t nidx
+);
+
+/*
+ * Function for reading learning rate
+ */
+int get_layer_alpha(
+    enum layer_type layer,
+    uint32_t nidx
+);
+
 /* 
  * Generic function for writing neuron values:
  *
@@ -59,31 +87,28 @@ int get_layer_weight(
  */
 void set_layer_weight(
     enum layer_type layer,
-    enum layer_value_type value_type,
     uint32_t nidx,
     uint32_t vidx,
     int weight
 );
 
-/* 
- * Generic function for determining max amount of values
- *
- * layer: what layer to access (encoder, decoder, hidden)
- * value_type: type (normal weight, RNN weight, bias)
+/*
+ * Function for writing bias
  */
-uint32_t get_num_values(
+void set_layer_bias(
     enum layer_type layer,
-    enum layer_value_type value_type
+    uint32_t nidx,
+    int weight
 );
 
-/* 
- * Generic function for determining amount of neurons
- * in given layer
- *
- * layer: what layer to access (encoder, decoder, hidden)
- * value_type: type (normal weight, RNN weight, bias)
+/*
+ * Function for writing learning rate
  */
-uint32_t get_num_neurons(enum layer_type layer);
+void set_layer_alpha(
+    enum layer_type layer,
+    uint32_t nidx,
+    int weight
+);
 
 /*
  * Run inference
@@ -111,20 +136,11 @@ int get_random_value();
  */
 uint8_t get_random_char();
 
-
-/* Set the deltas for the weights */
-void set_dws(int dw, bool randomize);
-
 /*
  * Initialize random weights
  */
 void init_random_weights();
 
-/*
- * Initialize random RNN weights
- */
-void init_random_rnn_weights();
-
 /*
  * Set all biases to given value
  */
@@ -164,7 +180,7 @@ char* run_training(
- * Returns 0 when successful
- * Returns 1 when unsuccessful
+ * Returns 0 when the network output already matches the target
+ * Returns the nonzero training mask otherwise
  */
-int run_training_single_epoch(
+uint32_t run_training_single_epoch(
     int epoch,
     uint32_t learning_rate_zero,
     uint32_t decay_rate,
diff --git a/firmware/rnn.c b/firmware/rnn.c
index 921a1ecd817d1e10558912cb599fd377ea7e62ad..b1e5f02cf561da9ddb03aa5513ec2a252c056f1a 100644
--- a/firmware/rnn.c
+++ b/firmware/rnn.c
@@ -4,100 +4,39 @@
  * Access functions for the encoder layer
  */
 
-#define ENCODER_MEM_SIZE (NUM_INPUT_SYNAPSES+NUM_INPUT_NEURONS+2)
-#define HIDDEN_MEM_SIZE (NUM_INPUT_NEURONS+NUM_HIDDEN_NEURONS_H+2)
-#define DECODER_MEM_SIZE (NUM_HIDDEN_NEURONS_H+NUM_OUTPUT_NEURONS+2)
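+/*
+ * The lookup macros below rely on the GCC/Clang statement-expression
+ * extension ({ ... }) so a switch can yield a value; they are not
+ * portable to strict ISO C compilers.
+ */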
+#define MEM_SIZE(type) \
+    ({ int MEM_SIZE = 0; switch(type){ \
+            case LAYER_TYPE_ENCODER: \
+                MEM_SIZE = ENCODER_MEM_SIZE; break; \
+            case LAYER_TYPE_HIDDEN: \
+                MEM_SIZE = HIDDEN_MEM_SIZE; break; \
+            case LAYER_TYPE_DECODER: \
+                MEM_SIZE = DECODER_MEM_SIZE; break; \
+            default: break; \
+        }; MEM_SIZE; })
+
+#define WEIGHT_COUNT(type) \
+    ({ int WEIGHT_COUNT = 0; switch(type){ \
+            case LAYER_TYPE_ENCODER: \
+                WEIGHT_COUNT = ENCODER_WEIGHT_COUNT; break; \
+            case LAYER_TYPE_HIDDEN: \
+                WEIGHT_COUNT = HIDDEN_WEIGHT_COUNT; break; \
+            case LAYER_TYPE_DECODER: \
+                WEIGHT_COUNT = DECODER_WEIGHT_COUNT; break; \
+            default: break; \
+        }; WEIGHT_COUNT; })
 
 /*
  * Get access command
  */
-uint32_t get_layer_access_cmd(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = ACCESS_VALUES|ENCODER;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = ACCESS_VALUES|HIDDEN;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = ACCESS_VALUES|DECODER;
-            break;
-    }
-    return ret;
-}
-
-uint32_t get_layer_value_base_addr(
-    enum layer_type layer,
-    uint32_t nidx
-)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = nidx*ENCODER_MEM_SIZE;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = nidx*HIDDEN_MEM_SIZE;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = nidx*DECODER_MEM_SIZE;
-            break;
-    }
-    return ret;
-}
 
-uint32_t get_layer_rnn_offset(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_SYNAPSES;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_INPUT_NEURONS;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_HIDDEN_NEURONS_H;
-            break;
-    }
-    return ret;
-}
-
-uint32_t get_layer_bias_offset(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_SYNAPSES+NUM_INPUT_NEURONS;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_INPUT_NEURONS+NUM_HIDDEN_NEURONS_H;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_HIDDEN_NEURONS_H+NUM_OUTPUT_NEURONS;
-            break;
-    }
-    return ret;
-}
-
-uint32_t get_layer_dw_offset(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_SYNAPSES+NUM_INPUT_NEURONS+1;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_INPUT_NEURONS+NUM_HIDDEN_NEURONS_H+1;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_HIDDEN_NEURONS_H+NUM_OUTPUT_NEURONS+1;
-            break;
-    }
-    return ret;
-}
+#define ACCESS_CMD(type) \
+    ({ int ACCESS_CMD = ACCESS_VALUES; switch(type){ \
+            case LAYER_TYPE_ENCODER: \
+                ACCESS_CMD |= ENCODER; break; \
+            case LAYER_TYPE_HIDDEN: \
+                ACCESS_CMD |= HIDDEN; break; \
+            case LAYER_TYPE_DECODER: \
+                ACCESS_CMD |= DECODER; break; \
+            default: break; \
+        }; ACCESS_CMD; })
 
 /* 
  * Generic function for reading neuron values:
@@ -109,31 +48,40 @@ uint32_t get_layer_dw_offset(enum layer_type layer)
  */
 int get_layer_weight(
     enum layer_type layer,
-    enum layer_value_type value_type,
     uint32_t nidx,
     uint32_t vidx
 )
 {
-    uint32_t addr;
-
-    ann_cmd = get_layer_access_cmd(layer);
-    addr = get_layer_value_base_addr(layer, nidx);
-
-    switch(value_type) {
-        case LAYER_VALUE_TYPE_RNN_WEIGHTS:
-            addr += get_layer_rnn_offset(layer);
-            break;
-        case LAYER_VALUE_TYPE_BIAS:
-            addr += get_layer_bias_offset(layer);
-            break;
-    }
-    addr += vidx;
+    if(layer==LAYER_TYPE_NONE) return 0;
 
+    uint32_t addr = MEM_SIZE(layer)*nidx+vidx;
+    ann_cmd = ACCESS_CMD(layer);
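+    /* ann_cmd/ann_value1/ann_value2 appear to be the memory-mapped
+     * command, address and data registers of the network controller */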
     ann_value1 = addr;
-
     return ann_value2;
 }
 
+/*
+ * Function for reading bias
+ */
+int get_layer_bias(
+    enum layer_type layer,
+    uint32_t nidx
+)
+{
+    return get_layer_weight(layer, nidx, WEIGHT_COUNT(layer));
+}
+
+/*
+ * Function for reading learning rate
+ */
+int get_layer_alpha(
+    enum layer_type layer,
+    uint32_t nidx
+)
+{
+    return get_layer_weight(layer, nidx, WEIGHT_COUNT(layer)+1);
+}
+
 /* 
  * Generic function for writing neuron values:
  *
@@ -145,116 +93,39 @@ int get_layer_weight(
  */
 void set_layer_weight(
     enum layer_type layer,
-    enum layer_value_type value_type,
     uint32_t nidx,
     uint32_t vidx,
     int weight
 )
 {
-    uint32_t addr;
-
-    ann_cmd = get_layer_access_cmd(layer);
-    addr = get_layer_value_base_addr(layer, nidx);
-
-    switch(value_type) {
-        case LAYER_VALUE_TYPE_RNN_WEIGHTS:
-            addr += get_layer_rnn_offset(layer);
-            break;
-        case LAYER_VALUE_TYPE_BIAS:
-            addr += get_layer_bias_offset(layer);
-            break;
-        case LAYER_VALUE_TYPE_DELTA_W:
-            addr += get_layer_dw_offset(layer);
-            break;
-    }
-    addr += vidx;
-
+    if(layer==LAYER_TYPE_NONE) return;
+
+    uint32_t addr = MEM_SIZE(layer)*nidx+vidx;
+    ann_cmd = ACCESS_CMD(layer);
     ann_value1 = addr;
     ann_value2 = weight;
 }
 
 /*
- * Helpers for determining amount of values
+ * Function for writing bias
  */
-uint32_t get_layer_weight_count(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_SYNAPSES;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_INPUT_NEURONS;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_HIDDEN_NEURONS_H;
-            break;
-    }
-    return ret;
-}
-
-uint32_t get_layer_rnn_count(enum layer_type layer)
-{
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_NEURONS;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_HIDDEN_NEURONS_H;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_OUTPUT_NEURONS;
-            break;
-    }
-    return ret;
-}
-
-/* 
- * Generic function for determining max amount of values
- *
- * layer: what layer to access (encoder, decoder, hidden)
- * value_type: type (normal weight, RNN weight, bias)
- */
-uint32_t get_num_values(
+void set_layer_bias(
     enum layer_type layer,
-    enum layer_value_type value_type
+    uint32_t nidx,
+    int weight
 )
 {
-    uint32_t ret = 0;
-    switch(value_type) {
-        case LAYER_VALUE_TYPE_WEIGHTS:
-            ret = get_layer_weight_count(layer);
-            break;
-        case LAYER_VALUE_TYPE_RNN_WEIGHTS:
-            ret = get_layer_rnn_count(layer);
-            break;
-    }
-    return ret;
+    set_layer_weight(layer, nidx, WEIGHT_COUNT(layer), weight);
 }
 
-/* 
- * Generic function for determining amount of neurons
- * in given layer
- *
- * layer: what layer to access (encoder, decoder, hidden)
- * value_type: type (normal weight, RNN weight, bias)
+/*
+ * Function for writing learning rate
  */
-uint32_t get_num_neurons(enum layer_type layer)
+void set_layer_alpha(
+    enum layer_type layer,
+    uint32_t nidx,
+    int weight
+)
 {
-    uint32_t ret = 0;
-    switch(layer) {
-        case LAYER_TYPE_ENCODER:
-            ret = NUM_INPUT_NEURONS;
-            break;
-        case LAYER_TYPE_HIDDEN:
-            ret = NUM_HIDDEN_NEURONS_W*NUM_HIDDEN_NEURONS_H;
-            break;
-        case LAYER_TYPE_DECODER:
-            ret = NUM_OUTPUT_NEURONS;
-            break;
-    }
-    return ret;
+    set_layer_weight(layer, nidx, WEIGHT_COUNT(layer)+1, weight);
 }
 
 /*
@@ -323,16 +194,14 @@ void set_alpha(int alpha)
 {
     int i, x, y;
 
-    for(i=0;i<NUM_INPUT_NEURONS;i++) {
-        set_layer_weight(LAYER_TYPE_ENCODER, LAYER_VALUE_TYPE_DELTA_W, i, 0, alpha);
+    for(i=0;i<ENCODER_NEURON_COUNT;i++) {
+        set_layer_alpha(LAYER_TYPE_ENCODER, i, alpha);
     }
-    for(x=0;x<NUM_HIDDEN_NEURONS_W;x++) {
-        for(y=0;y<NUM_HIDDEN_NEURONS_H;y++) {
-            set_layer_weight(LAYER_TYPE_HIDDEN,LAYER_VALUE_TYPE_DELTA_W, x*NUM_HIDDEN_NEURONS_H+y, 0, alpha);
-        }
-    }    
-    for(i=0;i<NUM_OUTPUT_NEURONS;i++) {
-        set_layer_weight(LAYER_TYPE_DECODER,LAYER_VALUE_TYPE_DELTA_W, i, 0, alpha);
+    for(i=0;i<HIDDEN_NEURON_COUNT;i++) {
+        set_layer_alpha(LAYER_TYPE_HIDDEN, i, alpha);
+    }
+    for(i=0;i<DECODER_NEURON_COUNT;i++) {
+        set_layer_alpha(LAYER_TYPE_DECODER, i, alpha);
     }
 }
 
@@ -342,32 +211,21 @@ void set_alpha(int alpha)
 void init_random_weights()
 {
     int i, j, x, y;
-    for(i=0;i<NUM_INPUT_NEURONS;i++)
-        for(j=0;j<NUM_INPUT_SYNAPSES;j++)
-            set_layer_weight(LAYER_TYPE_ENCODER,LAYER_VALUE_TYPE_WEIGHTS,i, j, get_random_value());
-    for(i=0;i<NUM_HIDDEN_NEURONS_W*NUM_HIDDEN_NEURONS_H;i++)
-        for(j=0;j<NUM_INPUT_NEURONS;j++)
-            set_layer_weight(LAYER_TYPE_HIDDEN,LAYER_VALUE_TYPE_WEIGHTS,i, j, get_random_value());
-    for(i=0;i<NUM_OUTPUT_NEURONS;i++)
-        for(j=0;j<NUM_HIDDEN_NEURONS_H;j++)
-            set_layer_weight(LAYER_TYPE_DECODER,LAYER_VALUE_TYPE_WEIGHTS,i, j, get_random_value());
-}
-
-/*
- * Initialize random RNN weights
- */
-void init_random_rnn_weights()
-{
-    int i, j, x, y;
-    for(i=0;i<NUM_INPUT_NEURONS;i++)
-        for(j=0;j<NUM_INPUT_NEURONS;j++)
-            set_layer_weight(LAYER_TYPE_ENCODER,LAYER_VALUE_TYPE_RNN_WEIGHTS,i, j, get_random_value());
-    for(i=0;i<NUM_HIDDEN_NEURONS_W*NUM_HIDDEN_NEURONS_H;i++)
-        for(j=0;j<NUM_HIDDEN_NEURONS_H;j++)
-            set_layer_weight(LAYER_TYPE_HIDDEN,LAYER_VALUE_TYPE_RNN_WEIGHTS,i , j, get_random_value());
-    for(i=0;i<NUM_OUTPUT_NEURONS;i++)
-        for(j=0;j<NUM_OUTPUT_NEURONS;j++)
-            set_layer_weight(LAYER_TYPE_DECODER,LAYER_VALUE_TYPE_RNN_WEIGHTS,i, j, get_random_value());
+    for(i=0;i<ENCODER_NEURON_COUNT;i++) {
+        for(j=0;j<ENCODER_WEIGHT_COUNT;j++) {
+            set_layer_weight(LAYER_TYPE_ENCODER,i, j, get_random_value());
+        }
+    }
+    for(i=0;i<HIDDEN_NEURON_COUNT;i++) {
+        for(j=0;j<HIDDEN_WEIGHT_COUNT;j++) {
+            set_layer_weight(LAYER_TYPE_HIDDEN,i, j, get_random_value());
+        }
+    }
+    for(i=0;i<DECODER_NEURON_COUNT;i++) {
+        for(j=0;j<DECODER_WEIGHT_COUNT;j++) {
+            set_layer_weight(LAYER_TYPE_DECODER,i, j, get_random_value());
+        }
+    }
 }
 
 /*
@@ -376,14 +234,14 @@ void init_random_rnn_weights()
 void set_bias_values(int bias)
 {
     int i, x, y;
-    for(i=0;i<NUM_INPUT_NEURONS;i++) {
-        set_layer_weight(LAYER_TYPE_ENCODER,LAYER_VALUE_TYPE_BIAS, i, 0, bias);
+    for(i=0;i<ENCODER_NEURON_COUNT;i++) {
+        set_layer_bias(LAYER_TYPE_ENCODER, i, bias);
     }
-    for(i=0;i<NUM_HIDDEN_NEURONS_W*NUM_HIDDEN_NEURONS_H;i++) {
-        set_layer_weight(LAYER_TYPE_HIDDEN,LAYER_VALUE_TYPE_BIAS, i, 0, bias);
+    for(i=0;i<HIDDEN_NEURON_COUNT;i++) {
+        set_layer_bias(LAYER_TYPE_HIDDEN, i, bias);
     }
-    for(i=0;i<NUM_OUTPUT_NEURONS;i++) {
-        set_layer_weight(LAYER_TYPE_DECODER,LAYER_VALUE_TYPE_BIAS, i, 0, bias);
+    for(i=0;i<DECODER_NEURON_COUNT;i++) {
+        set_layer_bias(LAYER_TYPE_DECODER, i, bias);
     }
 }
 
@@ -398,7 +256,7 @@ void set_bias_values(int bias)
- * Returns 0 when successful
- * Returns 1 when unsuccessful
+ * Returns 0 when the network output already matches the target
+ * Returns the nonzero training mask otherwise
  */
-int run_training_single_epoch(
+uint32_t run_training_single_epoch(
     int epoch,
     uint32_t learning_rate_zero,
     uint32_t decay_rate,
@@ -425,18 +283,18 @@ int run_training_single_epoch(
      * Check whether the network has already been trained
      * Set return value to zero and exit the loop if so
      */
-    if(last_val==y) {
-        return 1;
+    /*
+     * Determine the training mask by finding out which
+     * neurons misfired or didn't fire although they should have
+     */
+    train_mask = last_val ^ y;
+    if(!train_mask) {
+        return train_mask;
     }
-    /*
-     * Determine the training mask my finding out which
-     * neurons misfired or didn't fire although they should have
-     */
-    train_mask = last_val ^ y;
     set_alpha(learning_rate_zero/(1+(decay_rate*epoch)));
     mask_back_propgatation(train_mask);
     
-    return 0;
+    return train_mask;
 }
 
 
@@ -523,7 +381,7 @@ char* run_training(
          * there's no point in training for a further token
          * in the time series.
          */
-        if(ret) break;
+        if(!ret) break;
         /*
          * If we haven't terminated the loop by now, it means we're
          * still game
diff --git a/src/py/fac_tools.py b/src/py/fac_tools.py
index f6f6293ed74d2fa0ea6cd9b9e6b077a3b0573b4f..624eb9d1673aa78e5cec32f026ea631ff98d6e46 100644
--- a/src/py/fac_tools.py
+++ b/src/py/fac_tools.py
@@ -48,17 +48,25 @@ def get_weight(server, layer, wtype, nidx, vidx):
     run_command(server,"DONE")
     return ret
 
-def extract_values(server, layer, wtype):
-    arr = []
+def extract_values(server, layer):
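+    """Read one layer back as a list of {'BIAS': int, 'WEIGHTS': [int, ...]}
+    dicts, one per neuron; relies on the firmware answering "END" once the
+    neuron or weight index runs past the layer size."""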
+    arr=[]
     i=0
     while True:
+        retd={}
+
         run_command(server,"READ")
         run_command(server,layer)
-        if "END" in run_command(server,str(i)):
-            break
-        i=i+1
-        run_command(server,wtype)
+        ret = run_command(server,str(i))
+        if "END" in ret:
+            break
+        retd['BIAS'] = int(run_command(server,"BIAS"))
+        run_command(server,"DONE")
 
+        run_command(server,"READ")
+        run_command(server,layer)
+        ret = run_command(server,str(i))
+        if "END" in ret:
+            break
         j=0
         sarr=[]
         while True:
@@ -67,9 +75,13 @@ def extract_values(server, layer, wtype):
                 break
             sarr.append(int(ret))
             j=j+1
-        arr.append(sarr)
 
+        retd['WEIGHTS']=sarr
         run_command(server,"DONE")
+
+        arr.append(retd)
+        i=i+1
+
     return arr
 
 def extract_single_value(server, layer, wtype):
@@ -86,70 +98,46 @@ def extract_single_value(server, layer, wtype):
         run_command(server,"DONE")
     return arr
 
-def fetch_all_values(server, layer):
-    ret={}
-    ret['weights'] = extract_values(server,layer,"WEIGHTS")
-    ret['rnn_weights'] = extract_values(server,layer,"RWEIGHTS")
-    ret['biases'] = extract_single_value(server,layer,"BIAS")
-    return ret
-
 def dump_neural_network(server):
     weights_and_biases = {}
-    weights_and_biases['encoder'] = fetch_all_values(server,"ENCODER")
-    weights_and_biases['hidden'] = fetch_all_values(server,"HIDDEN")
-    weights_and_biases['decoder'] = fetch_all_values(server,"DECODER")
+    weights_and_biases['ENCODER'] = extract_values(server,"ENCODER")
+    weights_and_biases['HIDDEN'] = extract_values(server,"HIDDEN")
+    weights_and_biases['DECODER'] = extract_values(server,"DECODER")
     return weights_and_biases
 
+def write_neuron(server, layer_name, nidx, neuron):
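+    """Write one neuron back; `neuron` is a dict shaped like the entries
+    returned by extract_values: {'BIAS': int, 'WEIGHTS': [int, ...]}."""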
+    # Write bias
+    run_command(server,"WRITE")
+    run_command(server,layer_name)
+    run_command(server,str(nidx))
+    run_command(server,"BIAS")
+    run_command(server,str(neuron["BIAS"]))
+    run_command(server,"DONE")
+    # Write weights
+    run_command(server,"WRITE")
+    run_command(server,layer_name)
+    run_command(server,str(nidx))
+    run_command(server,"WEIGHTS")
+    for w in neuron["WEIGHTS"]:
+        run_command(server,str(w))
+    run_command(server,"DONE")
 
-def write_value_array(server, layer, wtype, vals):
-    i=0
-    for val in vals:
-        run_command(server,"WRITE")
-        run_command(server,layer)
-        if "END" in run_command(server,str(i)):
-            break
-        i=i+1
-        run_command(server,wtype)
-        ret = run_command(server, str(val))
-        if "END" in ret:
-            break
-        run_command(server,"DONE")
-
-def write_value_multi_array(server, layer, wtype, valar):
-    i=0
-    for vals in valar:
-        run_command(server,"WRITE")
-        run_command(server,layer)
-        if "END" in run_command(server,str(i)):
-            break
-        i=i+1
-        run_command(server,wtype)
-        for v in vals:
-            ret = run_command(server, str(v))
-            if "END" in ret:
-                break
-        run_command(server,"DONE")
+def write_layer(server, layer_name, layer):
+    nidx=0
+    for neuron in layer:
+        write_neuron(server, layer_name, nidx, neuron)
+        nidx=nidx+1
 
 def load_weights_and_biases(server,weights_and_biases):
-    write_value_array(server, "ENCODER", "BIAS", weights_and_biases['encoder']['biases'])
-    write_value_multi_array(server, "ENCODER", "WEIGHTS", weights_and_biases['encoder']['weights'])
-    write_value_multi_array(server, "ENCODER", "RWEIGHTS", weights_and_biases['encoder']['rnn_weights'])
-    write_value_array(server, "DECODER", "BIAS", weights_and_biases['decoder']['biases'])
-    write_value_multi_array(server, "DECODER", "WEIGHTS", weights_and_biases['decoder']['weights'])
-    write_value_multi_array(server, "DECODER", "RWEIGHTS", weights_and_biases['decoder']['rnn_weights'])
-    write_value_array(server, "HIDDEN", "BIAS", weights_and_biases['hidden']['biases'])
-    write_value_multi_array(server, "HIDDEN", "WEIGHTS", weights_and_biases['hidden']['weights'])
-    write_value_multi_array(server, "HIDDEN", "RWEIGHTS", weights_and_biases['hidden']['rnn_weights'])
+    write_layer(server, "ENCODER", weights_and_biases['ENCODER'])
+    write_layer(server, "HIDDEN", weights_and_biases['HIDDEN'])
+    write_layer(server, "DECODER", weights_and_biases['DECODER'])
 
 def init_weights_and_biases(server):
     run_command(server,"INIT")
     run_command(server,"WEIGHTS")
     run_command(server,"INIT")
-    run_command(server,"RWEIGHTS")
-    run_command(server,"INIT")
     run_command(server,"BIAS")
-    run_command(server,"TRAIN")
-    run_command(server,"LEARNING_RATE")
 
 def train_token_series(server, tokens, epochs):
     run_command(server,"TRAIN")
@@ -162,5 +150,6 @@ def train_token_series(server, tokens, epochs):
     for i in range(0,epochs):
         print("Training epoch: ",i)
         ret = run_command(server,str(i))
-        if "SUCCESS" in ret:
+        print("Error: ", ret)
+        if "0" == ret:
             break
diff --git a/src/py/tty1.py b/src/py/tty1.py
index 0fbaa8f0b8653dbe751b3d356c1582fd958a1fa2..79c6a30f01337f67783a0db65498a91d4bc513da 100644
--- a/src/py/tty1.py
+++ b/src/py/tty1.py
@@ -1,7 +1,6 @@
 import json
 
 from fac_tools import run_command
-from fac_tools import fetch_all_values
 from fac_tools import get_fac_wrapper
 from fac_tools import dump_neural_network
 from fac_tools import init_weights_and_biases
diff --git a/src/py/tty3.py b/src/py/tty3.py
index 116ef4735f1ea12f26b8b02bf09a42b50c658857..ab8091d15d3c1c121ff0704a9589649e9b2a9782 100644
--- a/src/py/tty3.py
+++ b/src/py/tty3.py
@@ -19,7 +19,7 @@ server = get_fac_wrapper("telnet")
 
 INTMAX=2147483647
 lr=0.95
-decay_rate=100
+decay_rate=1
 
 run_command(server,"HELLO")
 
@@ -31,6 +31,9 @@ init_weights_and_biases(server)
 # this means we have to do the math here and multiply
 # the maximum integer value by our desired learning rate
 lr=lr*INTMAX
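+# e.g. lr=0.95 becomes int(0.95*2147483647) == 2040109464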
+
+run_command(server,"TRAIN")
+run_command(server,"LEARNING_RATE")
 run_command(server,str(int(lr)))
 
 run_command(server,"TRAIN")
@@ -43,23 +46,21 @@ run_command(server,"DECAY_RATE")
 # α=(1/(1+decayRate×epochNumber))*α0
 run_command(server,str(decay_rate))
 
+max_epochs=1000
 # Priming phase!
 # Upload and train token pairs first
-train_token_series(server, tokens[0:2], 20000)
-train_token_series(server, tokens[1:3], 20000)
-train_token_series(server, tokens[2:4], 20000)
+train_token_series(server, tokens[0:2], max_epochs)
+run_command(server,"DONE")
+train_token_series(server, tokens[1:3], max_epochs)
+run_command(server,"DONE")
+train_token_series(server, tokens[2:4], max_epochs)
+run_command(server,"DONE")
 
-'''
 # Upload token series
-run_command(server,"TRAIN")
-run_command(server,"TOKENS")
-for tok in tokens:
-    run_command(server,str(tok))
+train_token_series(server, tokens[0:3], max_epochs)
+run_command(server,"DONE")
+train_token_series(server, tokens[0:4], max_epochs)
 run_command(server,"DONE")
-run_command(server,"TRAIN")
-run_command(server,"RUN_EPOCHS")
-run_command(server,str(1000))
-'''
 
 # Store the weights and biases
 weights_and_biases = dump_neural_network(server)
diff --git a/src/rtl/fac69.v b/src/rtl/fac69.v
deleted file mode 100644
index 7c5ec1150de5294182a98cdd9f5a699587ba0947..0000000000000000000000000000000000000000
--- a/src/rtl/fac69.v
+++ /dev/null
@@ -1,11 +0,0 @@
-// A 4 bit RCA, can be extended to 8 bit
-module fac69(tdi, tdo, tms, tck, trst);
-  input tdi, tms, tck, trst;
-  output tdo;
-
-  /*jfulladder jfa0(Y[0],c1,A[0],B[0],carryin);
-  jfulladder jfa1(Y[1],c2,A[1],B[1],c1);
-  jfulladder jfa2(Y[2],c3,A[2],B[2],c2);
-  jfulladder jfa3(Y[3],carryout,A[3],B[3],c3);*/
-  
-endmodule
diff --git a/src/rtl/layer.sv b/src/rtl/layer.sv
index 6c42f712eea96111842d993c6b1b53292bd8ad8d..566d060ef23417fdcdb6e46b03862a87645c1ed7 100644
--- a/src/rtl/layer.sv
+++ b/src/rtl/layer.sv
@@ -1,6 +1,7 @@
 module layer #(
     parameter NUMBER_SYNAPSES = 8,
-    parameter NUMBER_NEURONS = 8
+    parameter NUMBER_NEURONS = 8,
+    parameter IS_RNN = 1
 )
 (
     clk,
@@ -25,8 +26,8 @@ module layer #(
     data_i,
     data_o
 );
-    parameter values_neuron = NUMBER_SYNAPSES+NUMBER_NEURONS+2;
-    parameter total_neurons = NUMBER_NEURONS*values_neuron;
+    parameter values_neuron = IS_RNN ? NUMBER_SYNAPSES+NUMBER_NEURONS+2 : NUMBER_SYNAPSES+2;
+    parameter total_neuron_values = NUMBER_NEURONS*values_neuron;
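+    // With IS_RNN set, the layer's own outputs are fed back as extra
+    // synapses, so each neuron stores NUMBER_SYNAPSES+NUMBER_NEURONS
+    // weights plus bias and dw; otherwise just NUMBER_SYNAPSES weights.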
 
     input clk;
     input rst;
@@ -44,27 +45,26 @@ module layer #(
     // Data interface
     input write_enable;
     input read_enable;
-    input [$clog2(total_neurons+1):0] address;
+    input [$clog2(total_neuron_values)+1:0] address;
     input [31:0] data_i;
     output reg[31:0] data_o;
     output reg read_done;
     output reg write_done;
 
-    genvar gv1, gv2;
     wire[NUMBER_NEURONS-1:0][NUMBER_SYNAPSES-1:0] backprop_out_port_array;
     wire[NUMBER_SYNAPSES-1:0][NUMBER_NEURONS-1:0] backprop_out_port_array_flipped;
 
     generate
-        for(gv1=0;gv1<NUMBER_NEURONS;gv1=gv1+1) begin : flip_outer_loop
-            for(gv2=0;gv2<NUMBER_SYNAPSES;gv2=gv2+1) begin : flip_inner_loop
+        for(genvar gv1=0;gv1<NUMBER_NEURONS;gv1=gv1+1) begin : flip_outer_loop
+            for(genvar gv2=0;gv2<NUMBER_SYNAPSES;gv2=gv2+1) begin : flip_inner_loop
                 assign backprop_out_port_array_flipped[gv2][gv1] = backprop_out_port_array[gv1][gv2];
             end
         end
     endgenerate
 
     generate
-        for(gv2=0;gv2<NUMBER_SYNAPSES;gv2=gv2+1) begin : backprop_loop
-            assign backprop_out_port[gv2] = |backprop_out_port_array_flipped[gv2];
+        for(genvar gv=0;gv<NUMBER_SYNAPSES;gv=gv+1) begin : backprop_loop
+            assign backprop_out_port[gv] = |backprop_out_port_array_flipped[gv];
         end
     endgenerate
     
@@ -74,23 +74,23 @@ module layer #(
     wire[NUMBER_NEURONS-1:0] backprop_done_array;
     assign backprop_done = &backprop_done_array;
 
-    wire [NUMBER_NEURONS-1:0][values_neuron+1:0]translated_address;
+    wire [NUMBER_NEURONS-1:0][$clog2(values_neuron)+1:0] translated_address;
     generate
-        for(gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : translate_addr
+        for(genvar gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : translate_addr
             assign translated_address[gv] = address-gv*values_neuron;
         end
     endgenerate
 
     wire [NUMBER_NEURONS-1:0] read_enable_array;
     generate
-        for(gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : re_loop
+        for(genvar gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : re_loop
             assign read_enable_array[gv] = (translated_address[gv]<values_neuron) && read_enable;
         end
     endgenerate
 
     wire [NUMBER_NEURONS-1:0] write_enable_array;
     generate
-        for(gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : we_loop
+        for(genvar gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : we_loop
             assign write_enable_array[gv] = (translated_address[gv]<values_neuron) && write_enable;
         end
     endgenerate
@@ -119,13 +119,11 @@ module layer #(
         if(write_done && !write_enable) write_done <= 0;
     end
 
-    genvar gv;
     generate
-        for(gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : neurons
+        for(genvar gv=0;gv<NUMBER_NEURONS;gv=gv+1) begin : neurons
 
             neuron #(
-                .NUMBER_SYNAPSES(NUMBER_SYNAPSES),
-                .NUMBER_RNN_SYNAPSES(NUMBER_NEURONS)
+                .NUMBER_SYNAPSES(IS_RNN ? NUMBER_SYNAPSES+NUMBER_NEURONS : NUMBER_SYNAPSES)
             )
             ut(
                 .clk(clk),
@@ -134,12 +132,11 @@ module layer #(
                 .run_inference(run_inference),
                 .inference_done(inference_done_array[gv]),
                 // The data I/O
-                .neuron_inputs(layer_inputs),
+                .neuron_inputs(IS_RNN ? {layer_inputs,layer_outputs} : layer_inputs),
                 .neuron_output(layer_outputs[gv]),
-                .rnn_inputs(layer_outputs),
                 // Backprop
                 .backprop_out_port(backprop_out_port_array[gv]),
-                .backprop_in_port(backprop_in_port),
+                .backprop_in_port(backprop_in_port[gv]),
                 .backprop_done(backprop_done_array[gv]),
                 .backprop_enable(backprop_enable),
                 // Data interface
diff --git a/src/rtl/network.v b/src/rtl/network.v
index cb88f255b81a47da2be71e321ac787b1ebed43b2..56c2f826a7614fa992d2acabd786449d9cd0926b 100644
--- a/src/rtl/network.v
+++ b/src/rtl/network.v
@@ -132,7 +132,8 @@ module network
     // Encoder
     layer #(
         .NUMBER_SYNAPSES(`NUM_INPUT_SYNAPSES),
-        .NUMBER_NEURONS(`NUM_INPUT_NEURONS)
+        .NUMBER_NEURONS(`NUM_INPUT_NEURONS),
+        .IS_RNN(1)
     )
     encoder(
         .clk(clk),
@@ -188,7 +189,8 @@ module network
     // Decoder
     layer #(
         .NUMBER_SYNAPSES(`NUM_HIDDEN_NEURONS_H),
-        .NUMBER_NEURONS(`NUM_OUTPUT_NEURONS)
+        .NUMBER_NEURONS(`NUM_OUTPUT_NEURONS),
+        .IS_RNN(0)
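+        // feed-forward only: the decoder gets no recurrent inputs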
     )
     decoder(
         .clk(clk),
diff --git a/src/rtl/network_controller.sv b/src/rtl/network_controller.sv
index 2b015e17c1d80a1ec59d220ceedb2182355d4f90..f954b59e6fa4ce670d7bc0c3eeb545860cc942df 100644
--- a/src/rtl/network_controller.sv
+++ b/src/rtl/network_controller.sv
@@ -11,9 +11,9 @@
 `define RESET_NETWORK 5
 
 // Components:
-`define COMPONENT_ENCODER 1
-`define COMPONENT_HIDDEN 2
-`define COMPONENT_DECODER 3
+`define COMPONENT_ENCODER 8'h1
+`define COMPONENT_HIDDEN 8'h2
+`define COMPONENT_DECODER 8'h3
 
 // Inference commands
 `define INFERENCE_OPTION_START 1
@@ -139,8 +139,8 @@ module network_controller #(
     input decoder_write_done;
     
     // Access engine
-    reg [3:0] command;
-    reg [3:0] option;
+    reg [7:0] command;
+    reg [7:0] option;
 
     // Random number generator
     wire [31:0] lfsr_data;
@@ -211,13 +211,13 @@ module network_controller #(
             end
             else
             if (wr_enable && (mem_addr==`REGISTER_VALUE1) && (command==`RUN_INFERENCE) && (option==`INFERENCE_OPTION_START)  ) begin
-                encoder_input <= mem_data_i;
+                encoder_input <= mem_data_i[NUMBER_SYNAPSES-1:0];
                 mem_ready <= 1;
                 run_inference <= 1;
             end
             else
             if (wr_enable && (mem_addr==`REGISTER_VALUE1) && (command==`RUN_TRAINING) && (option==`TRAINING_SINGLE_SHOT)  ) begin
-                backprop_port <= mem_data_i;
+                backprop_port <= mem_data_i[NUMBER_OUTPUT_NEURONS-1:0];
                 mem_ready <= 1;
                 backprop_enable <= 1;
             end
diff --git a/src/rtl/neuron.sv b/src/rtl/neuron.sv
index 6895e96a1c08e4f1a324f68063380deb8394e346..42ab22d9c62b015c7e3a959c29eb2b2edfb5807b 100644
--- a/src/rtl/neuron.sv
+++ b/src/rtl/neuron.sv
@@ -10,8 +10,7 @@ when f(w,x) > 0, then the output spikes
 
 */
 module neuron #(
-    parameter NUMBER_SYNAPSES = 8,
-    parameter NUMBER_RNN_SYNAPSES = 8
+    parameter NUMBER_SYNAPSES = 8
 )
 (
     clk,
@@ -20,7 +19,6 @@ module neuron #(
     run_inference,
     inference_done,
     // The layer i/o
-    rnn_inputs,
     neuron_inputs,
     neuron_output,
     // Backprop
@@ -37,14 +35,11 @@ module neuron #(
     data_i,
     data_o
 );
-    parameter NUM_WEIGHTS = NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES;
-
     input clk;
     input rst;
     input run_inference;
     output reg inference_done;
     input[NUMBER_SYNAPSES-1:0] neuron_inputs;
-    input[NUMBER_RNN_SYNAPSES-1:0] rnn_inputs;
     output reg neuron_output;
 
     output reg write_done;
@@ -53,25 +48,21 @@ module neuron #(
     // Data interface
     input write_enable;
     input read_enable;
-    input [$clog2(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES+2):0] address;
+    input [$clog2(NUMBER_SYNAPSES+2)+1:0] address;
     input [31:0] data_i;
     output reg [31:0] data_o;
 
     // Backprop
     output reg [NUMBER_SYNAPSES-1:0] backprop_out_port; // tell the other neurons that they have to better themselves
-    input [NUMBER_SYNAPSES-1:0] backprop_in_port; // Constructive criticism for the neuron
+    input backprop_in_port; // Constructive criticism for the neuron
     output reg backprop_done;
     input backprop_enable;
     reg backprop_running;
 
     reg signed[NUMBER_SYNAPSES-1:0][31:0] W; // summands
-    reg signed[NUMBER_RNN_SYNAPSES-1:0][31:0] RNNW; // RNN summands
     reg signed[31:0] bias; // summands
     reg signed[31:0] dw; // weight correction
 
-    reg [NUMBER_SYNAPSES-1:0] backprop_in_port_store; // store values
-    reg [NUMBER_RNN_SYNAPSES-1:0] rnn_inputs_store;
-
     genvar gv;
     integer regidx;
 
@@ -104,29 +95,18 @@ module neuron #(
             // Adjust normal weights
             if( regidx < NUMBER_SYNAPSES )
             begin
-                if(backprop_in_port_store[regidx] && neuron_output) begin
+                if( backprop_in_port && neuron_output && neuron_inputs[regidx] ) begin
                     W[regidx] <= $signed(W[regidx]) - $signed(dw);
                 end
                 else
-                if(backprop_in_port_store[regidx] && !neuron_output) begin
+                if( backprop_in_port && !neuron_output && neuron_inputs[regidx] ) begin
                     W[regidx] <= $signed(W[regidx]) + $signed(dw);
                 end
-                regidx <= regidx+1;
-            end
-            // Adjust RNN weights
-            else if( regidx < NUM_WEIGHTS )
-            begin
-                if(rnn_inputs[regidx] && neuron_output) begin
-                    RNNW[regidx-NUMBER_SYNAPSES] <= $signed(RNNW[regidx-NUMBER_SYNAPSES]) - $signed(dw);
-                end
-                else
-                if(rnn_inputs[regidx] && !neuron_output) begin
-                    RNNW[regidx-NUMBER_SYNAPSES] <= $signed(RNNW[regidx-NUMBER_SYNAPSES]) + $signed(dw);
-                end
+                backprop_out_port[regidx] <= backprop_in_port ^ neuron_inputs[regidx];
                 regidx <= regidx+1;
             end
             // Reset the register index counter
-            else if( regidx == NUM_WEIGHTS )
+            else if( regidx == NUMBER_SYNAPSES )
             begin
                 //$display("Backprop done");
                 backprop_done <= 1;
@@ -138,8 +118,7 @@ module neuron #(
         else
         if( !backprop_done && backprop_enable && !run_inference )
         begin
-            backprop_out_port <= neuron_inputs;
-            backprop_in_port_store <= backprop_in_port;
+            backprop_out_port <= 0;
             backprop_running <= 1;
             regidx <= 0;
         end
@@ -151,15 +130,11 @@ module neuron #(
                 W[address] <= data_i;
             end
             else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES)) begin
-                RNNW[address-NUMBER_SYNAPSES] <= data_i;
-            end
-            else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES+1)) begin
+            if(address<(NUMBER_SYNAPSES+1)) begin
                 bias <= data_i;
             end
             else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES+2)) begin
+            if(address<(NUMBER_SYNAPSES+2)) begin
                 dw <= data_i;
             end
             write_done <= 1;
@@ -172,15 +147,11 @@ module neuron #(
                 data_o <= W[address];
             end
             else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES)) begin
-                data_o <= RNNW[address-NUMBER_SYNAPSES];
-            end
-            else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES+1)) begin
+            if(address<(NUMBER_SYNAPSES+1)) begin
                 data_o <= bias;
             end
             else
-            if(address<(NUMBER_SYNAPSES+NUMBER_RNN_SYNAPSES+2)) begin
+            if(address<(NUMBER_SYNAPSES+2)) begin
                 data_o <= dw;
             end
             read_done <= 1;
@@ -190,23 +161,13 @@ module neuron #(
         if(run_inference && !inference_done)
         begin
 
-            if( regidx == 0 ) begin
-                rnn_inputs_store <= rnn_inputs;
-            end
-
             if( regidx < NUMBER_SYNAPSES )
             begin
                 Y <= $signed(Y) + $signed((neuron_inputs[regidx]==1'b1) ? W[regidx] : 0);
                 regidx <= regidx+1;
             end
 
-            else if( regidx < NUM_WEIGHTS )
-            begin
-                Y <= $signed(Y) + $signed((rnn_inputs_store[regidx-NUMBER_SYNAPSES]==1'b1) ? RNNW[regidx-NUMBER_SYNAPSES] : 0);
-                regidx <= regidx+1;
-            end
-
-            else if( regidx == NUM_WEIGHTS )
+            else if( regidx == NUMBER_SYNAPSES )
             begin
                 regidx <= 0;
                 neuron_output <= ($signed(Y)>0);
diff --git a/src/rtl/neuron_matrix.sv b/src/rtl/neuron_matrix.sv
index 57107df25361bdcc91bbc030b21afd06cd6e6a78..d42e7e80d0c6b64009b45412e3e6d8573e2ee98b 100644
--- a/src/rtl/neuron_matrix.sv
+++ b/src/rtl/neuron_matrix.sv
@@ -56,9 +56,9 @@ module neuron_matrix #(
     wire[NUMBER_NEURONS_W-1:0][NUMBER_NEURONS_H-1:0] backprop_ports; // Constructive criticism
 
     // wires for wiring the network together
-    wire [NUMBER_NEURONS_W-1:0][values_layer+1:0]translated_address;
+    wire [NUMBER_NEURONS_W-1:0][$clog2(values_layer):0] translated_address;
     generate
-        for(gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : translate_addr_m
+        for(genvar gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : translate_addr_m
             assign translated_address[gv] = address-gv*values_layer;
         end
     endgenerate
@@ -67,14 +67,14 @@ module neuron_matrix #(
 
     wire [NUMBER_NEURONS_W-1:0] read_enable_array;
     generate
-        for(gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : re_arr_m
+        for(genvar gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : re_arr_m
             assign read_enable_array[gv] = (translated_address[gv]<values_layer) && read_enable;
         end
     endgenerate
 
     wire [NUMBER_NEURONS_W-1:0] write_enable_array;
     generate
-        for(gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : rw_arr_m
+        for(genvar gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : rw_arr_m
             assign write_enable_array[gv] = (translated_address[gv]<values_layer) && write_enable;
         end
     endgenerate
@@ -106,12 +106,12 @@ module neuron_matrix #(
         if(write_done && !write_enable) write_done <= 0;
     end
 
-    genvar gv;
     generate
-        for(gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : matrix_layers
+        for(genvar gv=0;gv<NUMBER_NEURONS_W;gv=gv+1) begin : matrix_layers
             layer #(
                 .NUMBER_SYNAPSES(NUMBER_SYNAPSES),
-                .NUMBER_NEURONS(NUMBER_NEURONS_H)
+                .NUMBER_NEURONS(NUMBER_NEURONS_H),
+                .IS_RNN(1)
             )
             hidden_layer(
                 .clk(clk),
diff --git a/src/rtl/soc.v b/src/rtl/soc.v
index 10b5fce770a5af5f8e74732184f364e2761523bc..938d13db1fce6daaabd3172a9806e799c1068980 100644
--- a/src/rtl/soc.v
+++ b/src/rtl/soc.v
@@ -20,120 +20,120 @@
 `include "params.vh"
  
 module soc (
-	input clk,
-	// UART
-	output ser_tx,
-	input ser_rx,
-	// LEDS
-	output [15:0] leds,
+    input clk,
+    // UART
+    output ser_tx,
+    input ser_rx,
+    // LEDS
+    output [15:0] leds,
     // Flash
-	output flash_csb,
-	output flash_clk,
-	inout  flash_io0,
-	inout  flash_io1,
-	inout  flash_io2,
-	inout  flash_io3
+    output flash_csb,
+    output flash_clk,
+    inout  flash_io0,
+    inout  flash_io1,
+    inout  flash_io2,
+    inout  flash_io3
 );
-	reg [5:0] reset_cnt = 0;
-	wire resetn = &reset_cnt;
-
-	always @(posedge clk) begin
-		reset_cnt <= reset_cnt + !resetn;
-	end
-
-	wire flash_io0_oe, flash_io0_do, flash_io0_di;
-	wire flash_io1_oe, flash_io1_do, flash_io1_di;
-	wire flash_io2_oe, flash_io2_do, flash_io2_di;
-	wire flash_io3_oe, flash_io3_do, flash_io3_di;
-
-	assign flash_io0_di = (flash_io0_oe==1'b0) ? flash_io0 : 1'bZ;
-	assign flash_io0 = (flash_io0_oe==1'b1) ? flash_io0_do : 1'bZ;
-	assign flash_io1_di =  (flash_io1_oe==1'b0) ? flash_io1 : 1'bZ;
-	assign flash_io1 = (flash_io1_oe==1'b1) ? flash_io1_do : 1'bZ;
-
-	wire        iomem_valid;
-	reg         iomem_ready;
-	wire [3:0]  iomem_wstrb;
-	wire [31:0] iomem_addr;
-	wire [31:0] iomem_wdata;
-	reg  [31:0] iomem_rdata;
-
-	reg [31:0] gpio;
-	assign leds = gpio;
-
-	wire [31:0] rnn_data;
-	wire rnn_ready;
-	wire rnn_addressed = (iomem_addr[31:24] == 8'h 04);
-
-	always @(posedge clk) begin
-		if (!resetn) begin
-			gpio <= 0;
-		end else begin
-			iomem_ready <= 0;
-			// GPIOs
-			if (iomem_valid && !iomem_ready && iomem_addr[31:24] == 8'h 03) begin
-				iomem_ready <= 1;
-				iomem_rdata <= gpio;
-				if (iomem_wstrb[0]) gpio[ 7: 0] <= iomem_wdata[ 7: 0];
-				if (iomem_wstrb[1]) gpio[15: 8] <= iomem_wdata[15: 8];
-				if (iomem_wstrb[2]) gpio[23:16] <= iomem_wdata[23:16];
-				if (iomem_wstrb[3]) gpio[31:24] <= iomem_wdata[31:24];
-			end
-			else
-			// Neural network
-			if (iomem_valid && !iomem_ready && rnn_addressed) begin
-				iomem_ready <= rnn_ready;
-				iomem_rdata <= rnn_data;
-			end
-		end
-	end
-
-	picosoc soc (
-		.clk          (clk         ),
-		.resetn       (resetn      ),
-
-		.ser_tx       (ser_tx      ),
-		.ser_rx       (ser_rx      ),
-
-		.flash_csb    (flash_csb   ),
-		.flash_clk    (flash_clk   ),
-
-		.flash_io0_oe (flash_io0_oe),
-		.flash_io1_oe (flash_io1_oe),
-		.flash_io2_oe (flash_io2_oe),
-		.flash_io3_oe (flash_io3_oe),
-
-		.flash_io0_do (flash_io0_do),
-		.flash_io1_do (flash_io1_do),
-		.flash_io2_do (flash_io2_do),
-		.flash_io3_do (flash_io3_do),
-
-		.flash_io0_di (flash_io0_di),
-		.flash_io1_di (flash_io1_di),
-		.flash_io2_di (flash_io2_di),
-		.flash_io3_di (flash_io3_di),
-
-		.irq_5        (1'b0        ),
-		.irq_6        (1'b0        ),
-		.irq_7        (1'b0        ),
-
-		.iomem_valid  (iomem_valid ),
-		.iomem_ready  (iomem_ready ),
-		.iomem_wstrb  (iomem_wstrb ),
-		.iomem_addr   (iomem_addr  ),
-		.iomem_wdata  (iomem_wdata ),
-		.iomem_rdata  (iomem_rdata )
-	);
-
-	network neurons(
-		.clk          (clk         ),
-		.rst          (!resetn      ),
-		.mem_enable   (rnn_addressed),
+    reg [5:0] reset_cnt = 0;
+    wire resetn = &reset_cnt;
+
+    always @(posedge clk) begin
+        reset_cnt <= reset_cnt + !resetn;
+    end
+
+    wire flash_io0_oe, flash_io0_do, flash_io0_di;
+    wire flash_io1_oe, flash_io1_do, flash_io1_di;
+    wire flash_io2_oe, flash_io2_do, flash_io2_di;
+    wire flash_io3_oe, flash_io3_do, flash_io3_di;
+
+    assign flash_io0_di = (flash_io0_oe==1'b0) ? flash_io0 : 1'bZ;
+    assign flash_io0 = (flash_io0_oe==1'b1) ? flash_io0_do : 1'bZ;
+    assign flash_io1_di =  (flash_io1_oe==1'b0) ? flash_io1 : 1'bZ;
+    assign flash_io1 = (flash_io1_oe==1'b1) ? flash_io1_do : 1'bZ;
+
+    wire        iomem_valid;
+    reg         iomem_ready;
+    wire [3:0]  iomem_wstrb;
+    wire [31:0] iomem_addr;
+    wire [31:0] iomem_wdata;
+    reg  [31:0] iomem_rdata;
+
+    reg [31:0] gpio;
+    assign leds = gpio;
+
+    wire [31:0] rnn_data;
+    wire rnn_ready;
+    wire rnn_addressed = (iomem_addr[31:24] == 8'h 04);
+
+    always @(posedge clk) begin
+        if (!resetn) begin
+            gpio <= 0;
+        end else begin
+            iomem_ready <= 0;
+            // GPIOs
+            if (iomem_valid && !iomem_ready && iomem_addr[31:24] == 8'h 03) begin
+                iomem_ready <= 1;
+                iomem_rdata <= gpio;
+                if (iomem_wstrb[0]) gpio[ 7: 0] <= iomem_wdata[ 7: 0];
+                if (iomem_wstrb[1]) gpio[15: 8] <= iomem_wdata[15: 8];
+                if (iomem_wstrb[2]) gpio[23:16] <= iomem_wdata[23:16];
+                if (iomem_wstrb[3]) gpio[31:24] <= iomem_wdata[31:24];
+            end
+            else
+            // Neural network
+            if (iomem_valid && !iomem_ready && rnn_addressed) begin
+                iomem_ready <= rnn_ready;
+                iomem_rdata <= rnn_data;
+            end
+        end
+    end
+
+    picosoc soc (
+        .clk          (clk         ),
+        .resetn       (resetn      ),
+
+        .ser_tx       (ser_tx      ),
+        .ser_rx       (ser_rx      ),
+
+        .flash_csb    (flash_csb   ),
+        .flash_clk    (flash_clk   ),
+
+        .flash_io0_oe (flash_io0_oe),
+        .flash_io1_oe (flash_io1_oe),
+        .flash_io2_oe (flash_io2_oe),
+        .flash_io3_oe (flash_io3_oe),
+
+        .flash_io0_do (flash_io0_do),
+        .flash_io1_do (flash_io1_do),
+        .flash_io2_do (flash_io2_do),
+        .flash_io3_do (flash_io3_do),
+
+        .flash_io0_di (flash_io0_di),
+        .flash_io1_di (flash_io1_di),
+        .flash_io2_di (flash_io2_di),
+        .flash_io3_di (flash_io3_di),
+
+        .irq_5        (1'b0        ),
+        .irq_6        (1'b0        ),
+        .irq_7        (1'b0        ),
+
+        .iomem_valid  (iomem_valid ),
+        .iomem_ready  (iomem_ready ),
+        .iomem_wstrb  (iomem_wstrb ),
+        .iomem_addr   (iomem_addr  ),
+        .iomem_wdata  (iomem_wdata ),
+        .iomem_rdata  (iomem_rdata )
+    );
+
+    network neurons(
+        .clk          (clk         ),
+        .rst          (!resetn      ),
+        .mem_enable   (rnn_addressed),
         .mem_wstrb    (iomem_wstrb),
         .mem_addr     (iomem_addr[23:0]),
         .mem_data_i   (iomem_wdata),
         .mem_data_o   (rnn_data),
         .mem_ready    (rnn_ready)
-	);
+    );
 
 endmodule