diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b1dff867cfb6b5e8dbff671a0fe0127b1b554e5b..e2f836b28606a8ed5efc43b92029cce820f4fe7a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -164,7 +164,7 @@ ctest:
   extends: .tests_base
   stage: Applications Test
   before_script:
-    - pip3 install pytest pytest-cov pytest-order
+    - pip install pytest pytest-cov pytest-order
     - mkdir -p $ARTIFACT_TEST_DIR
     - cd $CI_PROJECT_DIR
 
@@ -189,6 +189,15 @@ sr4rs:
     - export PYTHONPATH=$PYTHONPATH:$PWD/sr4rs
     - python -m pytest --junitxml=$ARTIFACT_TEST_DIR/report_sr4rs.xml $OTBTF_SRC/test/sr4rs_unittest.py
 
+decloud:
+  extends: .applications_test_base
+  script:
+    - git clone https://github.com/CNES/decloud.git
+    - pip install -r $PWD/decloud/docker/requirements.txt
+    - wget -P decloud_data --no-verbose --recursive --level=inf --no-parent -R "index.html*" --cut-dirs=3 --no-host-directories http://indexof.montpellier.irstea.priv/projets/geocicd/decloud/
+    - export DECLOUD_DATA_DIR="$PWD/decloud_data"
+    - pytest decloud/tests/train_from_tfrecords_unittest.py
+
 otbtf_api:
   extends: .applications_test_base
   script:
diff --git a/doc/api_tutorial.md b/doc/api_tutorial.md
index ef0e2c164957c1fd28cb8731ae4142619c13e2f6..cc08b9198469a97a083c16588976a9b2c9aa8785 100644
--- a/doc/api_tutorial.md
+++ b/doc/api_tutorial.md
@@ -184,6 +184,19 @@ def dataset_preprocessing_fn(examples: dict):
 
 As you can see, we don't modify the input tensor, since we want to use it 
-as it in the model.
+as is in the model.
+Note that since version 4.2.0, the `otbtf.ops.one_hot` helper can ease this transformation:
+
+```python
+def dataset_preprocessing_fn(examples: dict):
+    return {
+        INPUT_NAME: examples["input_xs_patches"],
+        TARGET_NAME: otbtf.ops.one_hot(
+            labels=examples["labels_patches"],
+            nb_classes=N_CLASSES
+        )
+    }
+
+```
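+
+This helper simply expands the label patches into one binary channel per
+class. Below is a minimal sketch of an equivalent transform (an illustration
+only, not the actual `otbtf.ops.one_hot` implementation; it assumes
+single-band label patches of integer class indices):
+
+```python
+import tensorflow as tf
+
+def one_hot_sketch(labels: tf.Tensor, nb_classes: int) -> tf.Tensor:
+    # [batch, h, w, 1] integer labels --> [batch, h, w, nb_classes] binary
+    flat_labels = tf.squeeze(tf.cast(labels, tf.int32), axis=-1)
+    return tf.one_hot(flat_labels, depth=nb_classes)
+```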
 
 ### Model inputs preprocessing
 
diff --git a/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py b/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
index 44285d92bb77695224288cb9d804018c90924c82..fcd14a2024e575f79a9f0c9a4a8e475a5e1d373b 100644
--- a/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
+++ b/otbtf/examples/tensorflow_v2x/fcnn/fcnn_model.py
@@ -123,6 +123,18 @@ class FCNNModel(ModelBase):
         softmax_op = tf.keras.layers.Softmax(name=OUTPUT_SOFTMAX_NAME)
         predictions = softmax_op(out_tconv4)
 
+        # Note that we could also add extra outputs, for instance the argmax
+        # of the softmax:
+        #
+        # argmax_op = otbtf.layers.Argmax(name="labels")
+        # labels = argmax_op(predictions)
+        # return {TARGET_NAME: predictions, OUTPUT_ARGMAX_NAME: labels}
+        #
+        # The default extra outputs (i.e. output tensors cropped in the
+        # physical domain) are appended by `otbtf.ModelBase` to every output
+        # returned by this function, so that they can be used at inference
+        # time (e.g. "labels_crop32", "labels_crop64", ...,
+        # "predictions_softmax_tensor_crop16", etc.).
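+        #
+        # At inference time, such an output can then be selected by its name
+        # in the `TensorflowModelServe` OTB application, e.g. (a sketch, with
+        # the other parameters omitted):
+        #   otbcli_TensorflowModelServe ... -output.names "labels_crop32"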
+
         return {TARGET_NAME: predictions}
 
 
@@ -173,12 +185,23 @@ def train(params, ds_train, ds_valid, ds_test):
         model = FCNNModel(dataset_element_spec=ds_train.element_spec)
 
         # Compile the model
+        # It is good practice to use a `dict` to explicitly name the outputs
+        # over which the losses/metrics are computed.
+        # This gives finer control over the optimization, and avoids
+        # computing useless metrics over the extra outputs.
         model.compile(
-            loss=tf.keras.losses.CategoricalCrossentropy(),
+            loss={
+                TARGET_NAME: tf.keras.losses.CategoricalCrossentropy()
+            },
             optimizer=tf.keras.optimizers.Adam(
                 learning_rate=params.learning_rate
             ),
-            metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()]
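+            # Using `class_id=1` restricts precision/recall to class 1 only
+            # (e.g. the foreground class of a binary classification problem).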
+            metrics={
+                TARGET_NAME: [
+                    tf.keras.metrics.Precision(class_id=1),
+                    tf.keras.metrics.Recall(class_id=1)
+                ]
+            }
         )
 
         # Summarize the model (in CLI)