Data loader for a Segmentation model

Using the PD SDK within your data pipeline gives you a couple of advantages:

- only one data loader is needed for all dataset formats supported by PD SDK
- it’s easy to mix data from different dataset formats, since the model representation is shared
- it works with datasets located locally or in S3; this is also easily extendable to more cloud storage providers (see AnyPath and the short sketch below)
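
As a quick illustration (the paths below are placeholders, not real datasets), the same AnyPath type wraps both a local directory and an S3 URI, so the rest of the pipeline does not need to know where the data lives:

[ ]:
from paralleldomain.utilities.any_path import AnyPath

# Both a plain file system path and an s3 URI are wrapped by the same AnyPath type,
# so downstream code can treat them uniformly.
local_dataset_path = AnyPath("/data/my_synthetic_dataset")  # placeholder local path
remote_dataset_path = AnyPath("s3://my-bucket/my_synthetic_dataset")  # placeholder bucket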

In the following snippet we show an example of how to create a generator that yields image + segmentation mask tuples using the PD SDK. This generator yields examples from a PD DGP dataset as well as from the NuImages train split.

[ ]:
from dataclasses import dataclass, field
from typing import Tuple, Dict, List, Any, Generator
import numpy as np

from paralleldomain.decoding.helper import decode_dataset
from paralleldomain.model.annotation import AnnotationTypes
from paralleldomain.utilities.mask import replace_values
from paralleldomain.model.class_mapping import LabelMapping, OnLabelNotDefined
from paralleldomain.utilities.any_path import AnyPath


@dataclass
class DatasetReference:
    dataset_path: AnyPath
    label_map: Dict[str, str]
    dataset_format: str = "dgp"
    decoder_kwargs: Dict[str, Any] = field(default_factory=dict)


def data_generator(
    dataset_references: List[DatasetReference], class_name_to_index: Dict[str, int]
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
    for dataset_reference in dataset_references:
        # We use this to map from the dataset-specific class names to common class names shared between all datasets
        label_mapping = LabelMapping(
            label_mapping=dataset_reference.label_map, on_not_defined=OnLabelNotDefined.KEEP_LABEL
        )

        dataset = decode_dataset(
            dataset_path=dataset_reference.dataset_path,
            dataset_format=dataset_reference.dataset_format,
            **dataset_reference.decoder_kwargs
        )

        # Since we don't require temporally ordered frames, we can use unordered_scene_names instead of just scene_names.
        # All temporally ordered scenes are contained in this superset of names.
        for scene_name in dataset.unordered_scene_names:
            scene = dataset.get_unordered_scene(scene_name=scene_name)

            # Get the class map that maps from class_id to the respective class name
            class_map = scene.get_class_map(annotation_type=AnnotationTypes.SemanticSegmentation2D)

            # We combine those two maps to get an adjusted ClassMap that maps from class_id to a common class name
            common_names_class_map = label_mapping @ class_map
            internal_to_common_id_map: Dict[int, int] = dict()
            for source_id, class_detail in common_names_class_map.items():
                if class_detail.name in class_name_to_index:
                    internal_to_common_id_map[source_id] = class_name_to_index[class_detail.name]
                else:
                    internal_to_common_id_map[source_id] = 0  # background

            for camera_name in scene.camera_names:
                camera = scene.get_camera_sensor(camera_name=camera_name)
                for frame_id in camera.frame_ids:
                    camera_frame = camera.get_frame(frame_id=frame_id)
                    if AnnotationTypes.SemanticSegmentation2D in camera_frame.available_annotation_types:
                        rgb = camera_frame.image.rgb
                        semseg_annotation = camera_frame.get_annotations(
                            annotation_type=AnnotationTypes.SemanticSegmentation2D
                        )
                        class_ids = semseg_annotation.class_ids
                        mapped_class_ids = replace_values(
                            mask=class_ids, value_map=internal_to_common_id_map, value_min=0, value_max=255
                        )
                        yield rgb, mapped_class_ids
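
To make the id remapping step more concrete, here is a minimal toy sketch with made-up class ids and a hypothetical lookup table, assuming replace_values performs a plain value-for-value substitution as in the generator above:

[ ]:
import numpy as np

from paralleldomain.utilities.mask import replace_values

# Toy per-pixel class ids as they might come out of a dataset (values are made up)
class_ids = np.array([[3, 3, 7], [7, 1, 3]], dtype=np.uint8)
# Hypothetical mapping from dataset-internal ids to the common training ids
internal_to_common_id_map = {3: 2, 7: 1, 1: 0}

mapped_class_ids = replace_values(
    mask=class_ids, value_map=internal_to_common_id_map, value_min=0, value_max=255
)
# mapped_class_ids should now be [[2, 2, 1], [1, 0, 2]]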

In the pseudo code snippet below we use the generator defined above to combine a DGP and a NuImages dataset for training a segmentation model.

[ ]:
from unittest import mock

dataset_references = [
    DatasetReference(
        dataset_path=AnyPath("s3://path/to/my/synthetic/data"),
        label_map={"Car": "car", "Bicyclist": "cyclist", "Bicycle": "cyclist", "Pedestrian": "pedestrian"},
        dataset_format="dgp",
    ),
    DatasetReference(
        dataset_path=AnyPath("s3://path/to/my/nuimages/data"),
        label_map={
            "vehicle.car": "car",
            "vehicle.bicycle": "cyclist",
            "human.pedestrian.adult": "pedestrian",
            "human.pedestrian.child": "pedestrian",
            "human.pedestrian.construction_worker": "pedestrian",
            "human.pedestrian.personal_mobility": "pedestrian",
            "human.pedestrian.police_officer": "pedestrian",
            "human.pedestrian.stroller": "pedestrian",
            "human.pedestrian.wheelchair": "pedestrian",
        },
        dataset_format="nuimages",
        decoder_kwargs=dict(split_name="v1.0-train"),
    ),
]

generator = data_generator(
    # class index 0 is reserved for background / unmapped classes in data_generator above
    dataset_references=dataset_references,
    class_name_to_index={"car": 1, "cyclist": 2, "pedestrian": 3},
)

# Some training-loop pseudo code to show how to use the generator
model = lambda rgb: np.random.rand(*rgb.shape)
optimizer = mock.MagicMock()
loss_function = lambda pred, cid: np.random.random()

for rgb, class_ids in generator:
    prediction = model(rgb)
    loss = loss_function(prediction, class_ids)
    optimizer.compute_gradient(loss)
    optimizer.step()
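
The generator can also be plugged into a framework-specific data loader. Below is a minimal sketch of one possible way to do that, assuming PyTorch is available; the class indices and image normalization are illustrative and not prescribed by the PD SDK:

[ ]:
import torch
from torch.utils.data import DataLoader, IterableDataset


class SegmentationIterableDataset(IterableDataset):
    def __init__(self, dataset_references: List[DatasetReference], class_name_to_index: Dict[str, int]):
        self.dataset_references = dataset_references
        self.class_name_to_index = class_name_to_index

    def __iter__(self):
        # Re-create the generator on every pass so the dataset can be iterated more than once
        for rgb, class_ids in data_generator(
            dataset_references=self.dataset_references, class_name_to_index=self.class_name_to_index
        ):
            # Channels-first float image and integer label mask, a common convention for segmentation losses
            image = torch.from_numpy(rgb.astype(np.float32) / 255.0).permute(2, 0, 1)
            target = torch.from_numpy(class_ids.astype(np.int64)).squeeze()
            yield image, target


# batch_size=1 avoids resizing, since images from different datasets may have different resolutions
loader = DataLoader(
    SegmentationIterableDataset(dataset_references, {"car": 1, "cyclist": 2, "pedestrian": 3}),
    batch_size=1,
)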