cerebras.modelzoo.data.vision.segmentation.config.UNetDataProcessorConfig#

class cerebras.modelzoo.data.vision.segmentation.config.UNetDataProcessorConfig(batch_size: int = <required>, shuffle: bool = True, shuffle_seed: int = 0, num_workers: int = 0, prefetch_factor: int = 10, persistent_workers: bool = True, data_dir: Union[str, List[str]] = <required>, num_classes: int = <required>, loss: str = <required>, normalize_data_method: str = <required>, augment_data: bool = True, drop_last: bool = True, mixed_precision: Optional[bool] = None, use_fast_dataloader: bool = False, duplicate_act_worker_data: bool = False)[source]#
data_dir: Union[str, List[str]] = <required>#
num_classes: int = <required>#
loss: str = <required>#
normalize_data_method: str = <required>#
augment_data: bool = True#
num_workers: int = 0#

The number of PyTorch processes used in the dataloader

drop_last: bool = True#
prefetch_factor: int = 10#

The number of batches to prefetch in the dataloader

persistent_workers: bool = True#

Whether or not to keep workers persistent between epochs

mixed_precision: Optional[bool] = None#
use_fast_dataloader: bool = False#
duplicate_act_worker_data: bool = False#
batch_size: int = <required>#

Batch size to be used

shuffle: bool = True#

Whether or not to shuffle the dataset

shuffle_seed: int = 0#

Seed used for deterministic shuffling