from torch.utils.data.dataloader import DataLoader
# Training loader: 512-sample batches served by four worker processes that
# stay alive between epochs (persistent_workers avoids re-fork overhead).
loader_opts = dict(
    num_workers=4,  # number of worker processes to use
    batch_size=512,
    persistent_workers=True,
)
train_loader = DataLoader(dataset=train_set, **loader_opts)
import h5py
# Create the HDF5 file ('w' truncates any existing file) with a tuned chunk
# cache: 1 GiB total (rdcc_nbytes), 11213 hash slots (prime, rdcc_nslots),
# and w0=1 so fully-read chunks are evicted first.
# NOTE(review): the handle is intentionally left open — maxshape=(None, ...)
# suggests later code resizes/appends; remember to call celebA.close() (or
# use a `with` block) once all writes are done.
celebA = h5py.File(DATA_DIR, 'w', rdcc_nslots=11213, rdcc_nbytes=1024**3, rdcc_w0=1)

# Image dataset: uint8, resizable along the sample axis.
# Chunk shape is one chunk of 100 images of shape (3, 218, 178)
# (~11 MB per chunk: 100*3*218*178 bytes). The original chunk dim 217
# disagreed with maxshape's 218 — fixed so chunks align with image rows.
celebA.create_dataset('images',
                      data=batch_images,
                      dtype=np.uint8,
                      chunks=(100, 3, 218, 178),
                      maxshape=(None, 3, 218, 178))

# Label dataset. The original chunks=(20000, 0) is invalid: every chunk
# dimension must be positive (h5py raises ValueError), and a 20000-row chunk
# would also exceed the dataset extent whenever size < 20000. The rank of
# labels_h5 is not visible here, so let h5py choose a valid chunk shape.
# TODO(review): set an explicit chunk shape once the label array's shape
# is confirmed (e.g. (N,) vs (N, 40) attribute matrix).
celebA.create_dataset('labels',
                      data=labels_h5[:size],
                      dtype=np.uint8,
                      chunks=True)