Data set processing - tiny Imagenet

1, Introduction

   Recently, in addition to some experiments on large-scale data sets (ImageNet-1k and ImageNet-21k), we also did some ablation studies on small data sets. PyTorch ships built-in loaders for CIFAR-10 and CIFAR-100, but not for Tiny ImageNet, so this post simply records how to process that data set.
  Tiny ImageNet Challenge is the default course project of Stanford CS231N. It runs like the ImageNet challenge (ILSVRC). The goal of the challenge is to let users solve the problem of image classification as much as possible. The user submits the final forecast on the test set to this evaluation server, and the official will maintain the class ranking list.
  Tiny Imagenet has 200 classes. Each class has 500 training images, 50 verification images and 50 test images. Officials have released training and validation sets with images and annotations. Class labels and bounding boxes are officially provided as notes; However, the user only needs to predict the category label of each image without locating the object. The test set is published without labels.

2, Download data

  Click the official download link to download the data set directly. After downloading, you will get a compressed package of about 236MB.

3, Data form

  first unzip the folder and you will get the following contents

$ tree data
├── train
│   ├── class1
│   │   ├── images
│   │   │   ├── img1.JPEG
│   │   │   ├── img2.JPEG
│   │   │   └── ...
│   │   └── class1_boxes.txt
│   ├── class2
│   │   ├── images
│   │   │   ├── img3.JPEG
│   │   │   ├── img4.JPEG
│   │   │   └── ...
│   │   └── class2_boxes.txt
│   └── ...
├── val
│   ├── images
│   │   ├── img5.JPEG
│   │   ├── img6.JPEG
│   │   └── ...
│   └── val_annotations.txt
├── test
│   └── images
│       ├── img7.JPEG
│       ├── img8.JPEG
│       └── ...
├── wnids.txt
└── words.txt

   As officially stated, the test set is unlabeled, so we generally won't use it ourselves; the training and validation sets are what matter. As the tree above shows, the structures of train and val are different, so we can't simply use the torchvision.datasets.ImageFolder(data_dir) provided by PyTorch.

4, Custom data loading

  I have also written some articles on custom data loading before. I won't repeat it here. Simply put an available code found on github.

import os
import sys

import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import models, utils, datasets, transforms

class TinyImageNet(Dataset):
    """Dataset for the Tiny ImageNet challenge (200 classes of 64x64 JPEGs).

    Expects the standard layout of the official archive:
    ``root/train/<wnid>/images/*.JPEG`` plus per-class box files, and
    ``root/val/images/*.JPEG`` with ``root/val/val_annotations.txt``.

    Args:
        root: path to the unpacked ``tiny-imagenet-200`` directory.
        train: if True load the training split, else the validation split.
        transform: optional callable applied to each PIL image.
    """

    def __init__(self, root, train=True, transform=None):
        self.Train = train
        self.root_dir = root
        self.transform = transform
        self.train_dir = os.path.join(self.root_dir, "train")
        self.val_dir = os.path.join(self.root_dir, "val")

        # Build the class <-> target-index mappings for the requested split,
        # then the flat (path, target) list used by __getitem__.
        if self.Train:
            self._create_class_idx_dict_train()
        else:
            self._create_class_idx_dict_val()
        self._make_dataset(self.Train)

        words_file = os.path.join(self.root_dir, "words.txt")
        wnids_file = os.path.join(self.root_dir, "wnids.txt")

        # wnids.txt lists the 200 WordNet ids used by Tiny ImageNet.
        self.set_nids = set()
        with open(wnids_file, 'r') as fo:
            for entry in fo:
                self.set_nids.add(entry.strip("\n"))

        # words.txt maps each wnid to a comma-separated list of names;
        # keep only the first (most common) name as the readable label.
        self.class_to_label = {}
        with open(words_file, 'r') as fo:
            for entry in fo:
                words = entry.split("\t")
                if words[0] in self.set_nids:
                    self.class_to_label[words[0]] = (words[1].strip("\n").split(","))[0]

    def _create_class_idx_dict_train(self):
        """Scan train/ to build class<->index dicts and count the images."""
        if sys.version_info >= (3, 5):
            classes = [d.name for d in os.scandir(self.train_dir) if d.is_dir()]
        else:
            classes = [d for d in os.listdir(self.train_dir)
                       if os.path.isdir(os.path.join(self.train_dir, d))]
        classes = sorted(classes)

        # Count every JPEG under train/ so __len__ stays O(1).
        num_images = 0
        for root, dirs, files in os.walk(self.train_dir):
            for f in files:
                if f.endswith(".JPEG"):
                    num_images += 1
        self.len_dataset = num_images

        self.tgt_idx_to_class = {i: classes[i] for i in range(len(classes))}
        self.class_to_tgt_idx = {classes[i]: i for i in range(len(classes))}

    def _create_class_idx_dict_val(self):
        """Parse val_annotations.txt to build class<->index dicts."""
        val_annotations_file = os.path.join(self.val_dir, "val_annotations.txt")
        self.val_img_to_class = {}
        set_of_classes = set()
        with open(val_annotations_file, 'r') as fo:
            for data in fo:
                # Tab-separated columns: filename, wnid, x, y, w, h.
                words = data.split("\t")
                self.val_img_to_class[words[0]] = words[1]
                set_of_classes.add(words[1])

        self.len_dataset = len(self.val_img_to_class)
        classes = sorted(set_of_classes)
        self.class_to_tgt_idx = {classes[i]: i for i in range(len(classes))}
        self.tgt_idx_to_class = {i: classes[i] for i in range(len(classes))}

    def _make_dataset(self, Train=True):
        """Collect (image_path, target_index) tuples into self.images."""
        self.images = []
        if Train:
            img_root_dir = self.train_dir
            list_of_dirs = [target for target in self.class_to_tgt_idx.keys()]
        else:
            img_root_dir = self.val_dir
            list_of_dirs = ["images"]

        for tgt in list_of_dirs:
            dirs = os.path.join(img_root_dir, tgt)
            if not os.path.isdir(dirs):
                continue
            # Sort walks and filenames so the sample order is deterministic.
            for root, _, files in sorted(os.walk(dirs)):
                for fname in sorted(files):
                    if fname.endswith(".JPEG"):
                        path = os.path.join(root, fname)
                        if Train:
                            item = (path, self.class_to_tgt_idx[tgt])
                        else:
                            # Validation targets come from val_annotations.txt.
                            item = (path, self.class_to_tgt_idx[self.val_img_to_class[fname]])
                        self.images.append(item)

    def return_label(self, idx):
        """Map a sequence of target-index tensors to human-readable labels."""
        return [self.class_to_label[self.tgt_idx_to_class[i.item()]] for i in idx]

    def __len__(self):
        return self.len_dataset

    def __getitem__(self, idx):
        """Return (image, target) for sample ``idx``, applying transform."""
        img_path, tgt = self.images[idx]
        with open(img_path, 'rb') as f:
            sample = Image.open(f)
            # Convert while the file is still open; some Tiny ImageNet
            # images are grayscale, so force 3-channel RGB.
            sample = sample.convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)

        return sample, tgt

  then you can load data directly:

# Replace `tiny_imagenet` with the name of the file that defines TinyImageNet.
from tiny_imagenet import TinyImageNet

data_dir = './tiny-imagenet-200/'
dataset_train = TinyImageNet(data_dir, train=True)
dataset_val = TinyImageNet(data_dir, train=False)

"*" is your file name. The mean and standard deviation in transform can refer to the standard imagenet

Tags: Pytorch Deep Learning

Posted on Wed, 24 Nov 2021 00:58:46 -0500 by abhijeet