- eval_resnet50.py 실행 시
2024-02-17 00:54:29.785482: W tensorflow/core/framework/op_kernel.cc:1818] RESOURCE_EXHAUSTED: failed to allocate memory
이와 같은 오류가 발생했음
- 사용하는 컴퓨터의 메모리가 그리 넉넉한 편이 아니라서 그런지 (16GB) ResNet50과 같은 큰 모델은 실행이 불가함
- 따라서 Resnet50과 관련된 것은 실행시키지 않았음
run_all.sh/main_imagenet
# Top-level driver for the ImageNet flow: stages label files, builds the
# validation-image archive, quantizes and compiles ResNet18, then packs
# the per-board archives.
main_imagenet(){
# Copy ImageNet label/metadata text files next to the target artifacts.
cp modelzoo/ImageNet/*.txt ./target/imagenet/
# Extract a 500-image validation subset and zip it for the boards.
prepare_imagenet_test_images
# Post-training quantization of the float ResNet18 model.
quantize_resnet18_imagenet
# Compile the quantized model for every supported DPU target.
compile_resnet18_imagenet
# Clean up the extracted images to save disk space.
remove_imagenet_test_images
# Build the per-board tar archives.
prepare_imagenet_archives
}
run_all.sh/prepare_imagenet_test_images
# The ImageNet validation archive cannot be redistributed; the user must
# download it manually and place it here.
ARCHIVE=./files/modelzoo/ImageNet/ILSVRC2012_img_val.tar
# BUGFIX: the original test was inverted ("if [ -f ... ]" printed
# "does exist: you have to download it"). Abort only when the archive
# is MISSING; otherwise extract it.
# NOTE(review): the tar below reads ./modelzoo/ImageNet/ILSVRC2012_img_val.tar,
# not $ARCHIVE — confirm the two paths are meant to hold the same file.
if [ ! -f "$ARCHIVE" ]; then
echo "ERROR! $ARCHIVE does not exist: you have to download it"
else
cd ./modelzoo/ImageNet/
mkdir -p val_dataset
echo "expanding ILSVRC2012_img_val.tar archive"
tar -xvf ILSVRC2012_img_val.tar -C ./val_dataset > /dev/null
# Sanity check: count the extracted files (50000 images expected).
ls -l ./val_dataset | wc
expanding ILSVRC2012_img_val.tar archive
50001 450002 4600014
python3 imagenet_val_dataset.py
import os
import zipfile

# Number of validation images to pack for the target board.
NUM_VAL_IMAGES = 500


def collect_filenames(list_path, num_images):
    """Return the basenames of the first *num_images* entries of *list_path*.

    Each line of the list file is "<path> <label>"; only the file-name part
    is kept, because the images live flat inside the dataset directory.
    Slicing (instead of indexing a fixed range) tolerates a shorter list.
    """
    with open(list_path, "r") as list_file:
        lines = list_file.readlines()[:num_images]
    return [os.path.basename(line.strip().split(" ")[0]) for line in lines]


def build_archive(filenames, src_dir, zip_path,
                  extra_files=("./val.txt", "./words.txt")):
    """Zip *filenames* (taken from *src_dir*) plus *extra_files* into
    *zip_path*; return the number of image files archived.

    The context manager closes the archive even on error (the original
    also called an unnecessary close() after the `with` block).
    """
    count = 0
    with zipfile.ZipFile(zip_path, "w") as archive:
        for filename in filenames:
            archive.write(os.path.join(src_dir, filename))
            count += 1
        for extra in extra_files:
            archive.write(extra)
    return count


if __name__ == "__main__":
    filenames_list = collect_filenames("./val.txt", NUM_VAL_IMAGES)
    N = build_archive(filenames_list, "./val_dataset", "val_dataset.zip")
    print("val_dataset.zip archive is ready with ", N, "images")
val_dataset.zip archive is ready with 500 images
cd ../..
# Stage the freshly built archive for the target board and unpack it there.
cp ./modelzoo/ImageNet/val_dataset.zip ./target/imagenet
cd ./target/imagenet/
# -o: overwrite without prompting, -q: quiet
unzip -o -q val_dataset.zip
cd ../../
fi
run_all.sh/quantize_resnet18_imagenet
python ./code/eval_resnet18.py
import os
import cv2
import numpy as np
import tensorflow as tf
keras = tf.keras
from keras.utils import Sequence
from config import imagenet_config as cfg
print(cfg.SCRIPT_DIR)
/workspace/tutorials/RESNET18/original
# Evaluation runs over the validation subset in fixed-size batches.
eval_batch_size = 50
EVAL_NUM = cfg.NUM_VAL_IMAGES  # number of validation images used for eval
NUMEL = cfg.NUM_CLASSES        # number of ImageNet classes
def get_images_infor_from_file(image_dir, image_list, label_offset):
    """Read an ImageNet "val.txt"-style list and return paths and labels.

    Parameters
    ----------
    image_dir : directory containing the validation images.
    image_list : text file with one "<filename> <label>" pair per line.
    label_offset : subtracted from the raw label after a +1 shift, so the
        same list serves models trained with or without a background class
        (offset 1 yields a net-zero shift).

    Returns
    -------
    (imgs, labels) : parallel lists of image paths and integer labels.

    Raises
    ------
    AssertionError : if a list label disagrees with cfg.labelNames_dict.
    """
    with open(image_list, 'r') as fr:
        lines = fr.readlines()
    imgs = []
    labels = []
    for line in lines:
        img_name, label = line.strip().split(" ")
        img_path = os.path.join(image_dir, img_name)
        label = int(label) + 1 - label_offset
        imgs.append(img_path)
        labels.append(label)
        # Cross-check against the label table keyed by the file-name stem
        # (text before the first "."); the original added a no-op "+0" here.
        class_name = os.path.basename(img_path).split(".")[0]
        assert cfg.labelNames_dict[class_name] == label, \
            "found a mismatch in labels"
    return imgs, labels
class ImagenetSequence_ResNet18(Sequence):
    """Keras Sequence yielding batches of center-cropped ImageNet images.

    Per-image pipeline: BGR->RGB, aspect-preserving resize so the shorter
    side is 256, central 224x224 crop, then per-channel mean subtraction.
    All channel means are zero for this ResNet18, so the subtraction is a
    numeric no-op kept only to preserve the original float output exactly.
    """

    # Preprocessing constants, hoisted out of the per-image loop (the
    # original rebuilt them for every image).
    _RESIZE_MIN = 256   # target length of the shorter image side
    _CROP_HEIGHT = 224
    _CROP_WIDTH = 224
    # Channel means in BGR order, broadcast to (1, 1, 3); all zero here.
    _CHANNEL_MEANS = np.expand_dims(np.expand_dims([0, 0, 0], 0), 0)

    def __init__(self, filenames, labels, batch_size):
        self.filenames, self.labels = filenames, labels
        self.batch_size = batch_size

    def __len__(self):
        # ceil: the last batch may hold fewer than batch_size images.
        return int(np.ceil(len(self.filenames) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_x = self.filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        processed_imgs = [self._preprocess(filename) for filename in batch_x]
        return np.array(processed_imgs), np.array(batch_y)

    def _preprocess(self, filename):
        """Load one image and apply the resize/crop/mean pipeline."""
        raw = cv2.imread(filename)
        # OpenCV loads BGR; reorder channels to RGB.
        B, G, R = cv2.split(raw)
        img = cv2.merge([R, G, B])
        height, width = img.shape[0], img.shape[1]
        img = img.astype(float)
        # Scale so the shorter side becomes _RESIZE_MIN, keeping aspect ratio.
        smaller_dim = np.min([height, width])
        scale_ratio = self._RESIZE_MIN * 1.0 / (smaller_dim * 1.0)
        new_height = int(height * scale_ratio)
        new_width = int(width * scale_ratio)
        resized_img = cv2.resize(img, (new_width, new_height),
                                 interpolation=cv2.INTER_CUBIC)
        # Central crop to the network's input size.
        crop_top = (new_height - self._CROP_HEIGHT) // 2
        crop_left = (new_width - self._CROP_WIDTH) // 2
        cropped_img = resized_img[crop_top:crop_top + self._CROP_HEIGHT,
                                  crop_left:crop_left + self._CROP_WIDTH, :]
        return cropped_img - self._CHANNEL_MEANS
print("\n[DB INFO] Get Input Data with Proper Pre-processing...\n")
[DB INFO] Get Input Data with Proper Pre-processing...
# Validation image directory and the "<file> <label>" list that drives it.
dataset_dir = cfg.VALID_DIR
val_list_file = os.path.sep.join([cfg.DATASET_DIR, "val.txt"])
# Offset 1 cancels the +1 shift applied inside get_images_infor_from_file.
labels_offset = 1
img_paths, labels = get_images_infor_from_file(dataset_dir, val_list_file, labels_offset)
# Batches of 50 preprocessed images (matches eval_batch_size above).
imagenet_seq18 = ImagenetSequence_ResNet18(img_paths, labels, 50)
print("\n[DB INFO] Get ResNet18 CNN pre-trained on ImageNet...\n")
[DB INFO] Get ResNet18 CNN pre-trained on ImageNet...
# Fetch the pre-trained ResNet18 from the classification_models model zoo.
from classification_models.keras import Classifiers
ResNet18, preprocess_input = Classifiers.get("resnet18")
# 224x224 RGB input with ImageNet weights.
model18 = ResNet18((224, 224, 3), weights="imagenet")
model18.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
data (InputLayer) [(None, 224, 224, 3 0 []
)]
bn_data (BatchNormalization) (None, 224, 224, 3) 9 ['data[0][0]']
zero_padding2d (ZeroPadding2D) (None, 230, 230, 3) 0 ['bn_data[0][0]']
conv0 (Conv2D) (None, 112, 112, 64 9408 ['zero_padding2d[0][0]']
)
bn0 (BatchNormalization) (None, 112, 112, 64 256 ['conv0[0][0]']
)
relu0 (Activation) (None, 112, 112, 64 0 ['bn0[0][0]']
)
zero_padding2d_1 (ZeroPadding2 (None, 114, 114, 64 0 ['relu0[0][0]']
D) )
pooling0 (MaxPooling2D) (None, 56, 56, 64) 0 ['zero_padding2d_1[0][0]']
stage1_unit1_bn1 (BatchNormali (None, 56, 56, 64) 256 ['pooling0[0][0]']
zation)
stage1_unit1_relu1 (Activation (None, 56, 56, 64) 0 ['stage1_unit1_bn1[0][0]']
)
zero_padding2d_2 (ZeroPadding2 (None, 58, 58, 64) 0 ['stage1_unit1_relu1[0][0]']
D)
stage1_unit1_conv1 (Conv2D) (None, 56, 56, 64) 36864 ['zero_padding2d_2[0][0]']
stage1_unit1_bn2 (BatchNormali (None, 56, 56, 64) 256 ['stage1_unit1_conv1[0][0]']
zation)
stage1_unit1_relu2 (Activation (None, 56, 56, 64) 0 ['stage1_unit1_bn2[0][0]']
)
zero_padding2d_3 (ZeroPadding2 (None, 58, 58, 64) 0 ['stage1_unit1_relu2[0][0]']
D)
stage1_unit1_conv2 (Conv2D) (None, 56, 56, 64) 36864 ['zero_padding2d_3[0][0]']
stage1_unit1_sc (Conv2D) (None, 56, 56, 64) 4096 ['stage1_unit1_relu1[0][0]']
add (Add) (None, 56, 56, 64) 0 ['stage1_unit1_conv2[0][0]',
'stage1_unit1_sc[0][0]']
stage1_unit2_bn1 (BatchNormali (None, 56, 56, 64) 256 ['add[0][0]']
zation)
stage1_unit2_relu1 (Activation (None, 56, 56, 64) 0 ['stage1_unit2_bn1[0][0]']
)
zero_padding2d_4 (ZeroPadding2 (None, 58, 58, 64) 0 ['stage1_unit2_relu1[0][0]']
D)
stage1_unit2_conv1 (Conv2D) (None, 56, 56, 64) 36864 ['zero_padding2d_4[0][0]']
stage1_unit2_bn2 (BatchNormali (None, 56, 56, 64) 256 ['stage1_unit2_conv1[0][0]']
zation)
stage1_unit2_relu2 (Activation (None, 56, 56, 64) 0 ['stage1_unit2_bn2[0][0]']
)
zero_padding2d_5 (ZeroPadding2 (None, 58, 58, 64) 0 ['stage1_unit2_relu2[0][0]']
D)
stage1_unit2_conv2 (Conv2D) (None, 56, 56, 64) 36864 ['zero_padding2d_5[0][0]']
add_1 (Add) (None, 56, 56, 64) 0 ['stage1_unit2_conv2[0][0]',
'add[0][0]']
stage2_unit1_bn1 (BatchNormali (None, 56, 56, 64) 256 ['add_1[0][0]']
zation)
stage2_unit1_relu1 (Activation (None, 56, 56, 64) 0 ['stage2_unit1_bn1[0][0]']
)
zero_padding2d_6 (ZeroPadding2 (None, 58, 58, 64) 0 ['stage2_unit1_relu1[0][0]']
D)
stage2_unit1_conv1 (Conv2D) (None, 28, 28, 128) 73728 ['zero_padding2d_6[0][0]']
stage2_unit1_bn2 (BatchNormali (None, 28, 28, 128) 512 ['stage2_unit1_conv1[0][0]']
zation)
stage2_unit1_relu2 (Activation (None, 28, 28, 128) 0 ['stage2_unit1_bn2[0][0]']
)
zero_padding2d_7 (ZeroPadding2 (None, 30, 30, 128) 0 ['stage2_unit1_relu2[0][0]']
D)
stage2_unit1_conv2 (Conv2D) (None, 28, 28, 128) 147456 ['zero_padding2d_7[0][0]']
stage2_unit1_sc (Conv2D) (None, 28, 28, 128) 8192 ['stage2_unit1_relu1[0][0]']
add_2 (Add) (None, 28, 28, 128) 0 ['stage2_unit1_conv2[0][0]',
'stage2_unit1_sc[0][0]']
stage2_unit2_bn1 (BatchNormali (None, 28, 28, 128) 512 ['add_2[0][0]']
zation)
stage2_unit2_relu1 (Activation (None, 28, 28, 128) 0 ['stage2_unit2_bn1[0][0]']
)
zero_padding2d_8 (ZeroPadding2 (None, 30, 30, 128) 0 ['stage2_unit2_relu1[0][0]']
D)
stage2_unit2_conv1 (Conv2D) (None, 28, 28, 128) 147456 ['zero_padding2d_8[0][0]']
stage2_unit2_bn2 (BatchNormali (None, 28, 28, 128) 512 ['stage2_unit2_conv1[0][0]']
zation)
stage2_unit2_relu2 (Activation (None, 28, 28, 128) 0 ['stage2_unit2_bn2[0][0]']
)
zero_padding2d_9 (ZeroPadding2 (None, 30, 30, 128) 0 ['stage2_unit2_relu2[0][0]']
D)
stage2_unit2_conv2 (Conv2D) (None, 28, 28, 128) 147456 ['zero_padding2d_9[0][0]']
add_3 (Add) (None, 28, 28, 128) 0 ['stage2_unit2_conv2[0][0]',
'add_2[0][0]']
stage3_unit1_bn1 (BatchNormali (None, 28, 28, 128) 512 ['add_3[0][0]']
zation)
stage3_unit1_relu1 (Activation (None, 28, 28, 128) 0 ['stage3_unit1_bn1[0][0]']
)
zero_padding2d_10 (ZeroPadding (None, 30, 30, 128) 0 ['stage3_unit1_relu1[0][0]']
2D)
stage3_unit1_conv1 (Conv2D) (None, 14, 14, 256) 294912 ['zero_padding2d_10[0][0]']
stage3_unit1_bn2 (BatchNormali (None, 14, 14, 256) 1024 ['stage3_unit1_conv1[0][0]']
zation)
stage3_unit1_relu2 (Activation (None, 14, 14, 256) 0 ['stage3_unit1_bn2[0][0]']
)
zero_padding2d_11 (ZeroPadding (None, 16, 16, 256) 0 ['stage3_unit1_relu2[0][0]']
2D)
stage3_unit1_conv2 (Conv2D) (None, 14, 14, 256) 589824 ['zero_padding2d_11[0][0]']
stage3_unit1_sc (Conv2D) (None, 14, 14, 256) 32768 ['stage3_unit1_relu1[0][0]']
add_4 (Add) (None, 14, 14, 256) 0 ['stage3_unit1_conv2[0][0]',
'stage3_unit1_sc[0][0]']
stage3_unit2_bn1 (BatchNormali (None, 14, 14, 256) 1024 ['add_4[0][0]']
zation)
stage3_unit2_relu1 (Activation (None, 14, 14, 256) 0 ['stage3_unit2_bn1[0][0]']
)
zero_padding2d_12 (ZeroPadding (None, 16, 16, 256) 0 ['stage3_unit2_relu1[0][0]']
2D)
stage3_unit2_conv1 (Conv2D) (None, 14, 14, 256) 589824 ['zero_padding2d_12[0][0]']
stage3_unit2_bn2 (BatchNormali (None, 14, 14, 256) 1024 ['stage3_unit2_conv1[0][0]']
zation)
stage3_unit2_relu2 (Activation (None, 14, 14, 256) 0 ['stage3_unit2_bn2[0][0]']
)
zero_padding2d_13 (ZeroPadding (None, 16, 16, 256) 0 ['stage3_unit2_relu2[0][0]']
2D)
stage3_unit2_conv2 (Conv2D) (None, 14, 14, 256) 589824 ['zero_padding2d_13[0][0]']
add_5 (Add) (None, 14, 14, 256) 0 ['stage3_unit2_conv2[0][0]',
'add_4[0][0]']
stage4_unit1_bn1 (BatchNormali (None, 14, 14, 256) 1024 ['add_5[0][0]']
zation)
stage4_unit1_relu1 (Activation (None, 14, 14, 256) 0 ['stage4_unit1_bn1[0][0]']
)
zero_padding2d_14 (ZeroPadding (None, 16, 16, 256) 0 ['stage4_unit1_relu1[0][0]']
2D)
stage4_unit1_conv1 (Conv2D) (None, 7, 7, 512) 1179648 ['zero_padding2d_14[0][0]']
stage4_unit1_bn2 (BatchNormali (None, 7, 7, 512) 2048 ['stage4_unit1_conv1[0][0]']
zation)
stage4_unit1_relu2 (Activation (None, 7, 7, 512) 0 ['stage4_unit1_bn2[0][0]']
)
zero_padding2d_15 (ZeroPadding (None, 9, 9, 512) 0 ['stage4_unit1_relu2[0][0]']
2D)
stage4_unit1_conv2 (Conv2D) (None, 7, 7, 512) 2359296 ['zero_padding2d_15[0][0]']
stage4_unit1_sc (Conv2D) (None, 7, 7, 512) 131072 ['stage4_unit1_relu1[0][0]']
add_6 (Add) (None, 7, 7, 512) 0 ['stage4_unit1_conv2[0][0]',
'stage4_unit1_sc[0][0]']
stage4_unit2_bn1 (BatchNormali (None, 7, 7, 512) 2048 ['add_6[0][0]']
zation)
stage4_unit2_relu1 (Activation (None, 7, 7, 512) 0 ['stage4_unit2_bn1[0][0]']
)
zero_padding2d_16 (ZeroPadding (None, 9, 9, 512) 0 ['stage4_unit2_relu1[0][0]']
2D)
stage4_unit2_conv1 (Conv2D) (None, 7, 7, 512) 2359296 ['zero_padding2d_16[0][0]']
stage4_unit2_bn2 (BatchNormali (None, 7, 7, 512) 2048 ['stage4_unit2_conv1[0][0]']
zation)
stage4_unit2_relu2 (Activation (None, 7, 7, 512) 0 ['stage4_unit2_bn2[0][0]']
)
zero_padding2d_17 (ZeroPadding (None, 9, 9, 512) 0 ['stage4_unit2_relu2[0][0]']
2D)
stage4_unit2_conv2 (Conv2D) (None, 7, 7, 512) 2359296 ['zero_padding2d_17[0][0]']
add_7 (Add) (None, 7, 7, 512) 0 ['stage4_unit2_conv2[0][0]',
'add_6[0][0]']
bn1 (BatchNormalization) (None, 7, 7, 512) 2048 ['add_7[0][0]']
relu1 (Activation) (None, 7, 7, 512) 0 ['bn1[0][0]']
pool1 (GlobalAveragePooling2D) (None, 512) 0 ['relu1[0][0]']
fc1 (Dense) (None, 1000) 513000 ['pool1[0][0]']
softmax (Activation) (None, 1000) 0 ['fc1[0][0]']
==================================================================================================
Total params: 11,699,889
Trainable params: 11,691,947
Non-trainable params: 7,942
__________________________________________________________________________________________________
# Persist the floating-point baseline model before quantization.
cnn_filename = os.path.sep.join([cfg.SCRIPT_DIR, "build/float/float_resnet18_imagenet.h5"])
model18.save(cnn_filename)
print("\n[DB INFO] Compile ResNet18 CNN...\n")
[DB INFO] Compile ResNet18 CNN...
# Compile for evaluation only (no training): loss plus top-1/top-5 accuracy.
loss = keras.losses.SparseCategoricalCrossentropy()
metric_top_5 = keras.metrics.SparseTopKCategoricalAccuracy()
accuracy = keras.metrics.SparseCategoricalAccuracy()
model18.compile(optimizer="adam", loss=loss, metrics=[accuracy, metric_top_5])
print("\n[DB INFO] Evaluate Average Prediction Accuracy of ResNet18 CNN...\n")
[DB INFO] Evaluate Average Prediction Accuracy of ResNet18 CNN...
# Baseline (float) accuracy over the validation subset.
res18 = model18.evaluate(imagenet_seq18, steps=EVAL_NUM/eval_batch_size, verbose=1)
10/10 [==============================] - 5s 291ms/step - loss: 1.2996 - sparse_categorical_accuracy: 0.6900 - sparse_top_k_categorical_accuracy: 0.9080
Original ResNet18 top1, top5: 0.6899999976158142 0.9079999923706055
print("\n[DB INFO] Vitis AI PT Quantization of ResNet18 CNN...\n")
[DB INFO] Vitis AI PT Quantization of ResNet18 CNN...
# Post-training quantization, calibrated with one batch of preprocessed images.
from tensorflow_model_optimization.quantization.keras import vitis_quantize
quantizer = vitis_quantize.VitisQuantizer(model18)
q_model18 = quantizer.quantize_model(calib_dataset=imagenet_seq18.__getitem__(0)[0])
QUANT_HDF5_FILE = os.path.sep.join([cfg.SCRIPT_DIR, "build/quantized/q_resnet18_imagenet.h5"])
q_model18.save(QUANT_HDF5_FILE)
[VAI INFO] Using func format quantizer
[VAI INFO] Quantizing without specific `target`.
[VAI INFO] Start CrossLayerEqualization...
1/10 [==>...........................] - ETA: 0s
2/10 [=====>........................] - ETA: 5s
3/10 [========>.....................] - ETA: 4s
4/10 [===========>..................] - ETA: 4s
5/10 [==============>...............] - ETA: 3s
6/10 [=================>............] - ETA: 2s
7/10 [====================>.........] - ETA: 2s
8/10 [=======================>......] - ETA: 1s
9/10 [==========================>...] - ETA: 0s
10/10 [==============================] - 6s 707ms/step
[VAI INFO] CrossLayerEqualization Done.
[VAI INFO] Start Quantize Calibration...
1/2 [==============>...............] - ETA: 16s
2/2 [==============================] - ETA: 0s
2/2 [==============================] - 18s 2s/step
[VAI INFO] Quantize Calibration Done.
[VAI INFO] Start Post-Quant Model Refinement...
[VAI INFO] Start Quantize Position Ajustment...
[VAI INFO] Quantize Position Ajustment Done.
[VAI INFO] Post-Quant Model Refninement Done.
[VAI INFO] Start Model Finalization...
[VAI INFO] Model Finalization Done.
[VAI INFO] Quantization Finished.
print("\n[DB INFO] Evaluation of ResNet18 Quantized Model...\n")
[DB INFO] Evaluation of ResNet18 Quantized Model...
q_model = keras.models.load_model(QUANT_HDF5_FILE)
# Materialize the validation subset once so the quantized model can be
# evaluated with plain arrays.
X_test = np.zeros((EVAL_NUM, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3), dtype="float32")
Y_test = np.zeros((EVAL_NUM, 1), dtype="float32")
step = eval_batch_size
for i in range(0, EVAL_NUM, step):
    # Fetch each batch exactly once. The original called __getitem__ twice
    # per batch, running the expensive image preprocessing twice.
    batch_images, batch_labels = imagenet_seq18[i // step]
    X_test[i:i + step, :, :, :] = np.asarray(batch_images)
    Y_test[i:i + step, 0] = np.asarray(batch_labels)
with vitis_quantize.quantize_scope():
    q_model.compile(optimizer="adam", loss=loss, metrics=[accuracy, metric_top_5])
    q_res = q_model.evaluate(X_test, Y_test)
    print("Quantized ResNet18 top1, top5: ", q_res[1], q_res[2])
16/16 [==============================] - 6s 263ms/step - loss: 1.6757 - sparse_categorical_accuracy: 0.6420 - sparse_top_k_categorical_accuracy: 0.8740
Quantized ResNet18 top1, top5: 0.6420000195503235 0.8740000128746033
run_all.sh/compile_resnet18_imagenet
source ./scripts/run_compile.sh zcu102 q_resnet18_imagenet.h5
source ./scripts/run_compile.sh vck190 q_resnet18_imagenet.h5
source ./scripts/run_compile.sh vek280 q_resnet18_imagenet.h5
source ./scripts/run_compile.sh vck5000 q_resnet18_imagenet.h5
source ./scripts/run_compile.sh v70 q_resnet18_imagenet.h5
mv ./build/compiled_zcu102/zcu102_q_resnet18_imagenet.h5.xmodel ./target/imagenet/zcu102_resnet18_imagenet.xmodel
mv ./build/compiled_vck190/vck190_q_resnet18_imagenet.h5.xmodel ./target/imagenet/vck190_resnet18_imagenet.xmodel
mv ./build/compiled_vek280/vek280_q_resnet18_imagenet.h5.xmodel ./target/imagenet/vek280_resnet18_imagenet.xmodel
mv ./build/compiled_vck5000/vck5000_q_resnet18_imagenet.h5.xmodel ./target/imagenet/vck5000_resnet18_imagenet.xmodel
mv ./build/compiled_v70/v70_q_resnet18_imagenet.h5.xmodel ./target/imagenet/v70_resnet18_imagenet.xmodel
#!/bin/sh
# Compile a quantized Keras model for the selected DPU target.
# Usage: run_compile.sh <target> <quantized_model.h5>
#   <target>: one of zcu102, zcu104, vck190, vck5000, vek280, v70
#
# A quoted `case` replaces the original unquoted `[ $1 = ... ]` chain,
# which broke ("unary operator expected") when $1 was empty.
case "$1" in
zcu102)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCZDX8G/ZCU102/arch.json
  TARGET=zcu102
  BOARD_MSG="COMPILING MODEL FOR ZCU102.."
  ;;
zcu104)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCZDX8G/ZCU104/arch.json
  TARGET=zcu104
  BOARD_MSG="COMPILING MODEL FOR ZCU104.."
  ;;
vck190)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCVDX8G/VCK190/arch.json
  TARGET=vck190
  BOARD_MSG="COMPILING MODEL FOR VCK190.."
  ;;
v70)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCV2DX8G/V70/arch.json
  TARGET=v70
  BOARD_MSG="COMPILING MODEL FOR ALVEO V70.."
  ;;
vek280)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCV2DX8G/VEK280/arch.json
  TARGET=vek280
  BOARD_MSG="COMPILING MODEL FOR VEK280"
  ;;
vck5000)
  ARCH=/opt/vitis_ai/compiler/arch/DPUCVDX8H/VCK50004PE/arch.json
  TARGET=vck5000
  BOARD_MSG="COMPILING MODEL FOR VCK5000"
  ;;
*)
  echo "Target not found. Valid choices are: zcu102, zcu104, vck190, vck5000, vek280, v70 ...exiting"
  exit 1
  ;;
esac
echo "-----------------------------------------"
echo "$BOARD_MSG"
echo "-----------------------------------------"
CNN_MODEL=$2
# vai_c_tensorflow2 reads the quantized HDF5 and emits an .xmodel for $ARCH.
compile() {
  vai_c_tensorflow2 \
    --model ./build/quantized/${CNN_MODEL} \
    --arch $ARCH \
    --output_dir ./build/compiled_${TARGET} \
    --net_name ${TARGET}_${CNN_MODEL}
}
compile
echo "-----------------------------------------"
echo "MODEL COMPILED"
echo "-----------------------------------------"
----------------------------------------------------------------------------------
[DB INFO STEP10] COMPILE IMAGENET QUANTIZED RESNET18
----------------------------------------------------------------------------------
-----------------------------------------
COMPILING MODEL FOR ZCU102..
-----------------------------------------
[INFO] Namespace(batchsize=1, inputs_shape=None, layout='NHWC', model_files=['./build/quantized/q_resnet18_imagenet.h5'], model_type='tensorflow2', named_inputs_shape=None, out_filename='/tmp/zcu102_q_resnet18_imagenet.h5_DPUCZDX8G_ISA1_B4096_org.xmodel', proto=None)
[INFO] tensorflow2 model: /workspace/tutorials/RESNET18/original/build/quantized/q_resnet18_imagenet.h5
[INFO] keras version: 2.12.0
[INFO] Tensorflow Keras model type: functional
[INFO] dump xmodel ...
[INFO] dump xmodel: /tmp/zcu102_q_resnet18_imagenet.h5_DPUCZDX8G_ISA1_B4096_org.xmodel
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
MODEL COMPILED
-----------------------------------------
-----------------------------------------
COMPILING MODEL FOR VCK190..
-----------------------------------------
[INFO] Namespace(batchsize=1, inputs_shape=None, layout='NHWC', model_files=['./build/quantized/q_resnet18_imagenet.h5'], model_type='tensorflow2', named_inputs_shape=None, out_filename='/tmp/vck190_q_resnet18_imagenet.h5_DPUCVDX8G_ISA3_C32B6_org.xmodel', proto=None)
[INFO] tensorflow2 model: /workspace/tutorials/RESNET18/original/build/quantized/q_resnet18_imagenet.h5
[INFO] keras version: 2.12.0
[INFO] Tensorflow Keras model type: functional
[INFO] dump xmodel ...
[INFO] dump xmodel: /tmp/vck190_q_resnet18_imagenet.h5_DPUCVDX8G_ISA3_C32B6_org.xmodel
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
MODEL COMPILED
-----------------------------------------
-----------------------------------------
COMPILING MODEL FOR VEK280
-----------------------------------------
[INFO] Namespace(batchsize=1, inputs_shape=None, layout='NHWC', model_files=['./build/quantized/q_resnet18_imagenet.h5'], model_type='tensorflow2', named_inputs_shape=None, out_filename='/tmp/vek280_q_resnet18_imagenet.h5_DPUCV2DX8G_ISA1_C20B14_org.xmodel', proto=None)
[INFO] tensorflow2 model: /workspace/tutorials/RESNET18/original/build/quantized/q_resnet18_imagenet.h5
[INFO] keras version: 2.12.0
[INFO] Tensorflow Keras model type: functional
[INFO] dump xmodel ...
[INFO] dump xmodel: /tmp/vek280_q_resnet18_imagenet.h5_DPUCV2DX8G_ISA1_C20B14_org.xmodel
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
MODEL COMPILED
-----------------------------------------
-----------------------------------------
COMPILING MODEL FOR VCK5000
-----------------------------------------
[INFO] Namespace(batchsize=1, inputs_shape=None, layout='NHWC', model_files=['./build/quantized/q_resnet18_imagenet.h5'], model_type='tensorflow2', named_inputs_shape=None, out_filename='/tmp/vck5000_q_resnet18_imagenet.h5_DPUCVDX8H_ISA1_F2W4_4PE_org.xmodel', proto=None)
[INFO] tensorflow2 model: /workspace/tutorials/RESNET18/original/build/quantized/q_resnet18_imagenet.h5
[INFO] keras version: 2.12.0
[INFO] Tensorflow Keras model type: functional
[INFO] dump xmodel ...
[INFO] dump xmodel: /tmp/vck5000_q_resnet18_imagenet.h5_DPUCVDX8H_ISA1_F2W4_4PE_org.xmodel
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
MODEL COMPILED
-----------------------------------------
-----------------------------------------
COMPILING MODEL FOR ALVEO V70..
-----------------------------------------
[INFO] Namespace(batchsize=1, inputs_shape=None, layout='NHWC', model_files=['./build/quantized/q_resnet18_imagenet.h5'], model_type='tensorflow2', named_inputs_shape=None, out_filename='/tmp/v70_q_resnet18_imagenet.h5_DPUCV2DX8G_ISA1_C20B14_org.xmodel', proto=None)
[INFO] tensorflow2 model: /workspace/tutorials/RESNET18/original/build/quantized/q_resnet18_imagenet.h5
[INFO] keras version: 2.12.0
[INFO] Tensorflow Keras model type: functional
[INFO] dump xmodel ...
[INFO] dump xmodel: /tmp/v70_q_resnet18_imagenet.h5_DPUCV2DX8G_ISA1_C20B14_org.xmodel
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
MODEL COMPILED
-----------------------------------------
run_all.sh/prepare_imagenet_archives
echo " "
echo "----------------------------------------------------------------------------------"
echo "[DB INFO STEP11] PREPARING IMAGENET ARCHIVE FOR TARGET BOARDS"
echo "----------------------------------------------------------------------------------"
echo " "
if [ -d "./build/target" ]; then
echo "./build/target exists already ..."
else
echo "./build/target does not exists ..."
fi
# BUGFIX: always create the staging directories. The original only created
# them when ./build/target was absent, so a partially built tree (target
# present, per-board dirs missing) made the copies below fail. mkdir -p is
# idempotent, so this is safe on every run.
mkdir -p ./build/target
mkdir -p ./build/target_vck190
mkdir -p ./build/target_vck5000
mkdir -p ./build/target_vek280
mkdir -p ./build/target_zcu102
mkdir -p ./build/target_v70
# Master copy with every board's xmodel plus the test images.
cp -r ./target/imagenet ./build/target/
# Per-board copies: keep only the xmodels that board can run.
cp -r ./build/target/imagenet ./build/target_zcu102/
rm -f ./build/target_zcu102/imagenet/vck*_imagenet.xmodel
rm -f ./build/target_zcu102/imagenet/vek*_imagenet.xmodel
cp -r ./build/target/imagenet ./build/target_vck190/
rm -f ./build/target_vck190/imagenet/zcu1*_imagenet.xmodel
rm -f ./build/target_vck190/imagenet/vek2*_imagenet.xmodel
rm -f ./build/target_vck190/imagenet/vck5*_imagenet.xmodel
cp -r ./build/target/imagenet ./build/target_vek280/
rm -f ./build/target_vek280/imagenet/zcu*_imagenet.xmodel
rm -f ./build/target_vek280/imagenet/vck*_imagenet.xmodel
cp -r ./build/target/imagenet ./build/target_vck5000/
rm -f ./build/target_vck5000/imagenet/zcu1*_imagenet.xmodel
rm -f ./build/target_vck5000/imagenet/vek2*_imagenet.xmodel
rm -f ./build/target_vck5000/imagenet/vck1*_imagenet.xmodel
cp -r ./build/target/imagenet ./build/target_v70/
rm -f ./build/target_v70/imagenet/zcu1*_imagenet.xmodel
rm -f ./build/target_v70/imagenet/vek2*_imagenet.xmodel
rm -f ./build/target_v70/imagenet/vck*_imagenet.xmodel
# Pack one tar archive per board (verbose listing suppressed).
cd ./build
tar -cvf target_zcu102.tar ./target_zcu102 > /dev/null
tar -cvf target_vck190.tar ./target_vck190 > /dev/null
tar -cvf target_vek280.tar ./target_vek280 > /dev/null
tar -cvf target_vck5000.tar ./target_vck5000 > /dev/null
tar -cvf target_v70.tar ./target_v70 > /dev/null
cd ..
----------------------------------------------------------------------------------
[DB INFO STEP11] PREPARING IMAGENET ARCHIVE FOR TARGET BOARDS
----------------------------------------------------------------------------------
./build/target exists already ...