.PHONY: all clean evaluate downloaddata torch
SHELL:=bash -l
########################################################################
# Makefile to download, install and run VQA evaluation (and training) code https://github.com/VT-vision-lab/VQA_LSTM_CNN
# Usage:
# make evaluate # Runs evaluation code from downloaded models
# make downloaddata # Downloads MSCOCO dataset to required directory format
# make evaluate DO_TRAINING=1 # Runs training code before evaluation
# make clean # Removes the downloaded features and model
# make torch # Installs torch
########################################################################
# Config
# Do training before evaluation or just download pretrained models?
DO_TRAINING?=
# Data directory for MSCOCO dataset Images, Annotations and Questions
# Scroll down to the definition of target "downloaddata"
# for the required directory structure.
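# In short, the layout (derived from the downloaddata prerequisites below) looks like:
#   $(DATA_DIR)/Annotations/mscoco_{train2014,val2014}_annotations.json
#   $(DATA_DIR)/Questions/{OpenEnded,MultipleChoice}_mscoco_{train2014,val2014}_questions.json
#   $(DATA_DIR)/Images/mscoco/{train2014,val2014,test2015}/COCO_*.jpg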
DATA_DIR?=$(if $(call checkos,flux),/scratch/eecs542w16_fluxg/$(USER)/MSCOCO/,/home/$(USER)/data/MSCOCO/)
# Install location for TORCH
TORCH_INSTALL?=$(HOME)/share/torch/
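# All three settings use ?=, so they can also be overridden per invocation, e.g.
# (the paths here are placeholders, not recommended values):
# make downloaddata DATA_DIR=/tmp/MSCOCO/
# make evaluate TORCH_INSTALL=/opt/torch/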
########################################################################
# Environment
PATH:=$(HOME)/.local/bin/:$(PATH)
PYTHONPATH:=$(HOME)/.local/lib/python2.7/site-packages/:$(PYTHONPATH)
# Check OS: $(call checkos,flux) expands to "flux" when `uname -a` contains it, else empty.
# Defined here (rather than at the bottom of the file) because the ifeq conditionals and
# the downloaddata prerequisite lists below expand it while the Makefile is being read.
checkos=$(findstring $1,$(shell uname -a))
all: evaluate
evaluate: VQA/Results/OpenEnded_mscoco_lstm_accuracy.json
clean:
	rm -f data_img.h5 data_prepro.h5 data_prepro.json model/lstm.t7
torch: $(TORCH_INSTALL)/.installdone
DATA_TRAIN_URL:=https://filebox.ece.vt.edu/%7Ejiasenlu/codeRelease/vqaRelease/train_only/data_train_val.zip
PRETRAINED_LSTM_MODEL_URL:= https://filebox.ece.vt.edu/%7Ejiasenlu/codeRelease/vqaRelease/train_only/pretrained_lstm_train_val.t7.zip
########################################################################
# Evaluation
########################################################################
# 1. Prepare the features
data_img.h5 data_prepro.h5 data_prepro.json: $(if $(DO_TRAINING),data/vqa_raw_train.json data/vqa_raw_test.json,) $(HOME)/nltk_data/.touch
ifeq ($(strip $(DO_TRAINING)),)
	wget $(DATA_TRAIN_URL) -O data_train_val.zip
	unzip data_train_val.zip
else
	$(modload) && python prepro.py --input_train_json data/vqa_raw_train.json --input_test_json data/vqa_raw_test.json --num_ans 1000
endif
$(HOME)/nltk_data/.touch:
	$(modload) && python -c "import nltk; nltk.download('all')"
	touch $@
# 2. Prepare the LSTM trained model
model/lstm.t7: $(if $(DO_TRAINING),model/VGG_ILSVRC_19_layers.caffemodel model/VGG_ILSVRC_19_layers.prototxt torch,)
ifeq ($(strip $(DO_TRAINING)),)
	mkdir -p model
	wget $(PRETRAINED_LSTM_MODEL_URL) -O pretrained_lstm_train_val.t7.zip
	unzip pretrained_lstm_train_val.t7.zip -d model
else
	$(torch-act) && th prepro_img.lua -input_json data_prepro.json -image_root $(DATA_DIR)/Images/mscoco/ -cnn_proto model/VGG_ILSVRC_19_layers.prototxt -cnn_model model/VGG_ILSVRC_19_layers.caffemodel
	$(torch-act) && th train.lua
	mv model/save/lstm.t7 model/lstm.t7
endif
# 3. Run evaluations on the downloaded features and pretrained LSTM model
result/MultipleChoice_mscoco_lstm_results.json result/OpenEnded_mscoco_lstm_results.json: data_img.h5 data_prepro.h5 data_prepro.json model/lstm.t7 torch
	$(torch-act) && th eval.lua -input_img_h5 data_img.h5 -input_ques_h5 data_prepro.h5 -input_json data_prepro.json -model_path model/lstm.t7
# 4. Clone the evaluation repository
VQA/README.md:
	git clone https://github.com/VT-vision-lab/VQA.git VQA
VQA/Annotations: VQA/README.md
	ln -s $(DATA_DIR)/Annotations/ VQA/Annotations
VQA/Questions: VQA/README.md
	ln -s $(DATA_DIR)/Questions/ VQA/Questions
VQA/Images: VQA/README.md
	ln -s $(DATA_DIR)/Images/ VQA/Images
VQA/Results: VQA/README.md
	ln -s $(CURDIR)/result/ VQA/Results
VQA/Results/%_mscoco_lstm_accuracy.json: result/%_mscoco_lstm_results.json \
VQA/README.md VQA/Annotations VQA/Questions VQA/Images VQA/Results
	$(modload) && cd VQA/PythonEvaluationTools/ && python vqaEvalDemo.py
########################################################################
# Only for training
########################################################################
VGG_ILSVRC_19_LAYERS_CAFFEMODEL_URL:=http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel
VGG_ILSVRC_19_LAYERS_PROTOTXT_URL:=https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/bb2b4fe0a9bb0669211cf3d0bc949dfdda173e9e/VGG_ILSVRC_19_layers_deploy.prototxt
model/VGG_ILSVRC_19_layers.caffemodel:
	mkdir -p $(dir $@)
	wget $(VGG_ILSVRC_19_LAYERS_CAFFEMODEL_URL) -O $@
model/VGG_ILSVRC_19_layers.prototxt:
	mkdir -p $(dir $@)
	wget $(VGG_ILSVRC_19_LAYERS_PROTOTXT_URL) -O $@
data/vqa_raw_train.json data/vqa_raw_test.json:
	$(modload) && cd data && python vqa_preprocessing.py --download True --split 1
########################################################################
# Data download
########################################################################
ANNOTATION_TRAIN_URL:=http://visualqa.org/data/mscoco/vqa/Annotations_Train_mscoco.zip
ANNOTATION_VALIDATION_URL:=http://visualqa.org/data/mscoco/vqa/Annotations_Val_mscoco.zip
QUESTIONS_TRAIN_URL:=http://visualqa.org/data/mscoco/vqa/Questions_Train_mscoco.zip
QUESTIONS_VALIDATION_URL:=http://visualqa.org/data/mscoco/vqa/Questions_Val_mscoco.zip
QUESTIONS_TEST_URL:=http://visualqa.org/data/mscoco/vqa/Questions_Test_mscoco.zip
MSCOCO_TRAIN_IMAGES_URL:=http://msvocds.blob.core.windows.net/coco2014/train2014.zip
MSCOCO_VALIDATION_URL:=http://msvocds.blob.core.windows.net/coco2014/val2014.zip
MSCOCO_TEST_URL:=http://msvocds.blob.core.windows.net/coco2015/test2015.zip
# Canned recipe for downloading and unzipping
define wget-data-to-dir
mkdir -p $2
wget $1 -O tmp.zip
unzip tmp.zip -d $2
endef
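# For example, a recipe line such as
#   $(call wget-data-to-dir,$(ANNOTATION_TRAIN_URL),$(DATA_DIR)/Annotations/)
# expands to the three commands above with $1 and $2 substituted:
#   mkdir -p $(DATA_DIR)/Annotations/
#   wget $(ANNOTATION_TRAIN_URL) -O tmp.zip
#   unzip tmp.zip -d $(DATA_DIR)/Annotations/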
# Required data organization
downloaddata: \
$(DATA_DIR)/Annotations/mscoco_train2014_annotations.json\
$(DATA_DIR)/Annotations/mscoco_val2014_annotations.json\
$(DATA_DIR)/Questions/OpenEnded_mscoco_train2014_questions.json\
$(DATA_DIR)/Questions/OpenEnded_mscoco_val2014_questions.json\
$(DATA_DIR)/Questions/MultipleChoice_mscoco_train2014_questions.json\
$(DATA_DIR)/Questions/MultipleChoice_mscoco_val2014_questions.json\
$(DATA_DIR)/Images/mscoco/train2014/COCO_train2014_000000581921.jpg\
$(DATA_DIR)/Images/mscoco/val2014/COCO_val2014_000000581929.jpg\
$(DATA_DIR)/Images/mscoco/test2015/COCO_test2015_000000581923.jpg
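# The COCO_*.jpg prerequisites are single sample images that serve only as markers that
# the corresponding image zip has already been downloaded and extracted;
# `make downloaddata` fetches and unpacks whichever of the files above are missing.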
$(DATA_DIR)/Annotations/mscoco_train2014_annotations.json:
	$(call wget-data-to-dir,$(ANNOTATION_TRAIN_URL),$(dir $@))
$(DATA_DIR)/Annotations/mscoco_val2014_annotations.json:
	$(call wget-data-to-dir,$(ANNOTATION_VALIDATION_URL),$(dir $@))
$(DATA_DIR)/Questions/OpenEnded_mscoco_train2014_questions.json $(DATA_DIR)/Questions/MultipleChoice_mscoco_train2014_questions.json:
	$(call wget-data-to-dir,$(QUESTIONS_TRAIN_URL),$(dir $@))
$(DATA_DIR)/Questions/OpenEnded_mscoco_val2014_questions.json $(DATA_DIR)/Questions/MultipleChoice_mscoco_val2014_questions.json:
	$(call wget-data-to-dir,$(QUESTIONS_VALIDATION_URL),$(dir $@))
$(DATA_DIR)/Images/mscoco/train2014/COCO_train2014_000000581921.jpg:
	$(call wget-data-to-dir,$(MSCOCO_TRAIN_IMAGES_URL),$(abspath $(dir $@)../))
$(DATA_DIR)/Images/mscoco/val2014/COCO_val2014_000000581929.jpg:
	$(call wget-data-to-dir,$(MSCOCO_VALIDATION_URL),$(abspath $(dir $@)../))
$(DATA_DIR)/Images/mscoco/test2015/COCO_test2015_000000581923.jpg:
	$(call wget-data-to-dir,$(MSCOCO_TEST_URL),$(abspath $(dir $@)../))
########################################################################
# Torch install
########################################################################
$(TORCH_INSTALL)/README.md:
	mkdir -p $(dir $@)
	git clone https://github.com/torch/distro.git $(dir $@) --recursive
CC:=$(shell which gcc)
CXX:=$(shell which g++)
export CC CXX
$(TORCH_INSTALL)/install/bin/th $(TORCH_INSTALL)/.installdone: $(TORCH_INSTALL)/README.md
ifeq ($(call checkos,Ubuntu),Ubuntu)
	cd $(dir $^) && bash install-deps
else ifeq ($(call checkos,flux),flux)
	$(modload) && pip install --user pillow nltk
	$(modload) && CPATH=$$(HDF5_INC) LIBRARY_PATH=$$(HDF5_LINK) pip install --user h5py
endif
	$(modload) && cd $(dir $^) && ./install.sh
	touch $(TORCH_INSTALL)/.installdone
# Load env
modload:=$(if $(call checkos,flux),module load cmake gcc/4.7.0 cuda/6.5 git gnuplot python openmpi cudnn hdf5/1.8.9/gcc/4.7.0,true)
# Activate torch related environment variable
define torch-act
source $(TORCH_INSTALL)/install/bin/torch-activate
endef
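# torch-act is used in recipes as `$(torch-act) && th <script>.lua` so that th and the
# libraries installed under $(TORCH_INSTALL) are on the shell's search paths.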