GenVQA · Commit 2af6e15a
authored 8 years ago by tgupta6
add constants file for gpu-3 machine
parent 5387af45
Showing 2 changed files with 301 additions and 0 deletions:

  constants.py                 +2    −0
  constants_vision_gpu_3.py    +299  −0
constants.py  +2 −0
@@ -5,6 +5,8 @@ if hostname=='vision-gpu-1':
     from constants_vision_gpu_1 import *
 elif hostname=='vision-gpu-2':
     from constants_vision_gpu_2 import *
+elif hostname=='vision-gpu-3':
+    from constants_vision_gpu_3 import *
 elif hostname=='crunchy':
     from constants_crunchy import *
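The hunk above extends the hostname dispatch in constants.py so that the vision-gpu-3 machine picks up the new machine-specific constants file. The first lines of constants.py are outside the hunk, so the following is only a minimal sketch of the full dispatch, assuming the hostname is obtained via socket.gethostname(); only the elif chain itself is confirmed by the diff.

# Sketch of the surrounding logic in constants.py (hostname lookup is an
# assumption; the elif chain matches the diff above).
import socket

hostname = socket.gethostname()

if hostname=='vision-gpu-1':
    from constants_vision_gpu_1 import *
elif hostname=='vision-gpu-2':
    from constants_vision_gpu_2 import *
elif hostname=='vision-gpu-3':
    from constants_vision_gpu_3 import *
elif hostname=='crunchy':
    from constants_crunchy import *

With this pattern, any script that does `import constants` transparently receives the paths and hyperparameters appropriate to the machine it runs on.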
constants_vision_gpu_3.py  (new file, mode 100644)  +299 −0
import os
import pdb


def mkdir_if_not_exists(dir_name):
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)


experiment_name = 'ans_through_obj_atr_det_debug_rel'

##########################################################################
#                        Machine Specific Paths                          #
##########################################################################
# Global output directory (all subexperiments will be saved here)
global_output_dir = '/data/tanmay/GenVQA_Exp_Results'

global_experiment_dir = os.path.join(
    global_output_dir,
    experiment_name)

tb_log_dir = os.path.join(
    global_experiment_dir,
    'tensorboard_logdir')

mkdir_if_not_exists(global_output_dir)
mkdir_if_not_exists(global_experiment_dir)
mkdir_if_not_exists(tb_log_dir)

# Genome Parent Directory
data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'

# Pretrained resnet ckpt
resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
              'ResNet-L50.ckpt'

# Pretrained word vectors
word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
                  'GoogleNews-vectors-negative300.bin'

# VQA parent directory
vqa_basedir = '/home/nfs/tgupta6/data/VQA/'

# Pretrained obj atr model to be restored
pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' + \
                   'object_attribute_classifier_large_images_vqa_split/' + \
                   'object_attribute_classifiers/model-80000'

##########################################################################
#                           Model Parameters                             #
##########################################################################
# height and width to which images are resized before feeding into networks
image_size = (224, 224)

# Token to be used if object or attribute variable is unknown
unknown_token = 'UNK'

##########################################################################
#                   Labels and Word2Vec Parameters                       #
##########################################################################
num_object_labels = 1000
num_attribute_labels = 1000
word_vector_size = 300
resnet_feat_dim = 2048

# Numpy matrix storing vocabulary word vectors
pretrained_vocab_word_vectors_npy = os.path.join(
    data_absolute_path,
    'restructured/pretrained_vocab_word_vectors.npy')

##########################################################################
#                          Genome Parameters                             #
##########################################################################
# Genome paths
image_dir = os.path.join(
    data_absolute_path,
    'cropped_regions_large')

genome_resnet_feat_dir = os.path.join(
    data_absolute_path,
    'cropped_regions_large_resnet_features')

object_labels_json = os.path.join(
    data_absolute_path,
    'restructured/object_labels.json')

attribute_labels_json = os.path.join(
    data_absolute_path,
    'restructured/attribute_labels.json')

regions_json = os.path.join(
    data_absolute_path,
    'restructured/region_with_hypernym_labels.json')

mean_image_filename = os.path.join(
    data_absolute_path,
    'restructured/mean_image.jpg')

vocab_json = os.path.join(
    data_absolute_path,
    'restructured/vocab_subset.json')

genome_train_region_ids = os.path.join(
    data_absolute_path,
    'restructured/train_region_ids_simple.json')

genome_val_region_ids = os.path.join(
    data_absolute_path,
    'restructured/val_region_ids_simple.json')

genome_test_region_ids = os.path.join(
    data_absolute_path,
    'restructured/test_region_ids_simple.json')

##########################################################################
#              Genome Training/Fine-Tuning Parameters                    #
##########################################################################
# Logging parameters
region_log_every_n_iter = 500

region_output_dir = os.path.join(
    global_experiment_dir,
    'object_attribute_classifiers')
mkdir_if_not_exists(region_output_dir)

region_model = os.path.join(
    region_output_dir,
    'model')

# Object Attribute Classifier Training Params
region_batch_size = 200
region_num_epochs = 20
region_queue_size = 400
region_regularization_coeff = 1e-5
region_lr = 1e-3

# Object Attribute Finetuning Params
region_fine_tune_from_iter = 50500
region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)

##########################################################################
#                   Genome Evaluation Parameters                         #
##########################################################################
# Object Attribute Model Selection
region_start_model = 8000
region_step_size = 8000
region_model_accuracies_txt = os.path.join(
    region_output_dir,
    'model_accuracies.txt')

# Object Attribute Classifier Evaluation Params
region_eval_on = 'train_held_out'  # One of {'test','train_held_out','train_subset'}
region_model_to_eval = region_model + '-' + '102000'

# Path to results
visualize_object_predictions = False
region_pred_vis_dirname = os.path.join(
    region_output_dir,
    'region_pred_vis_' + region_eval_on)
mkdir_if_not_exists(region_pred_vis_dirname)

region_attribute_scores_dirname = os.path.join(
    region_output_dir,
    'attribute_scores')

region_object_scores_dirname = os.path.join(
    region_output_dir,
    'object_scores')

mkdir_if_not_exists(region_attribute_scores_dirname)
mkdir_if_not_exists(region_object_scores_dirname)

##########################################################################
#                            VQA Parameters                              #
##########################################################################
# Answer prediction
num_region_proposals = 100
num_mcq_candidates = 18
num_negative_answers = num_mcq_candidates - 1

# VQA data paths
vqa_train_image_dir = os.path.join(
    vqa_basedir,
    'train2014_cropped_large')

vqa_train_resnet_feat_dir = os.path.join(
    vqa_basedir,
    'train2014_cropped_large_resnet_features')

vqa_train_anno = os.path.join(
    vqa_basedir,
    'mscoco_train2014_annotations_with_parsed_questions.json')

vqa_train_subset_qids = os.path.join(
    vqa_basedir,
    'train_subset_qids.json')

vqa_train_held_out_qids = os.path.join(
    vqa_basedir,
    'train_held_out_qids.json')

vqa_val_image_dir = os.path.join(
    vqa_basedir,
    'val2014_cropped_large')

vqa_val_resnet_feat_dir = os.path.join(
    vqa_basedir,
    'val2014_cropped_large_resnet_features')

vqa_val_anno = os.path.join(
    vqa_basedir,
    'mscoco_val2014_annotations_with_parsed_questions.json')

vqa_val_qids = os.path.join(
    vqa_basedir,
    'val_qids.json')

vqa_answer_vocab_json = os.path.join(
    vqa_basedir,
    'answer_vocab.json')

# Dataset statistics
# num_train_questions = 248349
# num_train_held_out_questions = 12500
# num_train_subset_questions = num_train_questions - num_train_held_out_questions
# num_val_questions = 121512

##########################################################################
#               VQA Training/Finetuning Parameters                       #
##########################################################################
# Logging parameters
answer_log_every_n_iter = 500

answer_output_dir = os.path.join(
    global_experiment_dir,
    'answer_classifiers')
mkdir_if_not_exists(answer_output_dir)

answer_model = os.path.join(
    answer_output_dir,
    'model')

# Answer classifier training params
answer_train_from_scratch = True
answer_batch_size = 25
answer_num_epochs = 20
answer_queue_size = 500
answer_regularization_coeff = 1e-5
answer_lr = 1e-3
answer_obj_atr_loss_wt = 0.1
answer_ans_loss_wt = 1.0
answer_mil_loss_wt = 0.0

# Answer classifier additional joint training params
num_regions_with_labels = 100

# Answer fine tune params
answer_fine_tune_from_iter = 29500
answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)

##########################################################################
#                    VQA Evaluation Parameters                           #
##########################################################################
# Select best model
models_dir = answer_output_dir
start_model = 30000
step_size = 4000
model_accuracies_txt = os.path.join(
    answer_output_dir,
    'model_accuracies.txt')

# Answer eval params
answer_eval_on = 'val'
answer_model_to_eval = answer_model + '-66000'

vqa_results_dir = os.path.join(
    answer_output_dir,
    'Results')
mkdir_if_not_exists(vqa_results_dir)

answer_eval_data_json = os.path.join(
    vqa_results_dir,
    'eval_' + answer_eval_on + '_data.json')

answer_eval_results_json = os.path.join(
    vqa_results_dir,
    'eval_' + answer_eval_on + '_results.json')

# Fine Grained Evaluation File paths
raw_vqa_val_ques_json = os.path.join(
    vqa_basedir,
    'MultipleChoice_mscoco_val2014_questions.json')

raw_vqa_val_anno_json = os.path.join(
    vqa_basedir,
    'mscoco_val2014_annotations.json')