_base_ = [
    '../_base_/default_runtime.py',
]
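# The `_base_` entry above inherits MMPretrain's shared default runtime
# settings (default hooks, logging and environment configuration).
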
zeroshot_prompt = (
    'Output:A child holding a flowered umbrella and petting a yak.<|endofchunk|>'  # noqa: E501
    'Output:The child is holding a brush close to his mouth.<|endofchunk|>'  # noqa: E501
)
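# Note: in the zero-shot setting this prompt supplies two text-only
# in-context examples that prime the model to emit a caption after the
# 'Output:' marker; the query image itself is only appended later through
# `final_prompt_tmpl` ('<image>Output:').
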
# model settings
model = dict(
    type='Flamingo',
    tokenizer=dict(
        type='LlamaTokenizer', name_or_path='decapoda-research/llama-7b-hf'),
    vision_encoder=dict(
        type='VisionTransformer',
        arch='l',
        patch_size=14,
        pre_norm=True,
        norm_cfg=dict(type='LN', eps=1e-5),
        layer_cfgs=dict(act_cfg=dict(type='QuickGELU')),
        final_norm=False,
        out_type='raw',
        pretrained=(
            'https://download.openmmlab.com/mmclassification/v0/clip/'
            'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'),
    ),
    lang_encoder=dict(
        base=dict(
            type='AutoModelForCausalLM',
            name_or_path='decapoda-research/llama-7b-hf',
            local_files_only=True),
        adapter=dict(
            type='FlamingoLMAdapter',
            vis_hidden_size=1024,
            cross_attn_every_n_layers=4,
            use_media_placement_augmentation=False),
    ),
    task='caption',
    zeroshot_prompt=zeroshot_prompt,
    final_prompt_tmpl='<image>Output:',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
)
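# Notes on the model above:
# - the vision encoder is a CLIP-pretrained ViT-L/14 whose hidden size
#   (1024) matches `vis_hidden_size` in the adapter;
# - `FlamingoLMAdapter` wraps the LLaMA-7B causal language model and inserts
#   Flamingo-style gated cross-attention layers every 4 LM blocks
#   (`cross_attn_every_n_layers=4`);
# - captions are decoded with beam search (3 beams, at most 20 new tokens);
#   the negative `length_penalty` biases decoding towards shorter captions.
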
# data settings
data_preprocessor = dict(
    type='MultiModalDataPreprocessor',
    mean=[122.770938, 116.7460125, 104.09373615],
    std=[68.5005327, 66.6321579, 70.32316305],
    to_rgb=True,
)
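# The mean/std above are the CLIP normalization constants scaled to the
# 0-255 pixel range (e.g. 0.48145466 * 255 ≈ 122.77), matching the
# CLIP-pretrained vision encoder.
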
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=224,
        interpolation='bicubic',
        backend='pillow'),
    dict(type='CenterCrop', crop_size=(224, 224)),
    dict(
        type='PackInputs',
        algorithm_keys=['gt_caption'],
        meta_keys=['image_id'],
    ),
]
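# The pipeline resizes the short image edge to 224 and center-crops to
# 224x224, the input resolution of the ViT-L/14 encoder; `PackInputs` keeps
# the ground-truth caption and image id needed by the caption metric.
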
val_dataloader = dict(
    batch_size=8,
    num_workers=8,
    dataset=dict(
        type='FlamingoEvalCOCOCaption',
        data_root='data/coco',
        ann_file='annotations/captions_train2014.json',
        data_prefix=dict(img_path='train2014'),
        pipeline=test_pipeline,
        num_shots=0,
        num_support_examples=2048,
        num_query_examples=5000,
    ),
    sampler=dict(type='DefaultSampler', shuffle=False),
    persistent_workers=True,
)
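# `num_shots=0` selects zero-shot evaluation: the dataset samples 2048
# candidate support examples and 5000 query examples from the COCO train2014
# annotations, as in the OpenFlamingo evaluation setup; with zero shots no
# support images are prepended and the text-only `zeroshot_prompt` is used
# instead.
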
val_evaluator = dict(
    type='COCOCaption',
    ann_file='data/coco/annotations/captions_train2014.json')
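# `COCOCaption` scores the generated captions against the same annotation
# file with the standard COCO caption metrics (e.g. BLEU and CIDEr).
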
# To run a standard test, configure the test dataset manually.
test_dataloader = val_dataloader
test_evaluator = val_evaluator

# schedule settings
val_cfg = dict()
test_cfg = dict()
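
# A typical way to run this zero-shot evaluation with MMPretrain's test
# script (the config and checkpoint paths below are placeholders; adjust
# them to your local paths and converted Flamingo weights):
#   python tools/test.py ${THIS_CONFIG_FILE} ${CHECKPOINT_FILE}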