# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
import torch.nn.functional as F
from mmengine.model import BaseModule

from mmpretrain.models.utils.box_utils import (box_cxcywh_to_xyxy,
                                               generalized_box_iou)
from mmpretrain.registry import MODELS, TOKENIZER
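
# GroundingHead casts box regression as token generation: the decoder attends
# over the concatenation of image-encoder hidden states and text embeddings
# and emits four tokens, one per normalized (cx, cy, w, h) coordinate. Each
# coordinate is quantized into ``image_res + 1 = 641`` bins mapped onto the
# reserved tokenizer tokens '[unused340]' ... '[unused980]', with
# '[unused339]' serving as the generation prompt (e.g. a coordinate of 0.5
# falls into bin 320 and is emitted as '[unused660]').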
@MODELS.register_module()
class GroundingHead(BaseModule):
    """Bounding-box coordinate generation head for multi-modal pre-trained
    models, adapted from BLIP. Normally used for visual grounding.

    Args:
        decoder (dict, optional): Config of the decoder. Defaults to None.
        tokenizer (dict, optional): Config of the tokenizer, or an already
            built tokenizer instance. Defaults to None.
        box_l1_loss_coeff (float): Weight of the L1 term used to re-weight
            the token loss. Defaults to 4.0.
        box_giou_loss_coeff (float): Weight of the (1 - GIoU) term used to
            re-weight the token loss. Defaults to 2.0.
        init_cfg (dict, optional): The config to control the initialization.
            Defaults to None.
    """

    def __init__(
        self,
        decoder: dict = None,
        tokenizer: dict = None,
        box_l1_loss_coeff=4.0,
        box_giou_loss_coeff=2.0,
        init_cfg: Optional[dict] = None,
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        # Init the decoder from the MED config.
        self.decoder = None
        if decoder:
            self.decoder = MODELS.build(decoder)
        self.loss_fn = torch.nn.CrossEntropyLoss(
            reduction='none', ignore_index=-100)

        self.box_l1_loss_coeff = box_l1_loss_coeff
        self.box_giou_loss_coeff = box_giou_loss_coeff

        if isinstance(tokenizer, dict):
            self.tokenizer = TOKENIZER.build(tokenizer)
        else:
            self.tokenizer = tokenizer

        self.image_res = 640
        prefix_ids = torch.tensor(
            self.tokenizer.convert_tokens_to_ids(['[unused339]']))
        target_ids = torch.tensor(
            self.tokenizer.convert_tokens_to_ids(
                [f'[unused{340+_}]' for _ in range(self.image_res + 1)]))
        self.register_buffer('prefix_ids', prefix_ids)
        self.register_buffer('target_ids', target_ids)

        # Additive mask (-10000 outside the bin-token range) that restricts
        # predictions to the coordinate-bin tokens.
        bbox_prob_mask = torch.zeros(len(self.tokenizer))
        bbox_prob_mask[self.target_ids[0]:self.target_ids[-1] + 1] = 1
        bbox_prob_mask = (1.0 - bbox_prob_mask) * -10000.0
        self.register_buffer('bbox_prob_mask', bbox_prob_mask)
        self.bin_start_idx = self.target_ids[0]
    def forward(self, text_embedding, text_embedding_mask,
                encoder_hidden_states, encoder_attention_mask):

        # localize prompt token, text embedding
        merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding],
                                     1)
        merge_att_mask = torch.cat(
            [encoder_attention_mask, text_embedding_mask], 1)

        # Note: ``self.prompt`` and ``self.box_head`` are not defined in
        # ``__init__`` above; this forward path assumes they are assigned
        # elsewhere.
        loc_prompt = self.prompt.weight.T
        loc_prompt = torch.repeat_interleave(loc_prompt,
                                             merge_att_mask.shape[0],
                                             0).unsqueeze(1)

        loc_prompt_mask = torch.ones(loc_prompt.shape[:-1]).long().to(
            loc_prompt.device)

        decoder_out = self.decoder(
            inputs_embeds=loc_prompt,
            attention_mask=loc_prompt_mask,
            encoder_hidden_states=merged_encode_hs,
            encoder_attention_mask=merge_att_mask,
            output_hidden_states=True,
            labels=None,
        )
        decoder_hs = decoder_out.hidden_states[-1][:, 0, :]
        box_pred = self.box_head(decoder_hs)
        return decoder_out, decoder_hs, box_pred
    def loss(self,
             text_embedding,
             text_embedding_mask,
             encoder_hidden_states,
             encoder_attention_mask,
             decoder_targets,
             return_scores=False):
        """Calculate losses from the extracted features.

        Args:
            text_embedding (torch.Tensor): The text embeddings.
            text_embedding_mask (torch.Tensor): Attention mask of the text
                embeddings.
            encoder_hidden_states (torch.Tensor): Hidden states from the
                image encoder.
            encoder_attention_mask (torch.Tensor): Attention mask of the
                image-encoder hidden states.
            decoder_targets (torch.Tensor): Ground-truth boxes in normalized
                (cx, cy, w, h) format, with shape (N, 4).
            return_scores (bool): Not used in the current implementation.
                Defaults to False.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding],
                                     1)
        merge_att_mask = torch.cat(
            [encoder_attention_mask, text_embedding_mask], 1)

        # Quantize the normalized coordinates into bin tokens and prepend
        # the prompt token.
        answer_targets = (decoder_targets *
                          self.image_res).long() + self.bin_start_idx
        prefix_ids = torch.repeat_interleave(self.prefix_ids,
                                             merge_att_mask.shape[0],
                                             0).unsqueeze(-1)
        prefix_ids = torch.cat([prefix_ids, answer_targets], dim=1)

        answer_output = self.decoder(
            prefix_ids,
            encoder_hidden_states=merged_encode_hs,
            encoder_attention_mask=merge_att_mask,
            labels=None,
            return_dict=True,
        )
        # Mask out all logits except the coordinate-bin tokens.
        prob_mask = self.bbox_prob_mask.view(1, 1,
                                             self.bbox_prob_mask.shape[-1])
        prediction_scores = answer_output.logits + prob_mask

        shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
        labels = prefix_ids[:, 1:].contiguous()
        vocab_size = len(self.tokenizer)
        loss_seq_init = self.loss_fn(
            shifted_prediction_scores.view(-1, vocab_size), labels.view(-1))

        # Compute detached re-weighting terms from the argmax-decoded boxes.
        with torch.no_grad():
            pred_box = (torch.argmax(
                prediction_scores[:, :-1, :].contiguous(), dim=-1) -
                        self.bin_start_idx) / self.image_res
            weight_bbox = F.l1_loss(
                pred_box, decoder_targets, reduction='none').clamp(
                    0, 5) * self.box_l1_loss_coeff
            weight_giou = (1 - torch.diag(
                generalized_box_iou(
                    box_cxcywh_to_xyxy(pred_box),
                    box_cxcywh_to_xyxy(decoder_targets)))
                           ) * self.box_giou_loss_coeff

        # Re-weight the (differentiable) per-token cross-entropy by the L1
        # and GIoU weights computed above.
        bs = text_embedding.shape[0]
        loss_seq = loss_seq_init[:].view(bs, -1, 4)
        loss_seq = loss_seq * weight_bbox
        loss_seq = loss_seq * weight_giou.unsqueeze(1)

        loss_seq = loss_seq.mean()

        losses = {
            'loss_seq': loss_seq,
            'loss_seq_init': loss_seq_init.mean(),
            'loss': loss_seq,
            'box_l1': weight_bbox.mean(-1).mean().detach(),
            'box_giou': weight_giou.mean().detach()
        }

        return losses
    def predict(
        self,
        text_embedding,
        text_embedding_mask,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Generates the bbox coordinates at inference time."""
        merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding],
                                     1)
        merge_att_mask = torch.cat(
            [encoder_attention_mask, text_embedding_mask], 1)

        prefix_ids = torch.repeat_interleave(self.prefix_ids,
                                             merge_att_mask.shape[0],
                                             0).unsqueeze(-1)

        # Autoregressively generate one bin token per coordinate
        # (cx, cy, w, h).
        for _ in range(4):
            decoder_output = self.decoder(
                prefix_ids,
                encoder_hidden_states=merged_encode_hs,
                encoder_attention_mask=merge_att_mask,
                labels=None,
                return_dict=True,
            )
            prob_mask = self.bbox_prob_mask.view(
                1, 1, self.bbox_prob_mask.shape[-1])
            prediction_scores = decoder_output.logits + prob_mask

            prefix_ids = torch.cat([
                prefix_ids,
                torch.argmax(prediction_scores[:, -1, :], dim=-1).unsqueeze(1)
            ],
                                   dim=1)

        # cxcywh in [0, 1] to xyxy in [0, 1].
        pred_box = self.process_bbox(prefix_ids[:, 1:])

        return pred_box
    @torch.no_grad()
    def process_bbox(self, bbox):
        """Convert generated bin tokens back to normalized xyxy boxes."""
        bbox = bbox - self.bin_start_idx
        bbox = torch.true_divide(bbox, self.image_res)
        bbox = box_cxcywh_to_xyxy(bbox)
        bbox = torch.clip(bbox, 0, 1)
        assert torch.all(bbox <= 1)
        return bbox
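

if __name__ == '__main__':
    # Illustrative sketch of the coordinate-binning scheme used above; it is
    # not exercised by mmpretrain and only shows the arithmetic of ``loss()``
    # and ``process_bbox()``. ``bin_start`` is a hypothetical stand-in for
    # ``self.bin_start_idx`` (the tokenizer id of '[unused340]').
    image_res = 640
    bin_start = 1000

    # A normalized (cx, cy, w, h) box, as expected by ``loss()``.
    box_cxcywh = torch.tensor([[0.5000, 0.5000, 0.2500, 0.2500]])

    # Encode: the same quantization as ``loss()``.
    token_ids = (box_cxcywh * image_res).long() + bin_start
    # -> tensor([[1320, 1320, 1160, 1160]])

    # Decode: the same mapping as ``process_bbox()``.
    decoded = torch.true_divide(token_ids - bin_start, image_res)
    decoded_xyxy = torch.clip(box_cxcywh_to_xyxy(decoded), 0, 1)
    # -> tensor([[0.3750, 0.3750, 0.6250, 0.6250]])

    # The re-weighting terms from ``loss()``: per-coordinate L1 distance and
    # (1 - GIoU) between a predicted box and the target; in ``loss()`` these
    # are computed under torch.no_grad().
    pred_box = torch.tensor([[0.4000, 0.5000, 0.3000, 0.2500]])
    weight_bbox = F.l1_loss(
        pred_box, box_cxcywh, reduction='none').clamp(0, 5) * 4.0
    weight_giou = (1 - torch.diag(
        generalized_box_iou(
            box_cxcywh_to_xyxy(pred_box),
            box_cxcywh_to_xyxy(box_cxcywh)))) * 2.0
    print(token_ids, decoded_xyxy, weight_bbox, weight_giou)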