# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmpretrain.registry import MODELS
from .mae_head import MAEPretrainHead


@MODELS.register_module()
class MixMIMPretrainHead(MAEPretrainHead):
    """Head for MixMIM Pre-training.

    Args:
        loss (dict): Config of loss.
        norm_pix (bool): Whether or not to normalize the target.
            Defaults to False.
        patch_size (int): Patch size. Defaults to 16.
    """
    def __init__(self,
                 loss: dict,
                 norm_pix: bool = False,
                 patch_size: int = 16) -> None:
        super().__init__(loss=loss, norm_pix=norm_pix, patch_size=patch_size)
    def loss(self, x_rec: torch.Tensor, target: torch.Tensor,
             mask: torch.Tensor) -> torch.Tensor:
        """Generate loss.

        Args:
            x_rec (torch.Tensor): The reconstructed image.
            target (torch.Tensor): The target image.
            mask (torch.Tensor): The mask of the target image.

        Returns:
            torch.Tensor: The reconstruction loss.
        """
        target = self.construct_target(target)

        B, L, C = x_rec.shape

        # unmix tokens: the two halves of the doubled batch are the
        # reconstructions of the two mixed views; flipping the second half
        # realigns each image with its mixing partner
        x1_rec = x_rec[:B // 2]
        x2_rec = x_rec[B // 2:]

        unmix_x_rec = x1_rec * mask + x2_rec.flip(0) * (1 - mask)

        loss_rec = self.loss_module(unmix_x_rec, target)

        return loss_rec
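

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: it replays the
# unmixing arithmetic from ``loss`` with plain tensors to show that, given
# ideal reconstructions of the two mixed views stacked along the batch
# dimension, the unmix step recovers the mask-weighted mixture of the
# originals. All shapes here are hypothetical; real values come from the
# MixMIM backbone and neck.
if __name__ == '__main__':
    N, L, C = 2, 196, 768  # images per view, tokens, per-token channels
    mask = torch.randint(0, 2, (N, L, 1)).float()

    x1 = torch.rand(N, L, C)  # tokens of the first set of images
    x2 = torch.rand(N, L, C)  # tokens of their mixing partners

    # Pretend the decoder predicted both views perfectly, with the second
    # half of the doubled batch stored in flipped order, as ``loss`` expects.
    x_rec = torch.cat([x1, x2.flip(0)], dim=0)

    B = x_rec.shape[0]
    x1_rec, x2_rec = x_rec[:B // 2], x_rec[B // 2:]
    unmix_x_rec = x1_rec * mask + x2_rec.flip(0) * (1 - mask)

    # With perfect predictions, unmixing yields exactly the mask-weighted
    # mixture of the two originals.
    assert torch.allclose(unmix_x_rec, x1 * mask + x2 * (1 - mask))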