# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.model import BaseModule

from mmpretrain.registry import MODELS


@MODELS.register_module()
class SimMIMHead(BaseModule):
    """Head for SimMIM Pre-training.

    Args:
        patch_size (int): Patch size of each token.
        loss (dict): The config for loss.
    """

    def __init__(self, patch_size: int, loss: dict) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.loss_module = MODELS.build(loss)

    def loss(self, pred: torch.Tensor, target: torch.Tensor,
             mask: torch.Tensor) -> torch.Tensor:
        """Generate loss.

        This method will expand the mask to the size of the original image.

        Args:
            pred (torch.Tensor): The reconstructed image (B, C, H, W).
            target (torch.Tensor): The target image (B, C, H, W).
            mask (torch.Tensor): The mask of the target image.

        Returns:
            torch.Tensor: The reconstruction loss.
        """
        mask = mask.repeat_interleave(self.patch_size, 1).repeat_interleave(
            self.patch_size, 2).unsqueeze(1).contiguous()

        loss = self.loss_module(pred, target, mask)

        return loss
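

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the upstream file). It shows how the
# head is typically constructed and how the patch-level mask relates to the
# pixel-level loss. The loss config follows the SimMIM configs shipped with
# mmpretrain (an L1 pixel-reconstruction loss); the exact ``type`` name and
# the patch size of 4 are assumptions here, so check your installed version.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    head = SimMIMHead(
        patch_size=4,
        loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3))

    B, C, H, W = 2, 3, 192, 192
    pred = torch.rand(B, C, H, W)    # reconstructed images (B, C, H, W)
    target = torch.rand(B, C, H, W)  # original input images (B, C, H, W)
    # Patch-level mask: one entry per 4x4 patch, 1 = masked, 0 = visible.
    mask = torch.randint(0, 2, (B, H // 4, W // 4)).float()

    # Inside ``loss`` the mask is expanded from (B, H/4, W/4) to (B, 1, H, W)
    # via repeat_interleave before weighting the reconstruction error.
    print(head.loss(pred, target, mask))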