From 51a2a15f1ef5138e38b4933c654280c7978262e9 Mon Sep 17 00:00:00 2001
From: HustQBW <bowenqu@stu.pku.edu.cn>
Date: Mon, 25 Sep 2023 01:42:49 +0800
Subject: [PATCH] Add initial RAM (Recognize Anything Model) support

---
 mmpretrain/models/multimodal/__init__.py      |   24 +-
 mmpretrain/models/multimodal/ram/__init__.py  |    4 +
 mmpretrain/models/multimodal/ram/bert.py      | 1197 +++++++++++++++++
 .../models/multimodal/ram/config/__init__.py  |    1 +
 .../ram/config/ram_swin_large_14m.py          |   93 ++
 .../multimodal/ram/data/ram_tag_list.pickle   |  Bin 0 -> 51099 bytes
 .../ram/data/ram_tag_list_chinese.pickle      |  Bin 0 -> 50796 bytes
 .../ram/data/ram_tag_list_threshold.pickle    |  Bin 0 -> 41289 bytes
 .../models/multimodal/ram/gradio_demo.py      |  109 ++
 .../models/multimodal/ram/openset_utils.py    |  212 +++
 mmpretrain/models/multimodal/ram/ram.py       |  332 +++++
 .../models/multimodal/ram/run/__init__.py     |    1 +
 .../models/multimodal/ram/run/inference.py    |   29 +
 mmpretrain/models/multimodal/ram/utils.py     |   87 ++
 mmpretrain/models/utils/tokenizer.py          |    1 +
 tools/model_converters/ram2mmpretrain.py      |  117 ++
 16 files changed, 2203 insertions(+), 4 deletions(-)
 create mode 100644 mmpretrain/models/multimodal/ram/__init__.py
 create mode 100644 mmpretrain/models/multimodal/ram/bert.py
 create mode 100644 mmpretrain/models/multimodal/ram/config/__init__.py
 create mode 100644 mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py
 create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle
 create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle
 create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_threshold.pickle
 create mode 100644 mmpretrain/models/multimodal/ram/gradio_demo.py
 create mode 100644 mmpretrain/models/multimodal/ram/openset_utils.py
 create mode 100644 mmpretrain/models/multimodal/ram/ram.py
 create mode 100644 mmpretrain/models/multimodal/ram/run/__init__.py
 create mode 100644 mmpretrain/models/multimodal/ram/run/inference.py
 create mode 100644 mmpretrain/models/multimodal/ram/utils.py
 create mode 100644 tools/model_converters/ram2mmpretrain.py

diff --git a/mmpretrain/models/multimodal/__init__.py b/mmpretrain/models/multimodal/__init__.py
index 73645f0f..e6650cfe 100644
--- a/mmpretrain/models/multimodal/__init__.py
+++ b/mmpretrain/models/multimodal/__init__.py
@@ -11,13 +11,29 @@ if WITH_MULTIMODAL:
     from .minigpt4 import *  # noqa: F401, F403
     from .ofa import *  # noqa: F401, F403
     from .otter import *  # noqa: F401, F403
+    from .ram import *  # noqa: F401, F403
 else:
     from mmpretrain.registry import MODELS
     from mmpretrain.utils.dependency import register_multimodal_placeholder
 
     register_multimodal_placeholder([
-        'Blip2Caption', 'Blip2Retrieval', 'Blip2VQA', 'BlipCaption',
-        'BlipNLVR', 'BlipRetrieval', 'BlipGrounding', 'BlipVQA', 'Flamingo',
-        'OFA', 'ChineseCLIP', 'MiniGPT4', 'Llava', 'Otter', 'CLIP',
-        'CLIPZeroShot'
+        'Blip2Caption',
+        'Blip2Retrieval',
+        'Blip2VQA',
+        'BlipCaption',
+        'BlipNLVR',
+        'BlipRetrieval',
+        'BlipGrounding',
+        'BlipVQA',
+        'Flamingo',
+        'OFA',
+        'ChineseCLIP',
+        'MiniGPT4',
+        'Llava',
+        'Otter',
+        'CLIP',
+        'CLIPZeroShot',
+        'RAM',
+        'RAMNormal',
+        'RAMOpenset',
     ], MODELS)
diff --git a/mmpretrain/models/multimodal/ram/__init__.py b/mmpretrain/models/multimodal/ram/__init__.py
new file mode 100644
index 00000000..35619d88
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .ram import RAM, RAMNormal, RAMOpenset
+
+__all__ = ['RAM', 'RAMNormal', 'RAMOpenset']
diff --git a/mmpretrain/models/multimodal/ram/bert.py b/mmpretrain/models/multimodal/ram/bert.py
new file mode 100644
index 00000000..f54b2ce8
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/bert.py
@@ -0,0 +1,1197 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Modify from:
+# https://github.com/xinyu1205/recognize-anything/blob/main/ram/models/bert.py
+
+import math
+from typing import Tuple
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, device, nn
+from torch.nn import CrossEntropyLoss
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPastAndCrossAttentions,
+    BaseModelOutputWithPoolingAndCrossAttentions,
+    CausalLMOutputWithCrossAttentions)
+from transformers.modeling_utils import (PreTrainedModel,
+                                         apply_chunking_to_forward,
+                                         find_pruneable_heads_and_indices,
+                                         prune_linear_layer)
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class BertEmbeddings_nopos(nn.Module):
+    """Construct the embeddings from word and position embeddings."""
+
+    def __init__(self, config):
+        super().__init__()
+        self.word_embeddings = nn.Embedding(
+            config.vocab_size,
+            config.hidden_size,
+            padding_idx=config.pad_token_id)
+        # self.position_embeddings = nn.Embedding(
+        #               config.max_position_embeddings, config.hidden_size)
+        # self.LayerNorm is not snake-cased to stick with
+        # TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = nn.LayerNorm(
+            config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous
+        # in memory and exported when serialized
+        # self.register_buffer("position_ids",
+        #       torch.arange(config.max_position_embeddings).expand((1, -1)))
+        # self.position_embedding_type = \
+        #           getattr(config, "position_embedding_type", "absolute")
+
+        self.config = config
+
+    def forward(self,
+                input_ids=None,
+                position_ids=None,
+                inputs_embeds=None,
+                past_key_values_length=0):
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]  # noqa: F841
+
+        # if position_ids is None:
+        #   position_ids = self.position_ids[:, \
+        #       past_key_values_length : seq_length + \
+        #       past_key_values_length]
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+
+        embeddings = inputs_embeds
+
+        # if self.position_embedding_type == "absolute":
+        #     position_embeddings = self.position_embeddings(position_ids)
+        #     # print('add position_embeddings!!!!')
+        #     embeddings += position_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class BertEmbeddings(nn.Module):
+    """Construct the embeddings from word and position embeddings."""
+
+    def __init__(self, config):
+        super().__init__()
+        self.word_embeddings = nn.Embedding(
+            config.vocab_size,
+            config.hidden_size,
+            padding_idx=config.pad_token_id)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings,
+                                                config.hidden_size)
+
+        # self.LayerNorm is not snake-cased to stick with
+        # TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = nn.LayerNorm(
+            config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous
+        # in memory and exported when serialized
+        self.register_buffer(
+            'position_ids',
+            torch.arange(config.max_position_embeddings).expand((1, -1)))
+        self.position_embedding_type = getattr(config,
+                                               'position_embedding_type',
+                                               'absolute')
+
+        self.config = config
+
+    def forward(self,
+                input_ids=None,
+                position_ids=None,
+                inputs_embeds=None,
+                past_key_values_length=0):
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]
+
+        if position_ids is None:
+            position_ids = self.position_ids[:, past_key_values_length:
+                                             seq_length +
+                                             past_key_values_length]
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+
+        embeddings = inputs_embeds
+
+        if self.position_embedding_type == 'absolute':
+            position_embeddings = self.position_embeddings(position_ids)
+            # print('add position_embeddings!!!!')
+            embeddings += position_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class BertSelfAttention(nn.Module):
+
+    def __init__(self, config, is_cross_attention):
+        super().__init__()
+        self.config = config
+        if config.hidden_size % config.num_attention_heads != 0 and \
+                not hasattr(config, 'embedding_size'):
+            raise ValueError(
+                'The hidden size (%d) is not a multiple of the number of '
+                'attention heads (%d)' %
+                (config.hidden_size, config.num_attention_heads))
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size /
+                                       config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * \
+            self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        if is_cross_attention:
+            self.key = nn.Linear(config.encoder_width, self.all_head_size)
+            self.value = nn.Linear(config.encoder_width, self.all_head_size)
+        else:
+            self.key = nn.Linear(config.hidden_size, self.all_head_size)
+            self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+        self.position_embedding_type = getattr(config,
+                                               'position_embedding_type',
+                                               'absolute')
+        if (self.position_embedding_type == 'relative_key'
+                or self.position_embedding_type == 'relative_key_query'):
+            self.max_position_embeddings = config.max_position_embeddings
+            self.distance_embedding = nn.Embedding(
+                2 * config.max_position_embeddings - 1,
+                self.attention_head_size)
+        self.save_attention = False
+
+    def save_attn_gradients(self, attn_gradients):
+        self.attn_gradients = attn_gradients
+
+    def get_attn_gradients(self):
+        return self.attn_gradients
+
+    def save_attention_map(self, attention_map):
+        self.attention_map = attention_map
+
+    def get_attention_map(self):
+        return self.attention_map
+
+    def transpose_for_scores(self, x):
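+        # Reshape (batch, seq_len, all_head_size) into
+        # (batch, num_heads, seq_len, head_size) so attention is computed
+        # independently per head.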
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
+                                       self.attention_head_size)
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+    ):
+        mixed_query_layer = self.query(hidden_states)
+
+        # If this is instantiated as a cross-attention module, the keys
+        # and values come from an encoder; the attention mask needs to be
+        # such that the encoder's padding tokens are not attended to.
+        is_cross_attention = encoder_hidden_states is not None
+
+        if is_cross_attention:
+            # print(self.key.weight.shape)
+            key_layer = self.transpose_for_scores(
+                self.key(encoder_hidden_states))
+            value_layer = self.transpose_for_scores(
+                self.value(encoder_hidden_states))
+            attention_mask = encoder_attention_mask
+        elif past_key_value is not None:
+            key_layer = self.transpose_for_scores(self.key(hidden_states))
+            value_layer = self.transpose_for_scores(self.value(hidden_states))
+            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+        else:
+            key_layer = self.transpose_for_scores(self.key(hidden_states))
+            value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+
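+        # Cache the key/value states so that later decoding steps can reuse
+        # them instead of recomputing attention over past tokens.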
+        past_key_value = (key_layer, value_layer)
+
+        # compatible with higher versions of transformers
+        if key_layer.shape[0] > query_layer.shape[0]:
+            key_layer = key_layer[:query_layer.shape[0], :, :, :]
+            attention_mask = attention_mask[:query_layer.shape[0], :, :]
+            value_layer = value_layer[:query_layer.shape[0], :, :, :]
+
+        # Take the dot product between "query" and "key"
+        # to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer,
+                                        key_layer.transpose(-1, -2))
+
+        if (self.position_embedding_type == 'relative_key'
+                or self.position_embedding_type == 'relative_key_query'):
+            seq_length = hidden_states.size()[1]
+            position_ids_l = torch.arange(
+                seq_length, dtype=torch.long,
+                device=hidden_states.device).view(-1, 1)
+            position_ids_r = torch.arange(
+                seq_length, dtype=torch.long,
+                device=hidden_states.device).view(1, -1)
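+            # distance[i, j] = i - j lies in [-(L-1), L-1]; shifting it by
+            # max_position_embeddings - 1 below gives a valid embedding index.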
+            distance = position_ids_l - position_ids_r
+            positional_embedding = self.distance_embedding(
+                distance + self.max_position_embeddings - 1)
+            positional_embedding = positional_embedding.to(
+                dtype=query_layer.dtype)  # fp16 compatibility
+
+            if self.position_embedding_type == 'relative_key':
+                relative_position_scores = torch.einsum(
+                    'bhld,lrd->bhlr', query_layer, positional_embedding)
+                attention_scores = attention_scores + relative_position_scores
+            elif self.position_embedding_type == 'relative_key_query':
+                relative_position_scores_query = torch.einsum(
+                    'bhld,lrd->bhlr', query_layer, positional_embedding)
+                relative_position_scores_key = torch.einsum(
+                    'bhrd,lrd->bhlr', key_layer, positional_embedding)
+                attention_scores = attention_scores + \
+                    relative_position_scores_query + \
+                    relative_position_scores_key
+
+        attention_scores = attention_scores / math.sqrt(
+            self.attention_head_size)
+        if attention_mask is not None:
+            # Apply the attention mask is (precomputed for
+            # all layers in BertModel forward() function)
+            attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+        if is_cross_attention and self.save_attention:
+            self.save_attention_map(attention_probs)
+            attention_probs.register_hook(self.save_attn_gradients)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs_dropped = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs_dropped = attention_probs_dropped * head_mask
+
+        context_layer = torch.matmul(attention_probs_dropped, value_layer)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (
+            self.all_head_size, )
+        context_layer = context_layer.view(*new_context_layer_shape)
+
+        outputs = (context_layer,
+                   attention_probs) if output_attentions else (context_layer, )
+
+        outputs = outputs + (past_key_value, )
+        return outputs
+
+
+class BertSelfOutput(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.LayerNorm = nn.LayerNorm(
+            config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertAttention(nn.Module):
+
+    def __init__(self, config, is_cross_attention=False):
+        super().__init__()
+        self.self = BertSelfAttention(config, is_cross_attention)
+        self.output = BertSelfOutput(config)
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads, self.self.num_attention_heads,
+            self.self.attention_head_size, self.pruned_heads)
+
+        # Prune linear layers
+        self.self.query = prune_linear_layer(self.self.query, index)
+        self.self.key = prune_linear_layer(self.self.key, index)
+        self.self.value = prune_linear_layer(self.self.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.self.num_attention_heads = self.self.num_attention_heads - len(
+            heads)
+        self.self.all_head_size = self.self.attention_head_size * \
+            self.self.num_attention_heads
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+    ):
+        self_outputs = self.self(
+            hidden_states,
+            attention_mask,
+            head_mask,
+            encoder_hidden_states,
+            encoder_attention_mask,
+            past_key_value,
+            output_attentions,
+        )
+        attention_output = self.output(self_outputs[0], hidden_states)
+        outputs = (attention_output,
+                   ) + self_outputs[1:]  # add attentions if we output them
+        return outputs
+
+
+class BertIntermediate(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+        if isinstance(config.hidden_act, str):
+            self.intermediate_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.intermediate_act_fn = config.hidden_act
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.intermediate_act_fn(hidden_states)
+        return hidden_states
+
+
+class BertOutput(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+        self.LayerNorm = nn.LayerNorm(
+            config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertLayer(nn.Module):
+
+    def __init__(self, config, layer_num):
+        super().__init__()
+        self.config = config
+        self.chunk_size_feed_forward = config.chunk_size_feed_forward
+        self.seq_len_dim = 1
+        self.attention = BertAttention(config)
+        self.layer_num = layer_num
+        if self.config.add_cross_attention:
+            self.crossattention = BertAttention(
+                config, is_cross_attention=self.config.add_cross_attention)
+        self.intermediate = BertIntermediate(config)
+        self.output = BertOutput(config)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+        mode=None,
+    ):
+
+        if mode == 'tagging':
+
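+            # In tagging mode the layer skips self-attention entirely and
+            # only cross-attends the hidden states to encoder_hidden_states.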
+            assert encoder_hidden_states is not None, (
+                'encoder_hidden_states must be given for '
+                'cross-attention layers')
+
+            cross_attention_outputs = self.crossattention(
+                hidden_states,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states,
+                encoder_attention_mask,
+                output_attentions=output_attentions,
+            )
+            attention_output = cross_attention_outputs[0]
+            outputs = cross_attention_outputs[
+                1:-1]  # add cross attentions if we output attention weights
+
+            present_key_value = cross_attention_outputs[-1]
+
+        else:
+            # decoder uni-directional self-attention
+            # cached key/values tuple is at positions 1,2
+            self_attn_past_key_value = \
+                (past_key_value[:2]
+                    if past_key_value is not None else None)
+            self_attention_outputs = self.attention(
+                hidden_states,
+                attention_mask,
+                head_mask,
+                output_attentions=output_attentions,
+                past_key_value=self_attn_past_key_value,
+            )
+            attention_output = self_attention_outputs[0]
+
+            outputs = self_attention_outputs[1:-1]
+            present_key_value = self_attention_outputs[-1]
+
+            if mode == 'multimodal':
+                assert encoder_hidden_states is not None, (
+                    'encoder_hidden_states must be given for '
+                    'cross-attention layers')
+
+                cross_attention_outputs = self.crossattention(
+                    attention_output,
+                    attention_mask,
+                    head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    output_attentions=output_attentions,
+                )
+                attention_output = cross_attention_outputs[0]
+                outputs = outputs + cross_attention_outputs[
+                    1:
+                    -1]  # add cross attentions if we output attention weights
+        layer_output = apply_chunking_to_forward(self.feed_forward_chunk,
+                                                 self.chunk_size_feed_forward,
+                                                 self.seq_len_dim,
+                                                 attention_output)
+        outputs = (layer_output, ) + outputs
+
+        outputs = outputs + (present_key_value, )
+
+        return outputs
+
+    def feed_forward_chunk(self, attention_output):
+        intermediate_output = self.intermediate(attention_output)
+        layer_output = self.output(intermediate_output, attention_output)
+        return layer_output
+
+
+class BertEncoder(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.layer = nn.ModuleList(
+            [BertLayer(config, i) for i in range(config.num_hidden_layers)])
+        self.gradient_checkpointing = False
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=False,
+        output_hidden_states=False,
+        return_dict=True,
+        mode='multimodal',
+    ):
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attentions = () if output_attentions else None
+        all_cross_attentions = (
+        ) if output_attentions and self.config.add_cross_attention else None
+
+        next_decoder_cache = () if use_cache else None
+
+        for i in range(self.config.num_hidden_layers):
+            layer_module = self.layer[i]
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states, )
+
+            layer_head_mask = head_mask[i] if head_mask is not None else None
+            past_key_value = past_key_values[
+                i] if past_key_values is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                if use_cache:
+                    logger.warning(
+                        '`use_cache=True` is incompatible with gradient '
+                        'checkpointing. Setting `use_cache=False`...')
+                    use_cache = False
+
+                def create_custom_forward(module):
+
+                    def custom_forward(*inputs):
+                        return module(*inputs, past_key_value,
+                                      output_attentions)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(layer_module),
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    mode=mode,
+                )
+            else:
+                layer_outputs = layer_module(
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    past_key_value,
+                    output_attentions,
+                    mode=mode,
+                )
+
+            hidden_states = layer_outputs[0]
+            if use_cache:
+                next_decoder_cache += (layer_outputs[-1], )
+            if output_attentions:
+                all_self_attentions = all_self_attentions + (
+                    layer_outputs[1], )
+
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states, )
+
+        if not return_dict:
+            return tuple(v for v in [
+                hidden_states,
+                next_decoder_cache,
+                all_hidden_states,
+                all_self_attentions,
+                all_cross_attentions,
+            ] if v is not None)
+        return BaseModelOutputWithPastAndCrossAttentions(
+            last_hidden_state=hidden_states,
+            past_key_values=next_decoder_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attentions,
+            cross_attentions=all_cross_attentions,
+        )
+
+
+class BertPooler(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.activation = nn.Tanh()
+
+    def forward(self, hidden_states):
+        # We "pool" the model by simply taking the hidden state corresponding
+        # to the first token.
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class BertPredictionHeadTransform(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        if isinstance(config.hidden_act, str):
+            self.transform_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.transform_act_fn = config.hidden_act
+        self.LayerNorm = nn.LayerNorm(
+            config.hidden_size, eps=config.layer_norm_eps)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(
+            config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that
+        # the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+class BertOnlyMLMHead(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.predictions = BertLMPredictionHead(config)
+
+    def forward(self, sequence_output):
+        prediction_scores = self.predictions(sequence_output)
+        return prediction_scores
+
+
+class BertPreTrainedModel(PreTrainedModel):
+    """An abstract class to handle weights initialization and a simple
+    interface for downloading and loading pretrained models."""
+
+    config_class = BertConfig
+    base_model_prefix = 'bert'
+    _keys_to_ignore_on_load_missing = [r'position_ids']
+
+    def _init_weights(self, module):
+        """Initialize the weights."""
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            # Slightly different from the TF version
+            # which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(
+                mean=0.0, std=self.config.initializer_range)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
+class BertModel(BertPreTrainedModel):
+    """The model can behave as an encoder (with only self-attention) as well as
+    a decoder, in which case a layer of cross-attention is added between the
+    self-attention layers, following the architecture described in `Attention
+    is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani,
+    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N.
+
+    Gomez, Lukasz Kaiser and Illia Polosukhin. argument and
+    :obj:`add_cross_attention` set to :obj:`True`; an
+    :obj:`encoder_hidden_states` is then expected as an input to the forward
+    pass.
+    """
+
+    def __init__(self, config, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.embeddings = BertEmbeddings(config)
+
+        self.encoder = BertEncoder(config)
+
+        self.pooler = BertPooler(config) if add_pooling_layer else None
+
+        self.init_weights()
+
+    def get_input_embeddings(self):
+        return self.embeddings.word_embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings.word_embeddings = value
+
+    def _prune_heads(self, heads_to_prune):
+        """Prunes heads of the model.
+
+        heads_to_prune:
+        dict of {layer_num: list of heads to prune in this layer}
+        See base class PreTrainedModel
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    def get_extended_attention_mask(self, attention_mask: Tensor,
+                                    input_shape: Tuple[int], device: device,
+                                    is_decoder: bool) -> Tensor:
+        """Makes broadcastable attention and causal masks so that future and
+        masked tokens are ignored.
+
+        Arguments:
+            attention_mask (:obj:`torch.Tensor`):
+                Mask with ones indicating tokens to attend to,
+                zeros for tokens to ignore.
+            input_shape (:obj:`Tuple[int]`):
+                The shape of the input to the model.
+            device: (:obj:`torch.device`):
+                The device of the input to the model.
+
+        Returns:
+            :obj:`torch.Tensor` The extended attention mask,
+            with a the same dtype as :obj:`attention_mask.dtype`.
+        """
+        # We can provide a self-attention mask of dimensions
+        # [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it
+        # broadcastable to all heads.
+        if attention_mask.dim() == 3:
+            extended_attention_mask = attention_mask[:, None, :, :]
+        elif attention_mask.dim() == 2:
+            # Provided a padding mask of dimensions [batch_size, seq_length]
+            # - if the model is a decoder, apply a causal mask
+            # in addition to the padding mask
+            # - if the model is an encoder, make the mask
+            # broadcastable to [batch_size, num_heads, seq_length, seq_length]
+            if is_decoder:
+                batch_size, seq_length = input_shape
+
+                seq_ids = torch.arange(seq_length, device=device)
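+                # causal_mask[b, i, j] is True iff key position j is not
+                # ahead of query position i (i.e. j <= i).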
+                causal_mask = seq_ids[None, None, :].repeat(
+                    batch_size, seq_length, 1) <= seq_ids[None, :, None]
+                # in case past_key_values are used we need to
+                # add a prefix ones mask to the causal mask
+                # causal and attention masks must have same type
+                # with pytorch version < 1.3
+                causal_mask = causal_mask.to(attention_mask.dtype)
+
+                if causal_mask.shape[1] < attention_mask.shape[1]:
+                    prefix_seq_len = attention_mask.shape[
+                        1] - causal_mask.shape[1]
+                    causal_mask = torch.cat(
+                        [
+                            torch.ones(
+                                (batch_size, seq_length, prefix_seq_len),
+                                device=device,
+                                dtype=causal_mask.dtype),
+                            causal_mask,
+                        ],
+                        axis=-1,
+                    )
+
+                extended_attention_mask = (
+                    causal_mask[:, None, :, :] *
+                    attention_mask[:, None, None, :])
+            else:
+                extended_attention_mask = attention_mask[:, None, None, :]
+        else:
+            raise ValueError(
+                'Wrong shape for input_ids (shape {}) or attention_mask '
+                '(shape {})'.format(input_shape, attention_mask.shape))
+
+        # Since attention_mask is 1.0
+        # for positions we want to attend and 0.0
+        # for masked positions, this operation will
+        # create a tensor which is 0.0 for positions
+        # we want to attend and -10000.0 for masked positions.
+        # Since we are adding it to the raw scores
+        # before the softmax, this is effectively
+        # the same as removing these entirely.
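+        # For example, a padding mask [1, 1, 0] becomes [0.0, 0.0, -10000.0],
+        # which leaves attended positions unchanged and drives masked
+        # positions towards -inf before the softmax.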
+        extended_attention_mask = extended_attention_mask.to(
+            dtype=self.dtype)  # fp16 compatibility
+        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+        return extended_attention_mask
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        encoder_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        is_decoder=False,
+        mode='multimodal',
+    ):
+        r"""
+        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:
+        `(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer
+            of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:
+        `(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token
+            indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as
+            a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :
+        obj:`config.n_layers` with each tuple having 4 tensors of shape :
+        obj:`(batch_size, num_heads, sequence_length - 1,
+        embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the
+            attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally
+            input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to
+            this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:
+            `(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value
+            states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+        """
+        output_attentions = (
+            output_attentions if output_attentions is not None else
+            self.config.output_attentions)
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else
+            self.config.output_hidden_states)
+        return_dict = (
+            return_dict
+            if return_dict is not None else self.config.use_return_dict)
+
+        if is_decoder:
+            use_cache = (
+                use_cache if use_cache is not None else self.config.use_cache)
+        else:
+            use_cache = False
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError('You cannot specify both input_ids and '
+                             'inputs_embeds at the same time')
+        elif input_ids is not None:
+            input_shape = input_ids.size()
+            batch_size, seq_length = input_shape
+            device = input_ids.device
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+            batch_size, seq_length = input_shape
+            device = inputs_embeds.device
+        elif encoder_embeds is not None:
+            input_shape = encoder_embeds.size()[:-1]
+            batch_size, seq_length = input_shape
+            device = encoder_embeds.device
+        else:
+            raise ValueError('You have to specify either input_ids, '
+                             'inputs_embeds or encoder_embeds')
+
+        # past_key_values_length
+        past_key_values_length = past_key_values[0][0].shape[
+            2] if past_key_values is not None else 0
+
+        if attention_mask is None:
+            attention_mask = torch.ones(
+                ((batch_size, seq_length + past_key_values_length)),
+                device=device)
+
+        # We can provide a self-attention mask of dimensions
+        # [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to
+        # make it broadcastable to all heads.
+        extended_attention_mask: torch.Tensor = \
+            (self.get_extended_attention_mask(
+                attention_mask, input_shape, device, is_decoder))
+
+        # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make broadcastable to
+        # [batch_size, num_heads, seq_length, seq_length]
+        if encoder_hidden_states is not None:
+            if type(encoder_hidden_states) == list:
+                encoder_batch_size, encoder_sequence_length, _ = \
+                    (encoder_hidden_states[0].size())
+            else:
+                encoder_batch_size, encoder_sequence_length, _ = \
+                    (encoder_hidden_states.size())
+            encoder_hidden_shape = (encoder_batch_size,
+                                    encoder_sequence_length)
+
+            if type(encoder_attention_mask) == list:
+                encoder_extended_attention_mask = [
+                    self.invert_attention_mask(mask)
+                    for mask in encoder_attention_mask
+                ]
+            elif encoder_attention_mask is None:
+                encoder_attention_mask = torch.ones(
+                    encoder_hidden_shape, device=device)
+                encoder_extended_attention_mask = self.invert_attention_mask(
+                    encoder_attention_mask)
+            else:
+                encoder_extended_attention_mask = self.invert_attention_mask(
+                    encoder_attention_mask)
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape
+        # [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape
+        # [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask,
+                                       self.config.num_hidden_layers)
+
+        if encoder_embeds is None:
+            embedding_output = self.embeddings(
+                input_ids=input_ids,
+                position_ids=position_ids,
+                inputs_embeds=inputs_embeds,
+                past_key_values_length=past_key_values_length,
+            )
+        else:
+            embedding_output = encoder_embeds
+
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            mode=mode,
+        )
+        sequence_output = encoder_outputs[0]
+        pooled_output = self.pooler(
+            sequence_output) if self.pooler is not None else None
+
+        if not return_dict:
+            return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+        return BaseModelOutputWithPoolingAndCrossAttentions(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            past_key_values=encoder_outputs.past_key_values,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
+
+
+class BertLMHeadModel(BertPreTrainedModel):
+
+    _keys_to_ignore_on_load_unexpected = [r'pooler']
+    _keys_to_ignore_on_load_missing = [
+        r'position_ids', r'predictions.decoder.bias'
+    ]
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.bert = BertModel(config, add_pooling_layer=False)
+        self.cls = BertOnlyMLMHead(config)
+
+        self.init_weights()
+
+    def get_output_embeddings(self):
+        return self.cls.predictions.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.cls.predictions.decoder = new_embeddings
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        labels=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        return_logits=False,
+        is_decoder=True,
+        reduction='mean',
+        mode='multimodal',
+    ):
+        r"""
+        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:
+        `(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer
+            of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:
+        `(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token
+            indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder.
+            Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:
+        `(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right
+            language modeling loss (next word prediction).
+            Indices should be in
+            ``[-100, 0, ..., config.vocab_size]``
+            (see ``input_ids`` docstring). Tokens with indices set to
+            ``-100`` are ignored (masked); the loss is only computed
+            for the tokens with labels in ``[0, ..., config.vocab_size]``
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length
+        :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:
+        `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention
+            blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally
+            input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to
+            this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:
+            `(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states
+            are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+        Returns:
+        Example::
+            >>> from transformers import (BertTokenizer,
+                    BertLMHeadModel, BertConfig)
+            >>> import torch
+            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
+            >>> config = BertConfig.from_pretrained("bert-base-cased")
+            >>> model = BertLMHeadModel.from_pretrained(
+                    'bert-base-cased', config=config)
+            >>> inputs = tokenizer("Hello, my dog is cute",
+                    return_tensors="pt")
+            >>> outputs = model(**inputs)
+            >>> prediction_logits = outputs.logits
+        """
+        return_dict = (
+            return_dict
+            if return_dict is not None else self.config.use_return_dict)
+        if labels is not None:
+            use_cache = False
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            is_decoder=is_decoder,
+            mode=mode,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.cls(sequence_output)
+        # sequence_output.shape torch.Size([85, 30, 768])
+        # prediction_scores.shape torch.Size([85, 30, 30524])
+        # labels.shape torch.Size([85, 30])
+
+        if return_logits:
+            return prediction_scores[:, :-1, :].contiguous()
+
+        lm_loss = None
+        if labels is not None:
+            # we are doing next-token prediction; shift
+            # prediction scores and input ids by one
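+            # so the score at position i is trained to predict the token
+            # at position i + 1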
+            shifted_prediction_scores = prediction_scores[:, :
+                                                          -1, :].contiguous()
+            labels = labels[:, 1:].contiguous()
+            loss_fct = CrossEntropyLoss(
+                reduction=reduction, label_smoothing=0.1)
+            lm_loss = loss_fct(
+                shifted_prediction_scores.view(-1, self.config.vocab_size),
+                labels.view(-1))
+            if reduction == 'none':
+                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
+
+        if not return_dict:
+            output = (prediction_scores, ) + outputs[2:]
+            return ((lm_loss, ) + output) if lm_loss is not None else output
+
+        return CausalLMOutputWithCrossAttentions(
+            loss=lm_loss,
+            logits=prediction_scores,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            cross_attentions=outputs.cross_attentions,
+        )
+
+    def prepare_inputs_for_generation(self,
+                                      input_ids,
+                                      past=None,
+                                      attention_mask=None,
+                                      **model_kwargs):
+        input_shape = input_ids.shape
+        # if model is used as a decoder in encoder-decoder model,
+        # the decoder attention mask is created on the fly
+        if attention_mask is None:
+            attention_mask = input_ids.new_ones(input_shape)
+
+        # cut decoder_input_ids if past is used
+        if past is not None:
+            input_ids = input_ids[:, -1:]
+
+        return {
+            'input_ids':
+            input_ids,
+            'attention_mask':
+            attention_mask,
+            'past_key_values':
+            past,
+            'encoder_hidden_states':
+            model_kwargs.get('encoder_hidden_states', None),
+            'encoder_attention_mask':
+            model_kwargs.get('encoder_attention_mask', None),
+            'is_decoder':
+            True,
+        }
+
+    def _reorder_cache(self, past, beam_idx):
+        reordered_past = ()
+        for layer_past in past:
+            reordered_past += (tuple(
+                past_state.index_select(0, beam_idx)
+                for past_state in layer_past), )
+        return reordered_past
diff --git a/mmpretrain/models/multimodal/ram/config/__init__.py b/mmpretrain/models/multimodal/ram/config/__init__.py
new file mode 100644
index 00000000..ef101fec
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/config/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) OpenMMLab. All rights reserved.
diff --git a/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py
new file mode 100644
index 00000000..e4b88653
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py
@@ -0,0 +1,93 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# data settings
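+# The test-time pipeline resizes inputs to 384x384 with bicubic
+# interpolation and packs the image together with the text field and
+# basic meta information.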
+test_transforms_cfg = [
+    dict(type='Resize', scale=(384, 384), interpolation='bicubic'),
+    dict(
+        type='mmpretrain.PackInputs',
+        algorithm_keys=['text'],
+        meta_keys=['image_id', 'scale_factor'],
+    ),
+]
+
+
+def get_ram_cfg(mode='normal'):
+    assert mode in ['normal', 'openset'], 'mode must be "normal" or "openset"'
+    model_type = 'RAMNormal' if mode == 'normal' else 'RAMOpenset'
+    model_cfg = dict(
+        type=model_type,
+        tokenizer=dict(
+            type='BertTokenizer',
+            name_or_path='bert-base-uncased',
+            use_fast=False),
+        vision_backbone=dict(
+            type='SwinTransformer',
+            arch='large',
+            img_size=384,
+            window_size=12,
+        ),
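+        # In the BERT configs below, encoder_width is the input width of the
+        # cross-attention key/value projections (see BertSelfAttention),
+        # i.e. the channel dimension of the features being attended over.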
+        tag_encoder={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 12,
+            'num_hidden_layers': 12,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30524,
+            'encoder_width': 512,
+            'add_cross_attention': True
+        },
+        text_decoder={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 12,
+            'num_hidden_layers': 12,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30524,
+            'encoder_width': 768,
+            'add_cross_attention': True
+        },
+        tagging_head={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 4,
+            'num_hidden_layers': 2,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30522,
+            'encoder_width': 512,
+            'add_cross_attention': True,
+            'add_tag_cross_attention': False
+        },
+        data_preprocessor=dict(
+            type='MultiModalDataPreprocessor',
+            mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
+            std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
+            to_rgb=False,
+        ),
+    )
+    return model_cfg
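+
+
+# A minimal usage sketch (not part of the original file; it assumes the RAM
+# classes are registered in mmpretrain's MODELS registry):
+#
+#   from mmpretrain.registry import MODELS
+#   cfg = get_ram_cfg(mode='normal')
+#   model = MODELS.build(cfg)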
diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..0519d1ee759eacdad99df2811ff59432369e1599
GIT binary patch
literal 51099
zcmZX-%W~wolBXv-aleqv%u{E2#<Yo@debtK%@b()jCw0GnMo@xf{Y*;$+)oNLKbtS
z2dIS>rDxJ>>HB_;e~@QXSw=YU$Hn0Q9PSPW;{WRZKmYgt``_ii|MSm({^RaH|9$bV
z|F$@;_p9}Bvw!;Yk0~in+vVxBKK=Q}aCyIcxB!d*wuk-t&p#%+!+v=>ua6&p{xP^;
zZ?}Y_<->Bd-T_UP4-f0p>EPEf@Mg8%pDX!rTx||}3N0TFN5Y%s!|`Lgd04zIH|FUr
zANV`uZ?eq_SUhgFyVP~wylu`l%7?=dM3<}O>v>tZd&||zDp$|z)8=ixI4@tpb+}yp
zyqu_Yuv}fX!DICXLYE^S$^5oHo;RoUE;x>s`}2CgU)tFEc(~s@(C^9V^|<_4JS;)D
zzkEC%{`_OMd^&C(s4lhimKz$rTW*evhr@og5uG*Iefx)$P<Cw6>G1IK=O4F#569)>
znVRo6)cu*TJFM1QDMseU&3a4iDY)H8eL*2w7y|CZYw)^V`b{JPF5Wl$)#2T7A?0wS
z<JK><I9~SeOQ<M*Nnj**oBfg|&3$^_ympW^43?YImbwSa?efSt%n~jhw&@0Qf$`rh
z-{}%L?k|;t5Mn@FYuY{>o)6SM6nd+wo|YAE*I>d2rWD<rFvHdhiZKMe<rX~n^vcw-
z+uPUY<^7tT+&zKta=$n|uV0_nN6OLic}ZJ?<?h}#CxjPB@<1a~rtNC6UOw50AOG`?
zNzPbO7+O5jU#s4&&(9yulKFD?`n)+mua^)d^}KF3i~;cNa(AhT!gM`m1g{B4Cb_uo
zAE?zX?;keZj9SyR^mYH_s1BC<U_$22PMUu!U~ytXz1EDn?UJ6?+lGgkYo6Cf;p@W4
zM;oT{&8`}IvAZ-vNWW&w{hmc}JS|<8voovx;h7Oy@~y|Lmk7-L?ZGwUGT8k4rOKbT
zcAkpgHXu7$()V+%mR=SZe;KARe-|63ziqz0t{seN0W}H~d|jV6zpTv6+a<)bUO>WG
z!o|<a#vwEFcD;Dr09y+%7+)i&<>i`dQ`KAk01qk5Y~XlUK0Mo(SH}B52Wjdke*NXY
zUoxKvjvMFejVXc<^hB94@%8Fbyyz%jbMyl&cGQ|>*e8L!GlRuF6X1zl|IyeIGCncK
zwu|-e56ibUY$HE~Wv0mHb-i6a!AmBlZ_aC5n_uBDmK0&s>;;*!XlZs0_5*_AQ^%)E
zHyo}`s@pABo9&jWDYd&_x~|4RAm(H@@vxEmQOC|nLzTt12^RNDhs9bx;Gp}g>LdKm
zaXlW_#YvAx8AFem5W<}4==0`2j4u>9FP@fL<_)E&>AYYFoMKbq6H7#9P?7V69pTSE
z{>d+I(BN=ffIAHP*vMx24Mkpx3cyYm`g7hqw%tMoUc?hPEw+b8qP?X<IpQyMIp>$B
z&2n*G?=s&e%M%lM`Q*kn2Y!ZuESNH(6&v5~zM3hJ;x}%Gq<$=l3sc?c@UXdJ3C4&0
z@{(Q#KEP;f|NP7X49bBv4-)>&?kb9BMr_L80~o9wli~7Qa}wa32|Rn;JUzn%7C&9b
zCbLne6#Z%&B^A9vl=ah-%%5x{R!DJ+{t^n?WD$C52$eJMhs(?HAgcv9hjGmT4hspJ
z?fWLt-I}Sh7x78ra)1toi!u``pYZW;a07T)9+^p?>J+%M)Q;VhWxQKDkl((h)Mj_H
zJ2?1n>*pF_QM&9{uGS5(cs$hX6Gh|O_0p9wYtdrKU97mp(8af86adzH`Pq-<8$(7r
zrwd%TNjRhB+kr*SG#o77mu@Gw2^S9x3Cx==%qxLSHT!O#ori3HS^jR_zn0(vaKF4i
zguwtf;S3h{hcg_MZX^a>j+>>NqQ0_;ve$H9*IV|pjNc@=9}fpwDfq;W)RmIZPOd(Z
z84Gl5usR-K!{mK<**albGJ9v2<0D-gkRu}4{W2U2tO==%T2@S+l@bpi0>RmQ->&e2
z>Gt9r?A<RB5~(Qr*@*BJZk8IJE^7uom~9H(m-TA#L_esO5-+wylY~%6AY#$H05^0S
zpvty)L#05-NE9?3<S9wdx9|{f-fr0@C<D%N9K@K^3rr$J7g(&^B&dLP5<k6coWj<(
z&5-2-&_X`TjFk)ealv}}Xw=zmYjT72j)6|?0@s<zhduw&i~zgBT82U=5@nV=e#UBw
z(vk3IgGJ;>klL>@MJ7lgs}nt^R8rT=OQ^Vif|}!d#)BC{Ka_C7GTo@4^8P2-Su;Zq
zUG^`M4@AfZ{I-l74DI$W`-7t;;+faB!q!KX5G2q7#t5P9!UsZAAx#s9UoaK2zru1^
z@JxxCLW;m*L9;Bfb2vRM9q-o<uD{z3Tp&POF;(zQ2qb{0g|-9~(3Jm-DF@K#(S>6P
z*gRyWhl*k5h!km@8EbR<#o?Vg#bUo$9o*&2c8pjp!1gSfR(6bZH)i3jAQOGJluWjb
zd(DIlcbgQoM?#`&yeQV9&#qL~O^br-s>F<I(h~w_h|#h<Z6g+cmSIXOEPQSByk0`+
z&n%by29`(?{q6J@XmnCaO9(0G^C?T#VgTwVXH+d~W-^%MEdTS5-##Po?T459Lncfs
zmX(04=+gMdq>J=&wX~Yp_wCu&6n1-t@%xuN-J1IGCGAS2uSKiN4C{&c&a^n47EhN=
zI67lQj6I89GKi-WQzFxlzZ5ttnHelPNggT@LxxVz0nhMIdOkkAZ+7pISCa*;j}A1@
zy$meR&#wH%7rhoLw8SWHjn}lKns0QZ#djOzr5ekYa@x2w=jQ`kBGM6ZG@m~Ubh;bE
zcaD13e-D>19(8~ox4kt<5ui<CgJl~4e+JiBsd6k%SSCsAMomslq-%!Gez4#gNB*^R
zDWleBxTgu36U7BgTr@#)R-c{^5#%Q=rUcZ!Gzx#KFK_6H>yN5vnk}l8?DHMwG$e>(
zWIW57K)M)b91IxjTLbWU>d+XOkR8b!<R2R>9xnIQi>QYrIX}!nAg&kW#|z_cvlnd&
z^8ywpjU1I8^|g2JR!^m?rq5D(wRk*cqSH7VuhT?mmCcMASFa7#e7x%7G_g9NDK55X
zna<XzH2j6A-FC2E!|iEe{jy$XT2Ng9ATK!}F3hnLE5M6bY!mh2iG{lce+G^Zk#VZN
zV`Jr30}ZS;f(awNk*^M#LQggBdu!A+TB90bj%1C578G-w7xxz=4a?oDN`|)XWb|>5
z@V_`yDMkExR<dG(*{q=x&Co>({%ZRo(7WF-WMlqr&|09+0MHhO*kPU>lM%p2<p0|S
zvAnnF?()g{w0K8^2nXC`@=(Xh?-YGnfQz)?3nLIiR^V3_s=YFuJvD<^xtdzfm(8C2
zZdw530J}c&EDcfd7P|;Nr^t5O7$^8`w;Phtq0$FBmq~?HMzWB1e?Z@NzW%u198efh
zERY{jdm?#Lt)hJBi=7DkXq@mmPd2SdnqaLL{=kpxbs8}D3jWyErjh;HB>;Vot11B!
zBtL}s(5a1w;Yx-9Ned*4Li^p9_<mI~?Pm_rq1Ua1Dgsr|3#=N7XN+O(=QMP3EFlQV
z5K#eSaKDY5I!hQa5DHHdYZGQt5iq`fs6DE3opu;)s-oK0AzpUA*+~=EshK*)3<fOB
zAs7L6dpw*HmK>oL?T!}4>koC66MSg&h@OxVlhgk2j!fgG3&t^j^$Dw7WOb8^8*C5g
zhh!X*l0h&hs?t;~e6eamhiPSV;Ije}6u7LBLQACuU1C2SjRH?EfRN&{Ze&R^WC!Qo
z>n4~KS>~CE3)a@j-cBo8+w&xm_vM!E^??}(laQ0wY76E)cXMa>idogct{k-@#`lN&
z$88D@0UxY^idI36AobfB;mc;*rli{)mbfR-q!<wpqpIxzZb1vwAbTO$!3fAvw!GVW
z6zkJ+aUqInAQ@a3BhnvZ$RI27D@6rPOI9j%9I7V=gx|75($eAO0dkP<@lZG=Z5J_;
z{vBQz=xz9~Fe6RgJJNTH-R6Ov6)r@|fRx%z4%(Gy6qZX@ZvIOE+GFjL?;A{7y%}!U
zab@zBItY!kT|da^Zd!z`JE9q5myf51<67<d{&0ChYQq2*MU#$S3VorH*Xam5T5NXE
zt3ZYji=Io(iAUb|qBCv3U-u*^rKIdvramJ>Yk?MQ`PybdrsB1AfA6B}f4^*Ao8{^L
zfFjJ7-xp6zMYpG`oIx5k&9>7VewR-3i|cN6T(YSk*g=!u(=sgyuxUjaX7(gnXA*P*
zWCl@sM7L8$P0{?G5WZq4x0)rySQA}%NG!XQpIzWdaK4(LcsWm~z84~r9H|5?ix5nb
zGlI#)q?gAC6eHAf){z?yT*XNNW^4LYHE&|*hZqkp?3b*A^b={$64XQb`#~M%=Dnl`
z7z3;*ZN;KIcq-{)DnZw$UAhYHWo-;lYZHF**yc80)8{@w7*Ml>(nIW9WI6{6dUkkV
zs<K+B=dk|;(aZv{*sWKwOt<J4%k)i`#HmXR#KaG7PXp%anw~(oa6rT2$k+-Q7kWU*
z7VMS>k%+iPX!??Y0i6qLxORi%Mcdu*;qVdt^p0;>-?pa(M=?xpi(T%suTBd<FQBVk
z1iit{2mwWv$J3QC2c+SgrS$GEnE{v8>Y0qA`^)1aLSJfqd^9UL!b%;oWN0jG{5gnJ
zU*oxehda!7*96RGQ7*q<Q&2lFO-RyyiGF=fC$j1YR&F44NXvvn5P*H26`x+1zJYR`
zN|x`?;qZvH%qf>5u@YurC%g62vau=<#%xi^9LYd3#J;q)i+I;R!mARm9UU|{r630d
zF%=-M*2gAGUG`;sxwsa|%R+!3Lkc^mKwcc1-j!Z<LAh4?gPu%khUetY7qJ?oPYY~J
z&_&&7W)vl`jFz-+Off5kI!%R&W1lBrOkF599mqJl3I4do*S}+O;J}d1d}caD82OBs
zhDisizQy|?`j*(FfzrN+L1yO3RVgnB9Al#x@TKf1hV;h8pw2SMhvTCMq|6UL0$;aQ
zQ<R11qriJa2xvv_{(1SkE0ft9lhO!4gPV2Nap5GP-3GP5PM=C$pTmGi4R=UbIjQjB
ze1UNNhvg%+QIauGE+)b131pvB0|!KANo1?A%7&N{Thtm#j7dg>5&=rNZyLziGW(dZ
za7|3X?1oQAW~ALBCS4wuJ5GSJYto;cyA+sVF=steLI4Br-bq93a6txZ$f-D)HPaUi
zcCRG^(2?D1F*c!7<jQ2Ux4mdc)P-TFZL+kRBTw?|b0{VdE1^Kiw*@d>wcT1IGEPF|
zZhCH=tDFu)HBdLwBnLXHxX}A)J}cczHG#t`w6G*VEixl#n9xP-{ksvwBNobaC$-qy
zlCB7RM1qd&KqIle8X$SpXuZB(ICtN(@*vCW>EbL9()QD!I=Rm5x#cgSAA{M>ejzIf
zv6{^VKbL8~An{T&7#|Uf%Cs@l{0{XwdtnkLR~f7e%ppO*_ngY4Y`3UEOl6uWKwn(L
z%ntODCaMZI)^~G-Yie2HYV)M3U0}wOK{|?`y?Q!eQ>|0>LYRcIiP*;Gp52JT!uyW%
z2yfnG)k)sGMF_Nh#rTFKbI`BMq%3!a1Xvbv+HOj3o<;TIJh7yXW;~m)t$K`WtDe4h
z=l{(HX+7+knMVRFS=XXHay?5PKAJ0RL&9NUr7k8+AXzYiQIPtrOG9dmakyBkM6?i-
z^uuXk<?=`>S28xp)~n8g)JS7+LdZ#f*@D?F$OJWj!}6%Ez@|IgAwcOF!s2LFY_Mz+
zhjm2|WHm}G{?_EDzB5*IColB@vVCmoBs`zWHX_K`g}MRAoR^60ZS;T-7mvN@3@o~;
zE-jB02{y0Lpz+Cbsaw{-8P3sx2Covz#i+C*rhOn8JXD~T3vK`?YjSsJB*iGkVG{Ab
z{OjLA>0keTJuDbJAV2Y8^swHPP(5o9M{AjI*zJ)w-81-7a6Dcn6^oP0P`LOwdXFm(
z`R{mcJ3C)X_@2O9-_~`wLuJlL&dqZ^Zj6(eD8z8KB}xp^LW9r+982n9uYw9=fjaG?
z8jn{u5P=*>wqYBSgxJR*NYdzG((p#{z;%e#pacdoMwS@~<Urs2YMZ7s4-FwY==8Og
z-Rts6Zh3cw7mwSE?T?cZ^2|H17V(rQsszdwsN5QPk_R=Di*2D6X30LQW(`T;)cwyt
z{t8gb<g3$#?HpOCnGt|)IMxA>u>{(<IFptUy74Q}0#04ik2u>KQajsSQOQMR2`$t~
ziHc2+3Dr2|OK1+K#^dV4Hr8xa3Y#`<%@Ieeq{7w-DFl$Tf}WKSErvmR826T7&<A2)
z{!YN)E2ABDudEeTnwfTK$NGhwYjml>nxm@i1LsXjGtjS$caT~@$+wIgmNJ23UW5cE
zb&JPo!+t2ML15(7z4AMaqx&d!P*<QFmPMNF5*~7JK-KjfyQ~DD-xX*3KyDtRS;K06
zxjgvt_;^6AD4K1aFH8Op(Idx$6(MH)$T1Z=VKCQB1CCh^(1In)=11Kkx!bD)TQw6D
z)d<R)W$$?~Ftb%Tk)5{_&+Jc9I+b69li*=n;h*U7;4s>~L^~5Jai%JyLb*l+57qHc
zV^Y%Y0evlNl41=mDTG$Lcuj$?390dKUviMZL^!;tfPKlh+qgu(eP)EK!rURm`G|^C
zyTvoUBcT0~1FHSvg;S_>0}0l(_c@IZudsa;Bk=aX4r^LkYcO~#D$;zHz(&OQ{`Q%G
zHA;JERz6sD`B0)`C4mNA66-Id?BY3rV{E#3vjTjnM4OL0_K(j@oF&1LT*qIKs;$SS
zK28W_EDjY-R>CDlSq4|2GmsB-c7!~H48L7BENP|S+BI*JF3iM{!jpHL>^O^yX#&Y@
zy<^_aVfE)jc!`ycNe9D{A(0&%kI@4_=`;=MMqrpYB%i*!12$it`NM{>JaK%HLVco4
zZ)%2#q2&Z)z&i8=OW?iMgtv{t09_!VJT0sVN%sZ!P1*LWqWV#N#v~r(tYXdRk$hEM
z{{dy#*4dk>gq)l~;EU5juo)RY#q%UktyK%Wm6wQBfS3XO7`<XQYg;Dl67DjEO33_G
zj@pPw%5Q<3ujtX`=ta{pO(X;rOFA3UyWs<SOmTSjIv6UsIHYi<7%n1yG}+n&+ptWW
zW<ai%G=Y{~2?E$w3P?4P(E<n*sr=vx7>yiq(oc9=|H~y?m|#&V<`oK+2MI(02u}oc
z(@v;E4opRAn83GkbvlY|XbqVs{B&hV6p~th^!33{T(Ibtm1D<gM(c;I$82O$A<jnu
z);8?3Uwi3*K)!C2U~HiAPNnQXVbYC|{_w))arkrn&s0dn)<y#H?-(>IR`Q|8gm>^!
zu(O)onL7!mXC!CJ-(FLiPyjX{P!%7t70pcnY9}i+jSVPsIDi(bOr(lEI11Eio=;!E
zSxYE$z@%bKMoB}~9Q`~Tp7u-RuOZ_Sv6GJbfj}XjesN?G?m>!Xr~WB(`-WIM844$a
zB*Ya?rb774C7Hexc~WP;l*9E{Z<^QaWY{Aj*3<i~I=pq_0~UU*2=e_J|6-|nji=49
zlbylJ8UM|I{m)j>_8dBDjbuq*F&r{!AcsJ6PxZ4?7Q}aWEC50BKuzYSAC!l2iOO%E
zk+0f}RgwoG#kB;5cbH~EEx))>%Q}%SxJDFP;|n5W%_H}3c?8*CVJZ!ZYaVHc%}!EL
z(M>fH0z-nZl;aZ{0c;+QspDY&WT__X)T>~zd*(=sBK&2`ye44Cb;7@J8s2Pr*H3Ww
zYMT*i#fx__ye;mbKnYG=N)sKx5&OlfT4VKCYerE!P~++h$KJIH>U*kE!iH$Lt=Hbi
zAmTU<Tedm?c$hn0vfxwb>|rLz@<%(kdl(MWXa9)yKr{Hvl}-%8*R<MB<}H8Y#<q{K
z^^BD(Ac{f_z#h5Zsxm@s^(NcAkj+ErJ%fU4U->8+<(vb-C<{Skm?q`_xF|c&U;iOZ
zlY5&K@rJ4DwT(2E(R^gCrUGUM-{k)z_6u$!fSWkS0khD=`xB1DsF8T!^t&`gaX~d;
z?|?GRD#R2st&p`Zu-8+=-EI=il$HL%bq0>d_Hv)2OmSocTr12&gt#2qG)M!vJ0YW3
z?An|lW<1Aj0A--ygGMN3Lzod*5poGcbpfleHYJe5M7LUW8t0btB?rJVPad)nf(1w)
zJBJ}*e2S5j>T_{Ue6ZRz2EXsy(F&*>Lvh7RS6w=U=I_u$-s653#ngPk(KJpzM8UM|
zgQT6{E{X`uxB^e;60sA*tGblVPzA|O`X8k&Sf$=d|A90iFI#MP7&1;9R8d-)ZuaQw
z%}7B>JaXl`G+uUF2qeIQi&+r5TwGan5^DsvFX%m`fUS)23{>6&um@tOOb7%~!k9Xh
z(9Z0syW-^q@S}^q9wJ1fOjU_wT4zOMao9lb5+7%)h#S3PWTAz!7_*IeAYjHaK3t4H
zVdpQ9NSPGOi3KPl!gP$#&c=<<Mc3Xvkxh&5Ii3R{0a-btNu7yB8t=0Ov3-mU7*{^K
zDY<<N31e<GK|{hMA;u0`72dbAZokWT4DIO1Fm_;5%9vt!M0Isb9c>Ty+M=q7*i7d!
z%(JnS5`9zAtG6AffEq<X6CgIy86Ymxa#c2EKb+)oLG7?PsTBmJ>>u<}K^e?cQu#oG
zfttE=jn=^fy~|f|qzngXmLZjWbrJa?@Bx2*kfL%=L_mQ#ki!xBjRsyxm=H?kFsVG*
zEqua;uUc1dS+{Homu$#um4z=uWpQhKr=MndXYscbvQ8i3ykDCSI9wjE5b6I6h=S?_
z574+K6_{LAa`Kdl+OgiTq(!oD=SF)=FWy>x{)YCK2haK`N_zT@_Gu?oLgP6%Lg(>-
zhQvoKE(%V`40ZVQ+L=u+ubvE3?J;uZ<w#Sr_+NPJRYd2ADkDhkxQFf^!V>|*Ay^WT
z>IZyO#q8k>5k%nSaqkB~28=ebYnG|5{F6rdDTOl!%6>V1P*;k~GJN>J#R+Wq`dTNx
z^9D6B6ie~L^XWO3n(=ChU4uCQvGEL!y$9eK2cx8HUL4B73Nb_M2~UK*Rcto=t)mkF
z$ivrct+Rxf*ka|MR;)w~swNB62ZvbKGEp#SGK)av_=3HHIoAhtvKn++{|YIy!F<j_
zS}iZ=k5)%YI3i}kX7&YYn)#UH2h9Ase+&RUI3BLnxG6z&0Tg)~$G%m}wBI~c43xN3
zT`9<}P}#H_Q1nNV*j%{|_=<dy1x;Dep|;2707S_MjB{;?8a=UkK+%y=cph_~n~j{5
z*PcB>NGrB-V={1Or0B)-j#{{ch1X7T0BT{gS6KYETI(03oOKljNOJrmxq!yeivzUA
zCaPB&5qi%mqV%i)q^H;cYnMtly6tMvbew5In4kkr^QV_E3z`oU8(<TQN&2k46Pg@U
z<U$k+=X_xH4cQzWRNG$VD0@Ue_gN4_pz;<hp!C1dPqvgC$@Ern@8K_XOxmTWY~%a3
z&~G@$!Lb+nWOJ+X22(07Th9n?#Ka02kVsKnYOc=97^F=?iJxk?=Em1Xi81j5x5XGj
za0oSUtHVf{s*e#Z$s-^-bgZT{!9sUH$3nQ1UJ|JRs1Hq(rH&IyZcQs(-e`of15zI}
zOeSnQz$_8e^SGHnL(a*v5hIm2W$DrBdEqqAJyf$lFFZA~a<f0<EgAGl%Az3YbkM#%
z{rl#Gxw5$7gshxTg#%lxgr3_5UEP0%*l3~-(04~0oHOG=<4r<#D?2{QIMve3UsgYz
zcKAux-P8UmTHuf%ROA!<JM(4MfjMC9l~t9$wf-l8*ie{4Q(;wDHz8_xYPw<aG_iz+
zYErapeA{l@w}CJ-U_{6TPPaCc7dk{;D2{d15p{+E0acs>$xW%)07y7Q1fh@<0lc{*
z<l#b$Y{pQGT?;^MplIoE%#}P+xj$Rl2sOg*((F5quQEK6a_h0rX;VgyK$bc&jt(T+
z17MLhbaqY?HU-*(EW4pW%>saIE;ExHrkHw0H~?z>NHyWnG(kv5vTy=(a^f7P0l#3d
zfJR*;Ho;N8*v?HLt8ex*d^gJL1~hZzL;Zy1w_uvXW2oKwHld|sfUFNrG5S$+Y#bmY
zq)_%qx~hmny$9~yvT-<~z~L@JZYEF{^8$k!0e~A*437IYg;iC_cEKV*2dL(<YgXvq
zDqA|nzB-KP17$-aC{Nz8DYa=&v<bpQlrqWa0UCkaYq&xP(HhTD<6hcPYR5UUJr@LP
z^)YgboB@qc2nU(W$&0E#$?gX?IlI7QBjY7`WO6*m2dm4Qt@Zx?zyDp<^+8>O<{sZG
z4=VodN-P3u8rn#3tLu_bx&2Im&d=CobCN8t0n?%G@KR`A02^pLd&q2hxoFZ5C`_3r
zXE<ntq~RJvYc7!jY+hrzX45?(#;u`zUqKnNdv~^JwBv$ZRJ}N6_u~HPRYhi1s3ypr
zjud=)!rT_j2J5FdjL-io2G{jfqn>kX))9r9X6Ip}j{ndtM~M#@haNN<iQ#7gv&?p8
zU5<>zXDpVY`sS*fcAzTdm`J;kcLP{gc`^GjMg0^LghXs$SzcOB7>FFAFiB3#W%usy
zqE-o*DOGr;Tr9<1xp?Nt$VmzA28f9Tf3GhC<5V~##5i5$jt7q%$W1xbGkEDlhz;8{
z@%;)f*b1<kGn2lgU9VeG`cUG{-~KCpG{^Pgf5W1-Ui_D<*W!@-#_`gaP%~EO$Wqf>
zXK1@EIcS$gr(OYgiuoM1{4=tsDM>!gX+dOGKw~fCirK^V!RvfX=I?xv3UVvva)y`8
z-6h=tTTLrO*qL!|l4`7o_uD_juVyOh(Xt|0ED^97%J}rwF2qmWt+hQyCH><0I)voe
zV*BgkGp=wWJS0k(o1i9gVp%()FwQ6p^%%3foY!5*h2@q%0^r;P!riS|INJ|i0XQ{1
zVC2zqsBqJu&e(m8H$FM*zQ;%WGj?%X@mm%cn>*`D>9&WPszdeViuxFzx#L5Wxj8qw
zxuLHcczlY!UhwX(e|MRa8(Hco>5=IGtK+xp-;rH}+0Rv(sY){;8e3!-@+vijFeBlx
z*B%b{VuRZ~i=tkxAflhBEqx$~$0Rd+T%ruHH0K3$f;8#nblC@RFb$L@+rh~!@mF<P
z@SP^6kq*Z28xh?X5z<e1+jq`hXqJc#(F;a^wY`695RQUJoW*kw<S`SHDmRc8qYYgi
z7-2lYYKs{U&IlmieZ&XD{hr(c#W3hfB0273^0#Phy1jUeP(Is=+HR^jW`m=b0&t`{
z&72r-5)nwV*W38_2K~bYwl1yo2#Eq3DWDH`k)@(=PbNaA<6Lv0IkIPgLFH<1mRvtW
z97Ya=m_y~-^H*U(tc+i_lt&$LXdc?|ssr;1;^l&>p|@%2yJ8fC&<Xp|Es8Zm*p*u&
ze<fceNxrPP;Nm7IRBNX}-R3Z4M+pbw);U%-m;{dEl8WlX3&_yUVz6tIP8{n1?al=@
zr|UH%qZv;vwcGZ*#UuSIAF{#YCM<h|GflJjHmDtUkRd+F(}ZwBmAf9!yy8*<&<!EX
zmb2E{8OMnS{eIb^DeP+*W#*eFa&-tbi@P5IFS5YFB^6a_=)5XE_0{X7OZr$}VN*L?
zDYBN#T_>fE6rI^}E&z%{pzvNWhDv=U=Crk!T^EV@L>Z9g3>wbkc;?auNYg1;q%-<1
zkx%{D72@Y68G#=Wxu_D^B(Z0fcGatHpP@06JT*g^GS~&%L84{Kq9}w6A1kwPdEkHR
zK*-XRCFpQGo&N=&`vvX}L0sQBnx}J38x8!q{QSbX8APDN+Cp**S+FTf(2n&Y?HH#(
zUu)t6*R-lH_3;(6eaKT$uYkmyLp|rR!`wCqZd)c@=&P7bj^E4RF?jqLM%>qnR{ROT
zGAb%?1Vh4TONL;<_T;@MlyBya<&by=1WpG->5x!qmN+Pn4eXQCf&=;})HsbW-DU<?
zCPlfp1(2&I__qQUc;V?z>_$bMIp;I@7mQTu1j0I8@tG=O3^2?Iq;{<E-Lhwq7tyOZ
z+ZCLdJuR5gXSU8O(z&7jAnFkaf-}6f2`><Vb5a?OO=<Qr)Sdu_GB`R>M?E(MAeYvS
zYnwt_D^2$r(IeV+{nA!G#7VTcZdXk=rD*P=5!0{{9n+00!fk}Ip0c(XMV`D>FhERl
zZS)@xngDx`xuM2iWwY_)VYBA;eHbQpuw3pOH#_s1<lJYlFt^S~ez_h`OcRS`5JOfX
zF;X)eA<qcMs~Wl?Y$&E{ny;bE(9jB;zdL7keOElE54VZHVd~+q?kj8ArBqoPD**ok
zmTq|(4JcB9D6X<d1$ocH@9k2%wFacwTxR~;mxMYjZx`{e>C6&Oa6U`6K@$6_s$Fou
z#=8rsch?jxEwV|d)XLD0_5#2)sa<WatEUr_+Z#E--wo0kPcmigYm)1lI>_-N!V0tE
zLT8%LAx9IWuYEGYSqW$r&_;kc;>w+Ov05eF$9Dg<eXK2LKr9~jgsgFzKcX9}tUz@+
zHWXpE_`KN$vhE&Xy@9zeu=t-UJEhFtTuPIL`GWC8w80hoCtYNi5uQ4|(TE#!ux@)a
z_N+|j$N}vF_9Mo-+(RBc!%fPVz?zNp^}jUR)@=c3x$U;iyd^wxZHhJBMbG7_GNDcw
zc9S>Tyt*+j6S(tRB$M%Iv(ti+OCfS1Jfjk?yAfYbxpLFz;kaz-C<xGFOcWuZHc>&w
zaY))PyEtg>SyY8dhFv?B$cnH#F+t}Q>A1wKqRmCJi*>6fmDJu_pSpW1y~mPWjvp@n
zvOGLqvW)tV7g_)05lddv0eX+B5+nYqXNo0vg}kFen0<l_+37Dn0^*iH>8m3*xpU$)
zeDb{+J`Q)6U%%XgNZBrbr~jktqfz}@WmALB1bLST(zFNcIlSrNHOvI#0)V0j#+2-g
zvjUp(O%*2B22KMwi{=OyD8~A41uVWi2W15=t`aab(ip>QC#r)Er^H?0lL9k{CO2Lo
z_;iSK{qWsmM0Z5eI9f;_k=SEPf6^h6#=Pm&Eu4zw`Xk741ezPj+C^ilrbUI`v^aR;
zWTnXWYkkj#-G9Q=!>~NDAXG1()=U-Hd@s-R++YxJ+&YD6Qt{GHIa1=Hs>ZXy6WwNX
z`cGNxV}mm1&l*_UTl?UCHpz!N9xb>Oqk`W#qZJ1wTK+PFhB|3WKWY?c?f~Dv#Grld
zOq=6{I~z55M{JRtgwbcPQ$-<&q{Ax*Jhzt-!ACd`Jh2Y*_JQtOFqwIaa9;0}>%NtE
z)_tQdw++5pJzCDP<jm?T8+MGu6>F$YDwexGEnhPfw^(;LQ~9{c^NB~fMm8Kfd1RD~
zvZn!%w=;)WK{5w11!4!uhfI||H)fGVr~v)-m1~WeAb(Mc8$vx;t+a|~`_!O&TEaBO
zdS(D;t21#D?^)HOC*%e0#iyiJB}(Ev-EM~d_%|Y~ihqlZsIkaz<{%M83Er4vgt18_
z6jJZjQ(OaBni5?DWBMyN?u6iDmL|^9gDqN?y=CWz3}B{3kD1LUTpn3fe7d-ofFsQ|
zi_iDZ$bv;5a~MddOOBC$R+kuGrVT9hkBC!rM2Nc;o%b^@GqE`{J7$S=3TwqMn?-%C
z80Sz~+*W`mSFi=bwNE6=ZNVyE*WuU@m*FOt-;^$`<>c~-!P~1HC%ga^f`)<JB>>7o
z3J`mGLO0L=98GsMzL7nNdSp1#eR7ayYfdP!{MDFAJJ*kKsE)D)DFdxKKd+k()NTPz
z;J5x@{)-<~E2mN=qDfSX_QDhRGFr4o-vrn*-C4jPFD+Q0RBcnvMFZqOuNUOF1e*#@
zHt0ZQTv?pBcU})UQ2+;*R7}|EzotqS5Q+uj>4Ig5)7wxPGh6UBEGPdNPnN~Z6^1jd
zSeaf1*sL`DlgJ<~(K55ouqDUQqMjm(^xJ>BIJXVxTJOn?a>(Bt!H?12SI9a{fzi|D
z7lK7t;RoDy^z^awC0!t%Hl<#m;d4`2(WVuP44D1(ZbangF4BCxCT3_JPgJ8rxWgs{
zMWIg;qgDpWSy2kLD`2z`?Bk}l{^vjV#(ksrSP8#hVHRZbm->?S%83UaXu(KJK)I#R
z49~9~=_XYLa{bbS-3A6y&+8D03Cp~@IzR-fa~^;?o}dVekCU5Lo=CW2DkIV?c+{~k
zlTTjpD(#>i^vV%QzGqP8=EP$FF>cL(LF;zJCy<*>V65;Kk*nYmqwpKquilxVwO)ul
z`V<ABlfuAB5g<DeSB)TaPT@#A@<s#`wE!&f%1_^jVGGmMr_-WtO!ECL)@!V5Cobvg
zkKbOop--7gOv>91>^AiJ7G0~psb6(l6=p7;yh5Z`@tyiyUl$Q4Zp(7%!<7N0aCC7(
z()U1ihEyNBL=QmZsws-g2Mm!97a&*)t-t$(B-N9r5<WVC=oxOa%|UCw^pmMUr<LWI
zyJiXq+?_d_DMyf24_u6JCg?qbFu@VACJzaLoZt0S@~)QygaGEa5L8zb$b<%5|5JXV
zYQkrQb27`Czz$jM@R|aA#_kZeWh$x1hREmB#66aB>d8qxiE%R`fRJF}neYMwl)gvA
zU|}>_7pHwJl}8m|c6S}<P}S|i=P;eI>I^IpOeX;gw<H%KlHBOle~!1pz#tRN7mGZ-
zKhXJaq#FBjZb5!I)!e|HR#(I>dX+|LYr8GqLNe}Gm+R}Pw1ea(1>`Tc-X~(ux=v$0
z^BQju*XcE<CR7$!8Mr_i#8!ZLE-eS3hP-Oa?OD^?9pIZ5>lEvGi+3lxIm7N_w|6w(
z;P$13Vg`vz0F`;^p*w<oj$6R3!Gbl46ICp}lO$lhp^|Cf=1xTG<ovt>O?AW%c=fC8
zfeSHaR86n3(K09wpE-q&Uy`rJ&_`Yf<8f2BCfXupm#e$rt>p?z;NitL;~Ld|!dWF1
z<ol&%nws&P&8UGMj<c>G4N!?>ilg-wmkH3E2S6WV5Lfludq@_}eKprgi`1z?i?Dwe
zie!@&1zljIl0r`kfei{#eD|P0fo1BVy%w!al>W$0dN}4dU!0G3{PkWov%5XNNjG_X
z$R!AASiAMUGYA1|7vTe=na6A(KVf&2Y8GPE6iA($%uO}yKVBY6$?WY6IU!FKv#xVy
zOkolzp*ltc(Xp`}i27dc($7fK&a2+D$Lf=3ts0CFLo;NrIyo{^270r?8M0O$qYlFL
zB-1UQUc=Ad*+)Y@mKxR&9Q)iz=tg)DbLE(3Xn^AdTnorMc*a?Pc|+3_89sk3vo-*<
zb&1Re?jNi}1&e~oH)eCOOX8dvI^u7MQd4Zc-bRBzlF#J2$SkP8D+0-FQiYja*75B#
z0ofMAN>{R6HT??XBI9?#|7M}yeMu{W*LcgCgq=dai64Cqm@4~>F%#{^q%V)nBny@p
zrISI9KVYoM!%;|Pg`MMog`pYZ55ko#GU^yYLLV0Hy1{ziPDXB+*^r(6w8YoJ2E9qP
z+?eQ)bCx`|F=d%Crd)ENzPXXl^REH#ut)9oLa1W>h%5dcMoTLMS!`6aLO=3`8Xn|2
zQlV?WZHGjnInE1U+J@s>u3cB?-1hHG%7L>6X-9b6QkW2cx~0pp$*0-fcRrekPi0q2
zoUZJU^_C<Q2@R-qAOKcK?ywu@;$*XDZnoPqoBdA=4bg?b_UFYmSh`d~nQS8QfE{f1
zx!^t9?75Dz;SpF#O$lxwQzay6W=l%hZdp8Ti1><#tSfSRm?}rBDNR>uVKxVRs(D}E
z*ZmC$#y2ZZR!}Bdmk9a9x;=N->1mLkh-f=Ne-mtno8})wTwdGX#GRBttZxili*4<H
zN4wzGrY$Wx$ClxyX<-9^7_WH5E_DrjAR<Ik*&1zrEnk)&3|C+GPln2%WPst-Art`9
zmlr60=A9tEF!5Lhlb>E@<Jv!yg;zsZk9R+LZ$I~g5iglnH1QL&P;^Fe!2kJ6!_@Ta
z7mLH-x()wG>=E~Uy3&c5gP+`IgDdubayg8-|C83JV46_gKpT@pN^S4u*>Qg5fsMSB
zNQ&g#oLq$E_dE}b&Q_+Mo1Yu5jK5=xC==fpp2l%`yx4xWEq_`-nF4x0V;ud-jX35H
zpW-<h-D>>vfO`?;MYfESy`M7UA%F8=D9NyaBoIPJzWfY-nz)Oy%#;Mj!u9?S-|IIR
zNZY^EJ%|2F81&p=vpV3onL;lS?Bx=p7gRVd)lj`qbbEki!+lwDD4|Kl;TP2xHcuqK
zUc{Rbe;dzvLA7w`U(zE2eV&Ie8$4-g<Ug>+F!9u2<G#QKR(sq0`hhdD6qm<MvLu^~
z`GWs~4Q^b4?S3{hsJp3sJrVv)QSOso*W27pU7#_JL1F@Rz1((DCO=9LEU`<#2cN!P
zi#HS3wE<45a0Fp8Uu)=$TeDU}-J*vIl9S*a(u{F1$Bd=fZXif`!dx>SD4e)o5T}=S
z2H8i+mi?0t-YFds)VZ$-<Q%+U_F`G~UJkU-d-1va>DTu*$3#>}%nN9vCSfLW%j@rA
zFX8d7SojS(H=H3B$1=(RL;@xJsO_I`*sL6`I%T~v@O)WEHCh4#;*mlaOCu~F4{CfR
zjBb#`<ziGbw@YAb91*4{2h4g>QXAmrEAyaC#>U|D8GSmj#{efs83|@<oHwYw*^)dS
zC8QO!-*(JyE*`Y85zE#(5n|HNsa{Tn>EmnM%kK(UFx49;g^=9ROW{*J?m<d<OSKbZ
z=XW;XI(Fmpd5v0W=!77QGc9e@BXJ-u(7m_OMVt3GWWnTe^bLwMY6RAHTglp!%66S%
zd}?ANgcD1sfRj{BFD~BFLGp3axxLKCA|)C_1Ra;<a!kq;of&Y=uuR&c#57k8g)&zG
zpsd-cb3h>R3_ZxPK<T5m4Z2<&n0E;!d2?^rhk`|meNcxGO*3Dj&1i*%Eo$v{{diWS
zb)eR=OK%4|Lum3zTRZQd0Bu;)xgWvlZ@CwOX)MPCY}`Q==9Hcic0bWF#siUeIWq$R
zb0pqg>@q+qjI#i(B64N+t+~<=8v~ENU_k?uA&&$<Yx0}oX304o)&$z22(zL9uk(X3
z?;dXJH8FNyl8~E4aZ<7Lg<hS{`B@l9i{GC8+^~L~0YFVeyJ?V5^*F<Z#G}Rc0T`to
z(>;IrHsQPzsfxFb?YnTdgO82XW|&Ow-V~z_4@hZ)UZH+9_ltV$+MQ7weSPH7@f<W$
z6M54CY65J&epi?|O3g^9@SL>ZW=%t~2ZO?Mhsh@5x=9d*K;fO*_jyb<Bmh+EqAu>T
zwlI7nG8K7WRmc!J1lx`aeK5Cz0eE})voki)fLa}&>4w{x7iB^+tw-?QkSsR{+WzI+
z1_)XF>Poh1T%{V#;_)=cpCdj9?Jfb)*!(dtZMQkp9pdScA?ugR2PH6*T_>%dOO<H9
z{*q^TC;Uag$lJ@5bIuUB|MCTF%Cs7|@hm=j`Pl$k5#+=H`vJ2K$`63l>G@!PQD^8c
zD_8;}jg)49?hvQ1X#vg}OBM^8Bt+PR&-KfvW4OKi9!Yy%z~TdM1*+`f)()op<!8=z
z8#$?PE{=?a6kMrUkg4TI?hk@ipv-`oX-<MzKPmL2!JjM?;wOseLBd#QhJdjWe-qd|
z=1{ol&}QQlLmbSOA7-MJ=bun9No-<Z%s%O4;kg`L>Rh$KAEEXH;qqMpirQY*#hY#f
zG2VCLDF`QCzZ^jmTlk3cuR#l)^wT1eCpCf%F9EpP1u*l3Y)tPd&Rz^hX&!tuQmjO^
zu84$vUZLQQuiRn-V{b0YJfV4|0Mn;NFigB@uvk45jGvSZJ+UpvTu#qWtui;P18XVP
z;~=*$`xN~Rgsc(XYyo~r&Q0F}du?<SfDC(Ypwuy_=$CUX-G!Y(j{u4r-$}XXl}a5x
z_$^1Uk(L(LB8rz%)IuJ)08Z={=aW~2*=o?SY1PQ-<0zw`3B2d%GB_gd<^^`s@}?x^
z^8gM_X-rprf@GnVeX+=fgS)~M(vtR9)0m3?sIXaqsogjZUoRYf)m%{`vBlh^HKXsj
za{iN@g&9%t<{(29OQWL)xe&{p6t@k!PA{f*KAFIeN#qPBNiHN?fERDNGY0C>Z@UH^
zg#e_;f;|F@0y|iC`NZX-?WmprSqCIcpa5%_ztn^akdh>HE5@HNTmV|ZT!OD4XWh#2
z%!bPelctCA4ASqsNmU>Ebu0dc03UoQUvH9p#@oG>x@FB(nPu~b0%T$4O`6*d#33LK
z#)EdSYv?Nfo*OoN{bDqxNp9Vcwt)rE#cLd8-wV3m^4-C_>KeJI!(|7?V(3@>d8Y!M
zitZy4N31y#EXR!?kOQJI(nYfy@MW)WJ?BNmUwUCvBugl;@VX|eyR2MPP<F|L*O8~k
zBqQ;9v9U%1kP+dz++V+-ic@sTRWY>1dgi+#O%a^r%X<!7$C&%_Dw2iqyls2$H(>gO
zTzUOzmOe?$VG7_%hgGiwF@{xHm0~%f8KDtD@fw0KD?^EQ;|IMr+FdHz#l^OM#VByQ
ztUfY5w_$0M9O+>a-)!CbAvHbY3M%;cSj(_`8i9|}lid1afmA_CVl?5)jb{ygy`4wl
z1OAHQj4-P|t`wALLQj7eUFutY_Dho+bi`Df70`a&G9Ke#LE2DQv3OoU4)#cA63}qC
zBNfU`xO^LDMf(zizqfIV6%Bx5XD6&jRLGL+R|bB;+go6PS5EFcPKh6Q+AB;G69<vH
zzHLTn*ZCh)QIWxiGs_)|?1z2Cw4hQmsvH#hr*~C<@Zs&npo|xk`{azLF=A?pM<$I_
ztkrV;%5Rf}B-~{w+VG@3TQx+2_b~!c#Y1rU4~RKtG!^4G(Rdpf)aV50^f8h*F{Rdd
zmZij-u4vq;`>uf-8NPo;u1AoJD4CPNAIq>62CXGUD1@p+^HxbPi+cb#V{C|X1^q6t
zU9r9WhdA}!eN71AXtWi%`hI-YQo8kzYnG_3rNVw@swaQG#wFBe({1Dbl7K)9E}seh
z_ycq36FIc_*vvmGEsNwQvR7|yhPLp}&GRd&#Iknub6_xXN+F>*lu)%~TmZRwk^TC>
z32vpGadT96WV97RAwLbrG|)!Fs1`G~QR<D6dAfW3&({Y^|FWZNz1=0-Ge_i#97I85
zyt`}>UL)%Qs&DoQ(+q5lo6WrFsk7n$m9N-3l0``p0lEbZ=5Afo$BpI4)Rf3mZnp{f
zr4n?E><qI?fQ|Z8&Af}%4;KEdqS(;a2Y!|2sBznFeoHzW@zdU5TVN=o!?}=6f!am@
zrEE@iAILB$0V2;!T+b|@*<OPy$0Bj692>({LTAa43z5V9viutWI9)@^zNx3LXc!F_
zg54VTSBc?pc-UdWBSF$E+9~G(3_=R3C{|_>^>_Ajtk@Kp?s?CyOwLe@2;$aJtqKrq
zhkKs+R8Z~n1d`i{L5W)d%PLi1_74g?gm+8|=n~6DMn*KvI3(OO@N%qENE(pOO%1xA
zP8hK9h$P<>Xej-@_=v@61V}X$m;=Ro*R@x9wjUTyQtBNBbw>t&gQa^CiivCrLu39y
z+fC;QuNMU-70n^owBTA{^`YLLs}HTB;hs}e$FI+|e}b_=6HM!;?nvRRAV;ht!CGhq
z#rcFPSUQxQ)!PK^M{Rq1net#?S1d*eT_kfM7ja46WOIMz@Ac+MFK_vc`P<wWNnrjC
z@HO^Pi>W@xS3#dm6M0QZ0LaalTlP#&2K_s4*)R}wF43NsnssS8g1Gawn<Z-zIv1Kf
zR8#9f;E6NafKzMN&agYZzAFI7==v=K`qNU>;K+H~MKAoml7yYLggRNeX|WSByM~D_
z-kEtv^__~wfg<Gg4yuJ?{<bG7=7RYJ@G$!!QIrR1@S^ZNVaM-|@CGbZCnITui_|nN
zxO_y1?B!7m2M7U~%L&`f(>^As5I3IWBlhRon7=jX%P)QoNV4@=!p{LRRUP{_=`lVR
zdz-Wb8=d1k5=ps^LnhHHFHr@DosKy#ptGmVTC}<`u2|TxTQiet#ICE)k5mR?G4e)!
zE<RBuDwyjCAD(OharD8$QpaVKguCouC{)|MaXRFf9`8NtjX4@dKZuosoq(;zbSJS^
z5TQD_9J>kCK6M9~>%ui@p}5(~?$keouM_0NN=-MW%CS%x*#p+5+>7Wq^pr>BZ_uRk
zkYzV!^%JHegHrEUvt=goK%2G7FIqx#GOWrp#7`D+F2&dZJRMd5eP<VAnMu~Y4n~2T
zBeItOajvI@7_~p%JQOXqN-laATcKN@VjQ(8tWOAb-I|W`JbCx+?I9o=9(a1^Y$s$$
zecHh!Z+BCRvN#RZQLgDOd8*x?@v4-6@$ogsiF^!q;v-=xI`i&Ljxv(oRP<_|r?X~l
zhRzmi4s*Bp_QlbxKEJy$fD^4C+TnIkC~yb@Ko<aVIT*C+o%&>q*4MrxpHe<q5nAau
z6}y>XO>=C`|Ilu;TS>NE2%(}sHNe~&$s9`N_XJpH4e}?HO6%+rQ0Ow*ySUHbR5jSC
zn%#$Q4s7AUlH3&0t)A<14I}p2n9Lz^UXc+?o$jKQwznb;(mZ|hv2J~I$k#N_pxJ4m
zb@Suy`jIJ8a<3^K#WvU9)YK$#KKYBLV89~CxU(o-oF+v7;ZqC;r0`=|&Kw~qK%c4U
zZCyPUL=BY*@mQ7T!%U#$_-JZvkK-a+;&;$!DIsfrWIT4;J`kP5y#=EY&D<2g9#Qkm
z?-0&{`rwnVy$SN_>r4*7n#}>48u|Vix!%zFnnu28{Z#5Qp)%MCwU1xdPfcqf?TMp^
zpvY>sR=U97Ie(X12C9xy$+zdiwnH5NK{QSoErz1)?={qnsSMq(0Fzu-*Ju^D3#_(j
z%q#0OGa<1Y7krr(I>8?#KLn^ZGjI#$$NOL%P>sa)YHIua#M}BIMSt$L&XypL&fQ#%
zueyzt8Q0Vri3qvrNT701gKXCrR*m=qfz8nyI>DDqCo(;{k3_<w$C%55J+U47Y(`R#
zB}7w{Zp!>8b0*HYZy98;kJK9x3`opZF~va_9V&AS84&%h=1wdvU~xt_pzZEC?6!)>
z{HQs<+$OcAfP41l`~=F37kaR)V`y<W25n;2uf{aTS5AZ2%&rQHdDM3)Nf^IINY!HP
zw>T??OM8B4hT3=+k;e>Z{71q3GW^$HSbT*@`8*J!@-fCwwzL|4)TbPm@cwPxFB6FE
zY*v6{K$mY??0UG^;_WP)H+h=VjTs~MH+1>AK4?g-Pm#IFxMh+s^@b@5+If0codX18
zKF4vFusQlro-yt7mue;k%Gbc)2aKMS5gaF$m7GkvJd){0`3!MD4-LBKvgThCt8V7B
zO@Ix-EB!U72Js=T`_154G7&8cG|i}s6+fhLrBhxc09#hxf)fENn!kE*<NeJXr&8BB
zJ0A2ndtWcTZ=5e)3CtKsNlI;QqDI}s>2X%z26LYE8e$tQu0<B3uz$xHSc|L6hFv~F
z)j%j6GeifWUeh918HU*Gu8s?V``o;M1)Nab-0Z~!+=QL;hD``#auF#37chn#TW==1
zKw5b$<l$R})eP7QF@9Q{#}mAH8E}JkQs{SFIfv4>WRn-IwD8rMuY~%cX%w-Rq5HvR
ziJ2VOAN{@3F5X>KmAHQAQadbo7Pux&SJ&6y^_4Q^t1sa74Ar~T(8NYbmV>3DdKy=K
zQlLfnxqgP1VW|qXe!d<xbs3s#xdi%v5f@cHnxTP2rvT#-pUggQlQ0|xEGB{&It;o!
zzOnr7;O-mWeB_!($U1u_bu~VVVp|@&HE};@Fs51g_pH*J$4W0=VnhDsdL2OhHW#8R
zn+k%Zvv6Pp40*r4Nu+${G*dne{klk;t*PEGjlJ~i8b1YLT~1jVP$W*F$i`Ih8HtPS
zZIhZ=j-?+&jCRU7xhH&mS>lY_D&zdV5_#f1Vuek*h7EYEK!b!8<cF2wfBy=bhYMrm
zC^UR>g54nt$ET+$OdYWk_{2Cy!R-8<=KdNh5OhQHpyJP?K|1+b+a|?j9gywSH;h5$
zh|^q_C~4V6$s#ux6Ex}W8({G_e;Yfexe$vaTDJyP7jjb-(NIJKmR5L!6e%sp<&E}S
zBTbZyjtSPqXq=o35DI-MzTgOzb>yb1)47I|R%_8Gr(7(8B|R>ZGc2INM*$XwF7|NA
z#M%vC@D;@<d-V7mgB-u>Qt|`84jSQ+FIj&erh5kMw%2a}Xts2<f2xS9;l#6pxUz&F
zM-fT3lZT}@mFqfX#ym5PGo};m=6^*l=icGJ!nP#Z&=;snhJ&TcrtSUpF_Ago7oX97
zbBiz)skY6?iP3oLItPHepJ};EMB`cKDa(u8>oeC&(y*?$X-y!8@ph}8R$W;L6itOp
zv@qjCW6S6x@=PE3=9rn98pODO#Q}?Dox)OE7pd7((F65?;6WE!H1oFae{U&D{@vT{
z^e?$A(FbPsn1px~RZEy3DvBg53dl*I01@#yWd>9$?Salkxip%sp0KyDkaVBDUhb8d
zYvul^n6}E#i9J<;F28O2+g%>z7`7bW`>(k|MZB4bU*(>px;2x!W$$$<UbZd<(sOO$
zdM^wNazhMy@Oj?Fm{i#q;?0$Ne$-H!ANr1N+Nqq^?5fh3yLnx|lFO-d(5zsi*AFyn
z_vzuQCSFJI-@Sq!-y}|p!(Ti#jz)F8Iy4M~*X)4JSS~juJfY&oc3@(D;FOaVCUJ7b
zS946lwq^`@AT{lB$){{ItO_P3b@e?Pp-KZ4K-uKTNSs;5Ne*kKY%|oOUCXQa)?Hy9
z2+A(&U2MKQ!I*PU2rGjA)VSHE8opsQi!aSx2VOO<xhXW<mV7c8y)I`Oh^k`{;kv5c
zBj2&WO$DMGSa^aU%PYrwbT97aOg9K*1a?f1e4IIGd$!qQbP@?4^zLhoKKb)LGx^(f
zSDe&e+u;L+sFMCE=vC!0CXpFGqN<g)sN<K5XqM1hE)IIXWXX|1Ep4Ej8u*3mJZi{A
zh<5HaNu(^xcfx|#UpAx?e!J~8$Oy&yXj<p=4jhSiSSkyPvw$xTCXFk_75S||WeX>|
zqicSeb&WQU#NR$sP}Z(*^noa5{qkHE?w**wxx$>NLH8b>An7B1_A0&L4hCKZ%=15j
z$ibSYl3X{C>F25h8)#&C14J-`%rst026Rl;kLpQjAyC8u5K6?+pj!M2!+|I+N~Ea&
zN`b+Vt=hYTglLD<2;~seXp3BoNmpG!ovrd+4gi1y0M${py*Fl3TY~@&4k?)PXlPbI
zQ(P&+h-HIv19EATBv1%rDv#jcdzS%Cr&Ai;^OIM?Zb>fd|JY!?Opt-<IHitI=w2p2
zXq=E9f`))c+F<^J<o2AVZ&EP1Nn}!76E*KBg_u<0EH#yk1O}K@>S8_3@HxGG%Yni4
zh!>9crVonIoae0Ie|p1Ge~))t&C2Vo#hbEm&wQ`sx{pp1YhpF`al$;C=tRj{NBCbe
z$f+<(I*W_UkzBY3QtX;-lsq>EUrrV%n-f(UB5d}OE)s4suP@k+W3uDY02%X(g8kxb
z69as0weuAWJru4?uZxN|fQVm6O>kARQPMZ%2Lw(WLy;XcpABls_1g&U|8NzzzSP;#
zJYRV6wEX&LL)Sy2Awc`=udg`ZRT7sh+PzIy^|rPL4^41t6+rgXCO>FLE};0}wJ$Q%
zK1c#*pH3vh&h*ne=Ji?rVzX%#Hi-e=T#FCHOsbdT($Z&?Swe*<eYA{%?`ptty5y%o
zNOvLMfrUv<er>wDf&xvISg8HrBXS<i(jil;_nIhz62&wUTxSu&i(4=`ogj=FPk1I}
z3dpUj`Y9?R=QF10h(A<;4BM$Y(4-zNr}x@L_^Vz)_@P)dKk}>K_P1p2p;4!+>6@4T
zfdk0g3h5IKfe~r^JUCvS^6q*<W~MtVWk~!qK=Fu<*Jj8Yym~Pc@t5Xlr(P+iOg%r2
zP+Rrph@AG^AIYX-@9Ocb%l;`s!4QcfN0yTU>d^!4ePbO@Cf_@mwR_-oHJg(0Fe?lD
z*r1$|M}nD>)QYKyQMv1$OW-%k$_a+j?#hFvN=y?rOHLxpf($5iaL=HS2;U?gJEBG9
zBZen-5o**&`kxG<LNuFae4CCCjjr*f{4{Lh>=CaFoqiFid=Q9O&K&5+?(pFzB*bBQ
zArX}kbK;62qHA<qP)Azo0qlNW_BOFE_Xw$&?T$fBk$2=d34>A^Z>{c#-%8F{jJPfU
z&SZVXOsSm{B>0_Sg${b^sE~kBOVU6B%vDK|m(v7&#T5r1iJ~!rVsb^D01rY?r&yl0
zq@qg=CJ?7sy2thM3<2?Xf51NR>f!HkOk-aaIextxeH97~03DrKLPnV%Ef614^K4!p
zD8xbteP?K^#F0f!$v7;KSGzd9n4BJ-_sdIOD<1+!6Q33UCx<GxEpqj6qgk^x>kfDk
zi;<mL^Rq{P{_$IbFs9`GUb~n~RmpJmkDzrAHo6Ytg@zXqNclQWJ1GX`m^ck{GWqFM
z2IN?QEnz!403Dudk33<CoMxbcanB*okUXKtkdF8nf+?9oDG-m;>3l>ka}%MH0?6Jj
zDVi|G8fPRd?{6X`%R5g^XOFXF<a9s#3wd`5V^V4_b$}bX6NdDVT`<QyUf*RRE69ul
zvr{Lr4t1s`lC_>OGUD$~hJO>m2o1%CLbZATHky#E3zHs@b>#}UvpJGOYA^9u(EGN`
zpj$yWyIv}hW2`mmvBl{5#GqY|uzKDLp?Xpwi0dj%;n`f?LMMI^8A{@jK+mYbywteP
zWCwUECx3i)5J8edB&Zidrt^1zbs(~$p+ut$ZYcn>0-ESdiT27hO@SrdK)%(0D3z#r
z2)D`e!h^7#O302pB7q-yW2f~~3`~SM=rCj{P7JX&)-(~037^zpawCwP9C4lv(rSGF
zlMsnAEfF$6Cgk#^AxO>=x)9tR{HpOYA;oEx5vkjl;K+Rx+ovDvt<%Jx`STzpV@t>e
zn9<>338s&Zvs9F>lM@jw`@@e8^vpoFJgcOk%`3i<@i7vv8($Xhs&wAPuEOk>p=!P<
z9FRl8im3#UKP;+(%cFr>A|L#xgi@(aOx$g`oe)DObim%%IgeQS^$`J<?$Gb{CK8<h
zN+;$WNL4K5yD!nY=*56kUiBbq!6(~U^_j6deiS(e+9uUOoG3}7(9|?MbfQy0bs`L*
zspqSsxRJVH0dy(5oY(~{>@1mUNp|49&L&MO=ez<y6RO>Ik?z-rB#Q>S_6S*vF{f(C
zQQXQ9h@+jXh*~*EqM`t;E8#*=5fe(yUXRFKP@|JC6z41isHBH}$<p(z|En+eB++_*
zkWxI4Q5y?TQEW(J28QyA=w#w2FBMhz0+hKdP^^wzI!E6ac(3Te%dAc*B$pIndNV7*
zLZEwjj2GvPwu_^>UO7|*)lA*ZpbigWdMC95{$>W%RZ)k|hm=u)<8_co+b`yyky>Ky
z#pR^agdQnWJ7#w23{4k@PBg^{fedAjv#n&=G(*abhh1O1=xKiQd6UdYT^pm;jjo9F
zBUw7WJJIp9A;=>LIne;2jrcGX)=b7eQIO$lta8*un7ITt+1FqEZSLU5$L>d$Mgg55
zH2A>|RU6=t8-e^rj`ex^WZ$?F?T?y}5WY1}C<0k{lSJ!rQuCsrCU5pG$}mAv2_fTY
zLJe9S0QT!@5OuPku56lIab=znzF(D8^2Yi|5d84<Vp0vb5Fa`Iw9EbM!3p<H#>r4C
znV}-@h_Eb8V<Cu_d4XdN<C$y`5&9C`c}Kv-n09ONCehB1*AaGD!cS%a{V7lTm+@+P
zbipO~()ytUuxTI^QjtzQbutyuAuU|=HtyFi`^*fGz)GFDO^k)eR>XFPm&NJufLyuw
z`nKPMG^`VKu^Je93=Q0Gs5RD~)8VnZ8X{P0>gH6hgmi}_BjP+2K{FI{WsV8jSrFik
zgIf&|e5_GzXAY-4%?1`+?rP6q3*#2<x|srFqOE<BTKKt>y*8Wq>G0tj1eo9C4Lz<4
zN{qTRc$1Jx4-=DgUA#LrnC3gvI6opt<O$QEzc1PccCmJT|5oVYA0@0&FeRGZZ6!Lp
zn`O*|=$qz0#}gL_FPR!{{9Zhp0ecYgitcrwX2w^nZ(f4J%JU`G88MPn%3EdT44`fA
z?&+BcoFov^In6a?N%l8{Gucc9;F9uv=oBA*x?2Jv0b);mur7+##cKdYt7^}9e2=?6
zub^hu)N#RGrTqiz^4x$|pp<UF*R2p1;2NZJiG9i4WvF{SU}}cq68sVr1YsEJ$4d1?
zbTb1jI4lgEq5{Zt);&%vkNi$L5New{3sxZd74#Rl`aTT2E0%gS^@jieDxUe0)vxW}
zw-8P73}M_U%&g@4<@9XqeQXMlcxX_(24{v$q3&&GOiH;0WhiN92|FY6G%+kQH6C>n
z;*LaLQt^kF@OlgnA|-t@gtbr1a`c@IQRJ{P^e}8{O?gm{`P44=i(>3>qXMW+<OwV<
z70k?IwbH@rkDWi7aPelby>5~7ij<<~qUqM#j|O?=3HRW-I+LQ{RtK!+(9}xP@hXET
zgp2Mu2V+bnS|D81@4q;g62+PGfNd9zD#SEL=Si0gtgPlVV(n7z)-V(2y<SVB<Ni(#
zC^z3Quv_zTLftfgQY<lemsoLdgo>!tZy}9K?Fux==?mm(ecrZ7)Rj+N92L2HR|o5K
zh(vCH=79h}veCHjRbC;lR-&6!3*pO*M<@J{87rJ~guXz;FZR=$N<ipR#|YA9Xqq?3
z7^`b!pdQtoo|KkM!0Y-!8K^t=@M~j47Dw_f4~84a?DJ+_M#zKjDvu~fz=s|cD=T21
ziN`vyN#~XoaS~8cdlI{kVHtenWvR0BL4IOva%PLj5Z8vxPY%9f6oWLLDoH)E1BPt?
zKtzgR(#RYpELRSi2X1FbmdN}v6-CRtfHLW8WTP#mi|bm@hBcYvD?!&2DczZ_C)Nnq
zmBg5pZI$(q&+$=X4#W?wE7OcEU4<nfcQyF%gc}IMkMivBvoHg7Jr85|b3O29*D?#N
zK~9PBDdN0DQRqzuXR|=Ks5N#EK#^EyPoN{9AX${nVJ~x!koAR$ESIazmV|TOdQ2?M
z(_9XG8K*LRW>~oERAKGj=Xwj<*G9D=O-`44NERjQ#}($$H{RE5rnxRdcinMFw)}{z
zKr98f1!(H6-O5@yzsmgv;0LTLIYl1un?5<qI9;$`J3$SbNYWxWm1gSZOS2Kzu{xzA
zLDB9m2Z*#QkQYcofqZdV_Hu1+#NV2VV<0?DJUS*1_D5#AUpd@`Re>Teca=zfcZD&H
z<=0?nrG3TSL{kZfa_*<QF$4suc%E8Yowe9N{T#MHy$fUeXzuh3w^hnx+`9I<_Uh<w
zAi@`9jqpL?ZaRO@mx?h#Vw0$Pnvv0>*5n-Ra~Bz#1t=Zs<b#jL$ELP`-m#u@0BG)x
zfyzq6lXoRF(lIa&aR`wtYP=AVv`Eu)nm1Bj7~t1Mu4fDk0x$cFxPXcE6W;K*2C<25
z;iW3S^?Qq?t0H?<6UlI50CkE-BBS8HfiXBAh}0<%wE`E55Nc+T0m*B!&J&`+F(V@n
zzw697=A{pRcZaqpk0Y+-Zvwe&+oJ&2csV~&W#$%Dn(2Cgk|^`Z@A17R47V^N#)az<
z(upT1nKg8auy*?4iJu(ye%%-VA+zkgAL!=?ym!RzLq?L?+x`FU^|O&C`j~)5=YMI?
z4RR2-<KZ0jR^x!UXDEM-e1wP?ztYaPVbW6L(-oLKK$pq8L^!WI@Z5H2TUkpL;fHLo
zCwCID0+*r}JC47qIy*0U*8iPD^A)iXBkZYPKF8t1A(y9;J<6W}URg;{x1e$e;thK0
z?6HeMFaC0mJSh~DeU1&_oOBJpT-F8#IzC0?A#e&2F#LS28;4K=0U`;s0787>0c&uC
zW&&<Q*?IVYC4+W*$UAlcYi=pjLMR=&b=?O3;z*io%(7M>%YHy6cMZR>6VU6SkfKH(
zfKi_k@EcCBc@X(t5iAU!E?4Grx#3py>^)EEQ4y5B+?m9E%KPFV8@gZuM<$>M7~-Gr
zcGt`olV2VT`U;RF<zo}9sE((3@C{J|1cxrd@?cp&fx9nhC0%^Y#TFBPi5W5-Y^qt2
z=qNwB2}bmtIh5BLItSHxn}*(pbA`4jXR#6sbTUcf%FC^zMD!=k5^eEr216eC0gdSX
z1FmCctF~!qOCrA+q>@&U$`}16?HH%e!0q>8sYjuMwmGG4i13}rxy~ke0IN{_o$qJ{
z(Iu#t-_UElY&Y!Z{1W2+<-|`Qgjf9aYm`&jI*}Y*U#7hYgyS53dl5CfA->doawfFv
zmYF4pF?6>Lf-kmTO=t;BW4oFo_6B*P0$<9#EosBPQ`S7QHNU_!_kdk%v?tIE_LPZk
zO?L=6iY~@YIkrkif)9ZrEvAi+yh?@dg@fH!I(NQ%;2+8b0O>H839qGiH(Jiu<Km1J
z;BimAhR+nB0&%}Fz(s~5p3swm#F>R~jDqV|8mNTf9r2eRQle5CX6pqQBj&T-PZ->(
zGTLVSqM%KAB5$egWFCmySF*8rgXp>Zh7U@<;MdD*3;pxKcgAOD8q-TYcjdZR9mI_5
zn=FbCFCY9|M#&XJbqUuyZ3uL{np_o)iG~(-Xto5g-xx6XL?s#M9O~t*DbV@39bQ$W
zT~?ap3)TsS;TAQ5aq;Em_FN?iWvzz@b-#rWH%HI{aX4YT?E)U+Hv)+289qoeEb7Kp
z=neTP2EeamG6>Wdk5>>N<WPq+@Xn9W7_i|hGX!i;`O_abzGLyDLH_C(i?@Wx<n3-q
zQmsC3IfSLDdh;@2NhXj{tNeXkI)uiFHD?F#5FPX2_Swu23@L*I4(*2@1I4@`gbWV9
z9*7N;90e;^swqma6GPsgmwV2xa511#*c|Dmy45-9k1x?Dn9Q|o)VsVedM!~JiH*gx
zFkx{*Q|RcG6l!GEl-`1^!(vA?GUaW-@nz3T%c9Fi{@OL0B-Wkt*i*J#dO8l2<yTbz
zb~gg=x^G;iDru2&C^la1oAQm5yE=uit^G-t8>_Ufy#_oC0OaK7&e?OB*nI%vV2$Oe
zY0bi62xxI3G0#x)f?V8E%?NwQj}<mJzvo2Lq&FqJK9nC!eSl`sj#0`SC_2c-%5}E1
z%n#K(mm>wqdMQz!$6Jsmz>I;&in2&Et$?#4r9qYfirzts6?8z7k+o7N=i+n|XMv)O
zKs^zZ)SNK#*~4(MXLds*LLL~Z)9W5vk<Da78lCZjiC{dy^r>pe2mKU=1iU|8KrXqG
zm8;6)!8cT5+9$69j`6)^CjdCjW}%yij$2=2&?UE5-N)y*ZkL(@UJsy5r$sN(3i6tv
zT=o_aap2@dwwyJONMJ2#84yB3J}8I-XNii`yVuLd1Wl7S1vCc+A>(^^ME}g)5|<*u
z;y{L}kUs+9)EZqlW<iLdCLQj<iyUJvj&OpggfP#x6+u_6ZJKC%zh~rI8o2@})b8~F
zArE>eN8W(x&tLLtAEdSZp#rN@sTacbcw0hDkWM%kvrQPP4&Eg`4S5a!;!z1eEtLU)
zwwBQwD<->CuWnECw^%V*ag1T?$bz?_P``eyh_d-%*8lzuOBk<{vDRVz`vv9jY(gL_
z_R_xA^V^d*UY(4Pk6rHyU$=)ZNuAm`tz1>v=<591f5Rh8RN_xMWD(~V((g2#$R@5Q
zM|>({3Rz=^z-dZ51nDK9lkNu4OL-b#Fs1Bof%G<DiJq<(I7V+9ekw@I-ym<0K;wFg
zq!U`F6|G+^n<P@I!|WgYh$;PK2M9AesFzTEt6vVyB<$BQQH+J^>>ZF3!Fc5j1G-!U
zB4ZYisY+=Q?H8X*!ltEoN(|l(s}~3`;-(h+Cah!n_0HC}&j@N*1m*8vk~)ESLK=3R
zay5Lz-%iv=Z%l~ztqk8W8M+o`1uQnmc=Yxi{gRMW!t1Z*zT+V-s)Siea{))OJ!#rS
zYZmvByxb8DvN@1F5Hrx_I#n{$s_5ntccENwPyO|?BQ&(d`;I|Ju}mI*DY4!$pmHzO
zQDr^GRNEkJac?McG7vg1yvsDK#BdJEXgsG@%p!6EfnavmpwH=OUbl4uwL&u>-hA=8
z;7imILo1^<gc#J-;BQ(4>l<gXLDP?TKSf4tc~@}zYhHgCIYUC1%-j7m1w&x8J&xK+
z@~)`6M1ArI>@yN^^tOG0>tLEH(pd`!&KcCe+JLUjYwC$E)Y@x2x0+}_U-VC@HIe^@
zdHnUumpso9s@6eF{e*V)%DEC5u&R=GEU+OT(cUx(YTBt5+iP;)Q!nR5xUpVeaZOsR
zi42lVk=cQ(-4Npr$72q2RAQc?Z%_2X2c2)doVJne9v<N6qZqe&Sx|eq;;wx_YQJ#I
z1dZlhO1%<sdyOKAyLf(d3145azv&X2Mc^}D+Wc?V@5ZBuJ5qoB|0grl-@8=YeAev>
z!+^9fqH^J)W2G5^tpK-{;$cX7dA2|okD>U?R6g>aK^)7#y`p-qFHyv0zL1K)Pzcq<
zl{Xe@H<kb~YcR?E+{~@*XEJ69*~K#>2b|_8n2}1@*+@-QlA9)M&eNkrRP%1$b#}sr
z-42<_o_BP}L0GFckUO1ekwDDlGNmcPPOhT~V<j9^q_b>dv92Tacl1n`c^PnfkuZAy
z$A)Ec%ik>ch^r8R-+apt>KX9s6_8895UF2jod#^S;iT6Q(~$HSHT}&v$VNV$UU5XF
z;YlJTpUwlr1WFjAihUDtse-)`$tYz}Z*rn@TdAKve4PsT`iM{yQl`}P_S&rFFZJ^p
z1G^9aVXQR(Q;azqs^qCbvwDhD=7frMtjspyi@w$ClxZK8q*`sfVXFbWy4oFtS1B!S
zS%CI02al2nMs1ua>Rb)b{rZ?amUXok_&&e4Q<^S&&aA(DMe1R}Q+bY26%xyJiVoJP
zO~SJO_udb1)BbxdG7sKSxx-SjRwmxGX;0Zs1IO@S-~8ZyCNlv37tTHMWMDfZo12I$
zDwxTl#zt!ur5=0{GPl2*?(Y!EDA~egTcDy!&rdBN-O~^(6A6xq6u3RvFf?6?C6SVv
zwP@wYOO;Vi0iC>5CMiY)wL8*71YAGlaBgbQJz08dL@NMB&CHGwo*1>@#touH60sUZ
zHv&2}tNS40wnN#8-2xfin+Bnz<+ritUA!Kr8g$Pn=LZ@3B~T4Nm{P8Q{>Q=Re8&bF
z;%Fa`pWgxEn<ird<&MDz$`afCJRv-RQHPHGkK`Tj<Ev^xw+Z1k{+h9KZ^}>kUgg~0
fuN=zpH~k;}LSu{XD8KFW<WGV0u$lh3{vZE8C1f>X

literal 0
HcmV?d00001

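The binary hunk above adds ram_tag_list.pickle, the tag vocabulary shipped with the RAM model. A quick inspection sketch, assuming the file is an ordinary pickled list of tag strings (the content itself is only readable after the patch is applied):

    # Sketch only: assumes ram_tag_list.pickle is a plain pickled Python list.
    import pickle

    with open('mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle', 'rb') as f:
        tag_list = pickle.load(f)
    print(len(tag_list), tag_list[:5])  # number of tags and a few sample entries
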
diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..4abe105e3b347ab63c1dd8ac25977853918635c5
GIT binary patch
literal 50796
zcmZ_1$!;vo)}Cjegp%sGw)ma>|DW^&9(bx*!O$g0uy4SyM;;ik2ZCVO1Hof<#kSZN
z`@Zisdv3B=>}I}_ipb27Zo$vHR%DUq$UfMIUu0XEkrAtT*Sl5>{<G!(_5b;=?0^4D
z``@(wWv?@ty*~cC{~>nn<L*+s^)LSI|CVjN70X-gRyDg`%{JF)6!$a5-CMg=>wR7q
zLrcZ_0Xy^Ijc_$jBV3*Mo??Bp_xi-1UUxb!H#t8}P8Wx>+596r<N3awU+;C+vyq$P
zU@NYy>puHu@vUu*R+u_#x9a@uy>Dgh-{JjE7e2n){wk-11)irC+b`MMO5FOTqt6`O
z)8!*ip>v|f?MPg<U!0ilb-U~dV~fT5xf(B@VPQ&*qw}o$$#2E2$#{F3f6m)a)WXVh
zvH6j`^Io-VVk{f(gd^Q5jBWKg8)|f}!<257op<>m?%<8=;Wc~R<tpAc%rC^faSj!S
zJ8`QUt`G7FUL<YCv;D9$%^h@TOOKaNT-d`@Ig_1slbw9OWiMOxoAfWwE>^?b;kV0q
z1>GlqUf|Pkt?qWY9*_B6x;CHWN%RxpZdr}baXYP=U!BCMW1X}5QY`Apd!PI2md_z}
zhW1^i3+vtpw?oCYK6=qy%<eyQ!r@Tyu}LG|KIW^h?beTcWIxQe8rl4a|3KWH>%Hn;
z`P4~1!w*!8%e8Q)FPeQ!73UgH;c7MB%rFAlt%30I95&yx$y1JIQ~I=F>o9EVH?sNT
zY;KVA<MKkBI?CVmnX}W8;^meLvyWL_xO)|EHnY|xr)7_qJXTz1EY`!;Sa?4Olc(0#
zjL7q1_aUtFi?!J4^j_w}<`t)TJHP&jEAQ=ArD*f9Yhmsv9~{ayM)|q;`#E3ut*1IV
z@4Yj0D*5T6HmA<ANd|;QEsbJxIqWa!b!;=XpThM^oYcc>Py01>tGCk-PlrNh!fzPg
zc#CR2x{-IbY4B?K0TZ$o7S6)UW9W{tlW~^sYaHYo!}*5#Is4e4>uBh)7j&q2`Sgb@
zmWHyuQypsl3O9_BO4i~P>Y>}t+Gp9>bm-3YH^b(K4c}@Q?`E^~Ft5ClpAPE*-UjoL
zk$jGasm9q>IMmU6`X%n@@w^f5c$l*N$XF@cdvr@`7flTVdOI9xbY|U?aIM)@3~uxX
zPZBPhkNF{Dq85KQv)9jj@?LE|?&@4|LO;}au>AL&iKlAp?8j3+U;3K8&Q<tiiVlS3
zF}^;ZI8Myj1gpmByJD3wphnzh8r1W_5&Dhy^*I@CN3&}N2EBR|?iqx&@Y>C;7VI0<
zz0LFI*ZaE3{$bqLx9i_FY~6;IW^#796Q>6Kp1tm5e$CD*Vlf|Ae`e>O%!sVbOI6vE
zT_VUBqEDO<j}V{;$HTa*-p)4pQaUY+Z5C_HraBGeY}jS;+c(-i2~CNWV(oXBSXE>2
zI?ia$^CB+A^2u)8y5T3hVG^On^kJuU=0Ms%a~md}!xG=Ip7tOt_IT6sgy&-fbhtih
zw|@Ew+3rCxJ{RW}89e-Ae4NggH09&UxV==JHww?koNTu(AK+Ru)bCKN4VlcUWJA18
zEgRXTk-c>|q1WArkN4rG8@i@2rcSb7ylFk&9~aw;44C|xm#Sv3qxqpat=Jsvy)m2y
zvi5GTdm4WaF<_E}otd*kwQxNynJ9HqXGZg!sIj2uskt(6;rpc+_@J8~+*zG4et`}M
zb1U)Jhs*15bQ*8l;f`-w)u`)g<m8__ah9P~ixX>cWz`2>m>V{ARSWZ-@U|SL7+Uqv
zJkx-AKXA#Gw?=*N$LX0cv!WA@kMqxqc=zrnAi;_QW>YmgdJB8{(YP}dnv?qV_pN-8
zPN@0GVV0u<-bQ(6^J9Dkb)DU+*nDz@&24Qkr2VwREH_D;XySOs$SjXeJQagqn_v2@
ze7x**nl4rUTLbwS53l16WAbZHn0vB!{I&<tWTN=%Dw7z++GiNw&p#Mfb#9W+T+-Vn
zC4Nf1nVJko7sc9xSuM3}ax>n$M5>146(7dM!I0X`4n3w%Jp0V1byOuDO7g^&Q=22T
zxV=@pjO0@iP}%e%|7e8s$1sY(<c3e#k_L(IvCmOYZKmqhjLx~8%kP*hX@7CJ>(Ah)
z1qP(1c0K=`Wlwzk2sbaLK3idaw!!~h)8-?sxUOMM!#}MWEv~HU95cbf?n~U`;%fMK
zw`o?%W;FfsnMD%{sK$3G#rctP9M}7M7+X5?VA?)IjUVOaVM|g>CfhIR-T1H^5AC#@
zyW$w-SI<VV-HysvDzB2>D%)L&o@``|Gp%vtCS-Vd$oJ`#nm5uoUV8|0%=QXTsFUYM
zY@Gefr>^qlk9=Y$96$7pzWz?l!19FhXuZG3v;|g4+wNKSMk$wT@o-VUGATi+`)ia}
zv&$EsZ!~O}R*RGMe3|J~_l9>)f3qCBH9s0POsiNDCYNhrVl&L$<c~5Nv)5<0Nf}$=
zS-LQ8?}j<Fh%A_ZT=@M=gXbxh^{wOHT-=sc>vh}V?Ibh9xmbE)l&goW5oR>Dyj_eh
zgf8kF4cZBvA%;;ry9h7MMqFJD9qEm5{a6Y-HT;F+$+ADRKDpWO@p08=h77zgkEOta
z?jw-0J#JdVaKji0;}80!kyAzhJG~Kim<`pqy;m&p32PECdOZz@)YgE|=ZQD%a5lm}
z&M(d%@(BiREiA5t<$X7Nc%*FOTF=b<?y7_G+vnb^UXQ;;heNg4+GjGvZRTPn?(3(K
z{l$j8>D`<)<aET@voyk*i@i_TQ2yb05^t1hvm6kI%<|L@9W02lEo(eo!t>|9C9V5o
zC>-n6(&p5%jjD8^tLXGw6Hg`>YRt)AXWG<eHG5wTCR(JLq*bcf&2<`u-x@xMIYfiM
z_TqKljLYlUtM$1ZH4~#elpeK~&fMqz1K$3o+|p4uqx`5nb~S(8jw=kudhdN!#`64R
z?^RkTZK5jY&~G(A&EEQlCe`7hjDMFa@%N*>=YUK#+EaVk$dy^$7}AenNA|56niGhb
z2Upze$qyuNbV$}k-5W)7#`F|Mp!S<Zl-g#+m8UIlSr4X7{j{vXT9~=S5@3q-Z-^5W
zCzyG)e0x3L-s^p!&NM{q@_&WaMt5he%{aN#2;;B3m0JU)BCNVb*EJJs*{h}njeJ6O
z7&AJ)n69#hECbn>7}FhoRVQqp#RG{QjET@O<;|I|=*wEXU#G#(_1ms0bkEnbFmV}l
zA+M+NQ8VGr%^+QcoVVV@DzeR42|9iy8@dVma<8+wJ$t<x13B)!xr<eFcEg%Us*%&U
zZ6fIGA|5d5tNGoSpHBldk?c`xOJa|ir}6UU4LN@%NQ&{<(4^CU&~K^x@<-lZW@fMR
z?!J__eJAdbD>L3Io8}W$Lf3BHoeE~Xvgtx5%$)k5U{9|zh2vs+wc7jmv>8<Q6Z+ql
z(-6D7zg}i*E8n88Yna>gR6LQbgr$%V)2M~!D@P40sL?1ohdxxp@)7b-Cs-qCes2pR
zsQZ451+!ezY1WUaZIi2-wYS`7!i3M)HTJW+2XkerarMc(Z=Dm5WNWbL&O5@;^hC`Y
zaq>_{m$8O4;KJKY+`QK_T<tQw;`C&`KC3%#E*8gWdKR0gA3g?ya?D+{FtimPrh4z1
zWX18dBQRm}(FZ}CZrQc+ytCIiU3k;B4_w3MMG2WumHq2&b?+u)>uV1u<nIXTuRX=Z
zLOA4=Yhiny>1$uArdhG|%U-<Rznj@~|G%3id#l{do<W0HGW-^6ZlK?0R3xAH@gxnN
zxm%x3j{Ibk(xccMn$og7YGG$2Y<*<cn0qyVregnhI6^TEgt=CBwHUWH^Up1_D}PFa
zOrD#Uuv1*L(op2I&$kky!4W)(TW9Jw8ZrwiOfYqIXV)wn@Fhmqu8%Gn`GkE4_B<ba
zJ5A?jzj!rs!P~k6zaCeaM|E%Ho733?zFoPwwV#bhH>IPGUAGd_=Fo=O0DSSc@WPnE
zM|nkk=oiOlnFVSrzVbCS`xXVg6t4^JYSWO1YPf=S&YC)YX%%L{u*XzV6@VeM5TZY@
zjX*dWVPepLjz8kf4T7POpNwUjE9M|!_zG+TQ!xEm&9<ifQ^ffu1JK>1?07pn(Et?O
z)xk;Qw72XXAqie}VfJt>ZaiYC7MmA(>TQYEFtY)K#%8`hs7=rc+3^+oixVX^lkfk^
z_Zg3sFmDFP{0lcppuqh@ekW%jU)r{}AMi%<f!#S=+K$^F{`MR-KgZ5j$JE)A?H<R?
zM?N4wk+pa>1pcSXMjCMmy9SeC8mrjx2#vd~GY**(TXD09zlaL&TG}S(v3E4GdM_v%
zo}4*-onRWik6^EA-o3HUJCJuTd+$qO>^}a!u$v5op<{&ZH|39E{FpVbi_ShniEE2?
zGm|VFD2{KDG~sQoI6ytg3PZ&ftLk~7p>OK6{QZ`;by_`6UFO5*@nn-*>Ew&E;zA=g
zyV=f$^i;*#AfH})Hpk4Psq|jd3H;6OX8On?n@O*)jHB#r7V@$u*_BKL9W_gnMvz50
zz`O*viqjt@oChFv#<<ciZa;d5=N~WGGBbXFMmQMxhAr0O(TrxZ9L|c#^(ueuIR$dj
zwmCYG?{wqp^tU}V0CO-k*Wls#EZdbZW`JSX>r<{iF~;l(>j0%FC>hjx;6I9;y}bLY
zk#9fd+fSL9wREvH_{k+1+p-TT*@0dUtF`zTMNIWQxBebC*!r7)b8;V1+jLu|j%4G`
zXmqzi*epKmL8l)~L+jgzv1WdVwFP{oof@F!QJbPQ-?UkZAbJj0j0QmXo4B}O*k@I3
z`#kx_rFT6NJ|C4|6eeC=Vtd2YF|sl4Z0qRqm<{4uw*DTPU~e*^ir3L%36s9=4TN@R
zX%LidSaLN@Xjgl03(-`3Ts<$2P|CGpP2)c7>5cM{PBFrmNb1<&kZQKKV)CM{hM{`)
z7L{-ykyswZ>EFn@h!3!V+i)fsM57pylI^`c+8*X4GP#<)-I^7MA@NH{lDza<T$}B6
zuifBb{KflE&Md{OA`x-X)b#NuFK!Zw7hJR#N1Se%s;c{*5>)=SIU?bxw_BW`fh`!j
zCTk`ygT*N@Y~r1L)5GOtX&f$_Djx{XryzwOPo)xJ)Sic#3G2N-!C58ehmW|5hev3p
z7z&7>TX-W=*|h1VRx=^Nx9#}A<gf9Uf7yeag=}s{7U%I(-jbUp04xESANFbpWh|3Q
z?_JtQ!boaJW(LrjJxMjLjk&SJH!{FU5G;*c_*T_F9F6Ss#&m81WwXm^15nat#OhF(
z30M(3u;RbM!_Y`UhO2oKY69}pKm~|en2LM!aGkNlQ6Vn^lJyqp4wK))2)5JwFBvur
zS~oQ{L)Xu7jhXym=DES(7R2}ax%bAcD{%&|teQ)($;ZavKs}o6`$K2$?I+hm@7io9
z4N}T{$wOfR)o`$6mPj=lAHgG6$Inh;U1rT$w7u@z{MGAv$Yh}34s$NOzwH@o+C%}1
zOy?6s;%<Mt`E`D2W#eC<vL$ty$|*!Pk7xzfC(ySMruMNlGNI%c%VTWj$DiixrK4f<
zAi*Zb54wZL>rw;HZ3<l$Y_NyBy>Mh|Zg{d-!Y?QAcoZIzzyR{op5rHPxRlj^GI8r>
zcx+7;i-sZ`;FQff1d=~q^%O>`DcW(mYePWu$#gCc@XVLDVgAE+Hc)#O&I4MRNCRg0
zEMJ}T;T_NK(l~Gb&ffHe`Rk|kUe|d&!Y2AL!#(`5gr*sYi^-StR<iDDK11C+BGf=Y
zHaR8?v$kddHes06ukdhr(<IE`Mt*~6t>LWF=ou8P;UaTU=v%S9ZbGRR7e?t>H+_nW
zF^kBdq?2Cv2Bxo>g@D=5!OgF&`+OduS1YXHlkrR0r=wNQabZr}AZb|(s!El<&-+`T
z+TuOxvj&XS>ueXBGTO5BR<^33P@ZgB5GMdNeP#qp@?y4Ev#BSOGqt$ejoT~U_7hCq
zqRZyludDbuk5eC=X2X2`AAI;Qw$~{3pA7r0h0V>lduLNm$X_PiQ;WmDh{z=0y@M&F
z`5zwk!KBpayr&t-<iBB0EjwKxf@$G498n;xk#Ibo-)NjFGQBCEmXVJCMuWHv5Hv!-
zMn=Nt@wkfiAd^6syq_dw!$5U3G<KQqcYO&4oS%+4;*C?s`3?Xj*gt{Hx{t?w<KwFR
zmiCyPQO!nAtk<xb_kF&CkRX~%vs9R(8ILt@;8^N4Kc?`X&4M1tx(B#hVf=_8Vg@hB
z(W$A@It{*V-!Iap<9$<mz+O0P+$waRdoLr!iPUp8Jsf`><Wsolgvoc&UW%4lTowB(
zfB%JyDbCF^xj#z5zO6HJIdE2Q@iNq_(<_BV)_E`+Om%v@7;Gy5uaI#qt}JB3FWEE(
zR$XE8^RA-Lf^BL582S8mnBmh8^xLI^tj77SyW-!l#UJXf>$`^jqpndu62Fw7vj%hB
zU%)rZ*jPF$r(;iARJwfvn+tLB;?sf{Bol%?dGprBdo};v@(5zKH|+C1I~=kH8sMj~
z)x-NTX0RLNSfu&bTpxKZmO7?9t6_H*r6L_@&H{h43z=YQ+)FCb-~mEY<D0e_bcD|h
zvpiVJ&hZYVrSL5LTPkq)c%Mz%l$zVmwg($=YKi}FJp(+!p@1!n7Xv+t`=|8jme)<<
zxjMEVyF#S!p10YlE=S<HMHC-*{D--a3FmQR+&>E{{MY~P-(y3?_J{Wyo*=H79`fki
z406r1Fh3QI0YTIpr&4>mW1$Jo#Gx5iH~G|yNqB=L=H@e}tAHXKI|JJ!KIrMBYGt1e
zcw8AR#+-BrD&LyOHw>OxCq85yxb%!6$yFfPHoL&EFZfdI$!~^`$2zq10m|-;qHQDi
z$nkje&UmptlQ1`w#*qnoM!XO|M*N|Jb=m!7_KUd${P<a{`UhIWa4SC0-3m{{(_Kff
zyzN&Z{KA;5t)dM|G499;9c3ocPY+YYnS*fqDJf>6w+zgKMG=o54o9QvXROZjC5h2$
zw3mh|+?KO!81oj>rHLZa$=2{MC9qTzvL2fgVP%wljyv42+${M%#Z}Ydqg5iW#t}ga
zy%&S?x@~vahQN<_I2d=28#o}@u>^@XvRjj~=4NZBon*JDA!fqDn=u#uC~&)`?iykF
zgny2GV?h*6ENRdJHR9c*O`5dX7YJEvHor5A1E<rX9ig=n<|k~%*VInY<f^wBHRaAe
zc!cs^O>p-aBm}cB&+cs8*Q4#pmM@3}>jiAn43;`vmtwt1gb@@$Uw&`ZG!aM>L#ZZ*
zexzftCMeRrp-6S@#2)|{p2o$u{hth|8vGf6Kk!}JG9ZXxp5yQko1W~4YH;>k(`A>(
zQN)7*--8X&A9ToD9@XH>TK;he)Ne8t&Al5obwfOgtaZ*mex3P|<lOqZO*<^XD_#mK
z!Gdpi*xGatIc`maD<)RjX-M`zfIZ|ZH%y&Izr`7v@_Vbr8|F5!nL{qMzOSP%TKYWz
z{d$dZv;3%Ed$I#iF>H{pJtcDXwFf_dA@cp4*j%=-34qTB#*zB5eV5;?W)>|8vuE@d
zxR|<N53pS$G`n)AiqOzuezRlxFSQM}MEu(R63<JUCVBKmb~LVodfD86c?j2&;Yj9-
zAM#k1w{3Q(ONcwTx$w6)Ojjktcy21m;55f!UrB7+H|X<tr1%iLK1{vg+E9kn2*X8#
z!1tie5}s|eCqw<s2z|fv0HDU`J{CX)&Xz93fEBUBEZJwiH$7Iv2!O<c*T4z#8U@@A
z#NCB>zLwp+N>+=CWXevlmAbSZFKbSXx}`H5+n-KuNQaKL43+vi)6CU@^5{8}&8dQE
z4-`U+3i@Wgzv_%7Z3;g~_u-yKnX2ZSNBI`UA0mA@yFE2mh=A{5npAUFSr@OZit*=K
zNu8|F7l_;tV44oWQ*qCmbGFo_#k#{&5T2UxafqAqH;fP3g^-`L6P|}@x3pPCOCY^9
z<NB?60F}50X!$2#Kt3xvRK)8+q+4g|fng&~JZG1tn1+9aN7Pv*n~)L4Ut{tC+q}|n
zCm_E;)oOTy-Pr@z!sG>Ua7b^uo3-sD*2CvzF}NE#W63C--{gEy5U!PnZT2)81R<a7
zxyhF>M&F8cXcDU8<~a1Jg!d=zMJ=u`$|MzppV|)05P&Ulxk|5#afQNKfazHhQ+VbL
z+b?_%3_G8Zprki?9Zr^Tq+!MjG|4-~`VCdePF@lECeAGEJpJhmjjVa(>W`pSxHn@X
z!QC}IUgzD>GI!PWL(N|%zU#@DemN;#Cfon}zu}kXZe*VuAF??AD9#Z%)qHm$-(hr1
z+K1bZ6r;MGBZO~}=AZrjP*lT&4;E<wKRF=>+<DH8j$8j+@kkAy{qCUZ)y-2)I>Op1
zzU6JBtEMQvV9qk4f2ik-hxlfw>j9ikiS%z6p98C$lkNX9=0aU>I(NuldRN!J9=Vzb
zW6w5P2l~EvUy;%G@SXV4IQ`ZiXizTk#?3C1L5CtH3c@+x(BEoya2pqQ_!U^lhK|+Z
z(=ZLQ%jyuJX_v@RE!(}$PG{Wvke_ICF&;d4r-hp_ND@p|P5qTEh^8ecrRtmg{m3&X
zG>7+9)1)v^h%gZWsw1{z^A^X)?FaauOq%pC-cHU0<g}e^_Qt1FY>mg(akvJWtmQYO
za0^qI`sl;RhoV!4X~`1C7m$xSsx=;FRvk3N3v<)2Y>2EVj-v6$=vd;5#)uQC3|Le5
z*&EA;WeZC-3*IH*qy`q8Nn#V#Y|30M+Xhn=?Q65wemaUFi*M~CJL=jeu7B;EK*pzg
zhP%1&0%n0NnQk6WhQsG?cja(*7$;6`@wQW2ASHxDVG8m2IgD?cysT*!EIjy-D26Z%
zmJQebGn@VeTqug*_6TE4y(DsM)jO7(6zyZ-^72E$d^hrIe4V;C+zH?u-?U#Z+eTdw
zh36fXc_-Y_QGo3C*#(0Hw_^fEB8@&j&>KG5W9Qt(KO+RHn8t?lPM;l5`Py{fMGT&K
zp<<=`Scg`CsN-{Zm`I4^y3{Fg*<euzNvwHchjI24;h?^<hAVcW>0#pBF_e->38DWe
zzn$PrdE$5m>*ys-Sl_hiiQ%_Y3=Z+J6a_S^xRH&#6vu+4%f^5=oMj&-5n%+CrrhNW
z<CA_PCVZHAwBM*09R8!%86;fX;6GtZ51!3UWea%PHE(1`>dx$b)nq+<_(>GD2BvVZ
zib=<x;cO_t&^)|kHp68~_UY17*hdWTTEdXsCM!C>iWhEPEv0iUA*yZkQ$knENJJI9
zC!c@kWHdyWljA4M;{pLz%f^@cz8?{G!;|IVhF4hw8Hh;t(zKVk3nybqV&OW_HgbwQ
zh7-ql>iQvM(&RPLg<o-fhVu4?ix%f2CwIgci;ijO+01W@hm&T~;Sj0e@DI`y$`+3l
z1{ax#M6h)K-7R<g_-k<b&$u<%duPt=$L)o9)iUp%7@ju!#FVtz{AG6A&vf$dA=m=t
zCCa4W64}jjEo=^+Er0*CIsK2oyWwP<&wRw#2L0aab>xY~k!J7X5Cb5S@neq4$i9Je
zxY8d40eN1<^W9s2ST2)M4w8VzWNnUyH@nt|%O-P;%Bh$G^a1edO~zhvFkvh<5kq>f
zbhEgmG>9km+s>!RcSiDExqtk1Ukj%=>uOJ27>7$m!|Bf6FrccIx0mv#DGQ<32m=)3
zGd5ckZ1+G@m<2V#f`WYaiF;!rwN4Zj6ARTCUskvqu6@3)XqfO(`TlZE0lj-5yK-D>
zz6t87pN5P0%7jE-C)GldhfG0n2!X?H<zKyHOAqwh{h`f|f{^O|P113M`6Np8?sUG(
zL)Ljnx?TALPFS){^P<xs+I~r~cuoXN-@G|3;v0Y4za7Xb9!4D#3f<A{3_z?dTzPc9
zPvt=LggF^wFdLbpdY+jf@<te9o(BU^ZQi^f2o1Ee3p2<E=o1A&oY!jffL=*kh>O3R
z*UBaE`_&cG$FX%}@~4_@jDwIcw~95q3B<;z#L4x3sgMTp!KdsDOJ%j#_(-vd=Y3#P
zJ!Ge2{1+V0eUaehMW%3NQUkNoVS7$zmiV-M7G<gs5_~i26l!=nuDsCa>?E4xL!C}G
zYABT56@PIW&Iu}|tL9cFDTN0r)&+fK(Ctm>;bPpbq3p9szC|5_hCa+D*ONg*m|LVv
z3wc1{l!zaQt9uy$^a9xEs53`Ea?M<;%H~_S<G1<@4a@~6*&|Jz;aBW`I3FWS$a*M_
zF!GU+-^oOEF?vcqk^1Um9Xk(nRBv(jZV3PYW$VSFO|IQJH&0_~5ZV7=IN^7L$e2)9
zatM(YcU?nkxj13g$Pk2m<~vKAx5ku_ajZNCeAu~s4#6z)Y1kG{7-x3FwOJDH*YV}k
z$434PaaD#Rf1S}oBY&B2ovpSRPx<0K#za1?{t-uJ1IzeZ<8Y@~)?CKVEw=geGRrWA
zT>lW#iRR@IdUeuy@j6Q|+1>)hEbbfED)Q6RypZE2Kdu_DUVB~Ubv6Gu_dJsl$3Sbn
zW{V^8mqlJj8aTHE6>dSt=SQPxRUOhqf72japiyquykS;nrFg+c69*kyr5wUzbw@U~
zi}gw)PAwOM0tMpqSZ=9gaS*<aR3w^<msBvc`405lAaY-<Vqr4>>Mm&II9d*iMr8}H
zv7W7eXv7gYH0L5tEfga|9v>pw;&BA%afGY0Rf9;uxP$;13-WDGAHi}%%2b`&Hy!x{
zKZvp4Py(Vs@#s1ONnmfaOzF`-w_vtz<LWKjTf`64*n9L@-1|6*v&c^z@YVQq<R%Rg
zjIL%UkS{kk!&i}i=ZZM2s;}wzs!{smHl{6uq{2t!{!sI*I~f=&adyqsL|n!_<D9xT
z`pm(6_JJX-4qo42iqVi#HI9&GoKf6Mj7W3P-M=_A&s@_l{x-q2G9JbS*vdnAli3|^
z-rUv*w`*Y+baDXY1z#*KoX1JWZwdHgmF^lUp4!&kKiE%$i1)5{e>U`itmLN3G5iqv
zrgRo2?dbw@#vbu&58r?kkpHi9s@ckie`oqM3Alq@6c~uVi5`6h_%o@$rd}K-P&8p!
z-y<D;0-H-e2Z0bb!Vw?i9-M{tH6zaNK<LayY^_+IAg9XXX5nx--rG>eE3(fVtzoLJ
zhJawF&tw<AGSbWhayNyD1r0x$T$6D9;m$oyq(@pA_~x(zg@_2nV`tq90z8IDR+8a5
z&BN%Eb1lB9&Y4~<_Fp`IgSJI<$Y1f(c=72zpi9^$H2Iteh4V1<)O&%EZZhcGIlvaq
zgv-uf%m}~*oD0{?umOM)kymfJfKyK7C@eChxzXP82f&#&hFryBm^DeLDX<~FDR{=J
zGNo+Lv)}F@5#}l_Tz@7?Bx+k2p8&hQrzL{d)~<b;3b8)Mu8m+UA?UkEK>V0Rb-sNa
zIpRtXt_v<CsWds?mE<~4y@$KWVjOSc2hjy~Hu%3CQ?#L2lw>W*O#dwo8kRblB@I+3
znTnX2I8kVX(hRpmEHQ2C@rv{MBtvY8YlE<)fJ$_xChPzk-}t9+1%(J1;zz{POerMg
zaCRy=Ada7Go(j^V-TDh+0d3U?D}4GI8`xep0*&dYfC{Of9==zY*UTWeJ$qO=)@eV~
zIj@fx*wX};basmQ0JeanN_C**<Epe)K7(l>x0i;=-U`7IHu-BGQb7bK9B_&0Mo~qw
z3Fxzz{GQGP#+MBONOeV&!A%<^Q@Ovc;L>ilpSIn5TgUz2{9d<(X$*GGO>pYMg2n-^
zIz5q3XyS#X1-H|S<8Cru?$<GGID|pu)^|L4%x9VBGN9s`3@*|Mk!0M;DZl1&RI^)R
zxFSZ77nQv(<_A(~|Neja_qx^Yk~4=a%ea~k5?>{dRSzHg;bxIdr=!|_y)O-OCe;9<
zVGnY#*plJHxH7<xQ7{u9Uk&DGBBgf8?)kRo{T!2xCkaO3|B~X+52ldWQvRkNjPFZ^
za!Nk0k`<qSEa-&g^?aDu7iLexVVvS&)oksG-Mq?QOf62P5nE_+Aj~E0l<5Z#<Gpx6
zApnHKf4Rqb7ROha1T`}G8Q0krPY9EEDgoFe30jm=roBchZ~fbn4ViNEn{B#H%6{P9
z^)t^sWM){poU)9tK$(4hay+09g$2>TMGU59ebdBBIwX5d?3tP$>UE{N&^pHdGrRBo
z(|I<fHKUP`jUXBX>5w@`BTRI&SHeuy4EN|0`=N?|9tT0X>fT_Y!(HlcGI;V2fhzc>
zag;I1VBS^?i-(!jJdE=f76<+jz<m!N8lu=6yAU=UjxA^8Bk!>*Wu0Fiz%=9*-iTm=
zEe#s9a9lDoI1!HUDUOw@t#G5fOY?jOFfs+Ler3Na6D`Tk%^it>Hr{9*RSu1rndu%`
zI1`Ai=e9;EVgU+|O%gMi)v))GT_wGJJ48eA7o`yQleg1oFk&+}Q48{f&j0wGZw%_C
zMCdzHO0#`VL7eTge9LKcWU?$}Z|8liSphQm1{M&38RR-4?oTG0e|^JDUx59MQkG(_
zZ<!ff55qGuaxetWnT-RqH8cVBEHzY*)SY&98af>^$=#lp?{D08H?;I#Whn?#CT*YO
z7y|jJb^CbNo(ffS0J+ZtA6_VeAsAktV4S!mk_}zSbJtvQz0`Id0&ZrQ)yxs0-K!gH
zy%{cC$+plsHSNdWw6H!R(qs@+8wTh&!oM_yd2!FlujHfHZ*~3>l8}0`KneLSa0B=i
z$QPa#BuDu9Lnet6byKAS4J2d55*}4U14cvw93nVR%q#ih^s0hr2C(2;;h5<8TeB`1
z%XD8L$S2Eu2F_Na#B~V$pRvdI`~;PBh!2h86$ge~0sik&Jlw8e_;YLgSpPABMYqT^
zlHJwlKLnd@Ctw7JTTLEskSJCh-|F)l5Br<3uRWZp0fAd30<$hbB=mVXzR8GpGE_Qx
z1~&?eKb<UB!SFXluw-i2p)-nqlQs#AJ@vXKcW)o%%3ARdYN4bQ#l&fnf$nZp?`_Xe
zE&9&$G^?JV%Q%o1y>Q@oQ*nInsX9QQ_Pclpc$i4p0WwNx@Lot???w@NPU$qu>`w$8
zXadcIKu-O_XV#N1pDAK$Jb3%w7x}&9DE7SwG|J*pK?QdtK>hsz&|N4Weg5tdg=6%w
za5ipaU3<f#7XJtgfvbQ6py~t-C50VfuOYH;8C@7A<`&2{!6o;IExN}>=AYWLM}iw1
zy<4A*ZY-<M;7S=6v^0(DUU?yF`bPuNlF94@%&>nMUnR1!F!-cj#746OOYcTngxQA!
zXylv$-vl&>YkLS8I@tFB6mS!_Y)Em0JE>3fIUZj<OjRi!u&63-<YuoE;+d(y3}g8@
zZfjn9!$cWmWb6zfdpJf^VWAcaoAGG^x-qlZ)Ef~gZJL=<9!k|Q(k9lNCum5{2vSX#
z7;%@8I7T}2*KJEIju5IPdMGY$cu3tF<<PF7_9(Mr*ki?^IsV!xP~u^6ztx|G><|00
zx@b616FY6^=oX7hel2@^1kuReWtx0HWJqw8<P@-Q9@QItP?rdbBfw#kB03vP*33pF
zEKEzW#8Q8UL&H7!>v_`}gDaypgHiLB*(k!GrpCyGF%<DqmqQb_{-UnpJ5I`|%?Age
z2I|>TZV{bxYEVf9!mK(L&}{SC-w3HiFiBT|1#Jkt@J7E|AQU>6$jcLG2%UG)uw?FF
zL%9CUK3|EQaOeE=L<!TTCVZzGKv@2k%u+FYvKk*R%+eP%9OsgW<*<B>=!13{#Z5Wd
z%B&qAe^6X<aeZ;4nC$5^b71R8JU+Z9HR(0TUKF?j>b>qmmaCHT_&wwgVFN{Hx^b;b
zOm%6G7#McQkVoygl-;CrW+wZcf(z_`rm~VA^>fmcsxnZan>gP)t3NmpjF!PyNpV*`
z`8}^zA!g6(;DoA7lM3@dVru5Y+s+KkuG?rhZXT!B+!|%R5zbei+=4beeoN&d1IqRo
zekUj0Hi7Ts>&Ca!kSQ;5HBn_5Nd{YsD8pMA0!YNiv2_twW!#WA@hAuiN1HuLJe1-b
zP#gUX0rnYyk3ABY82C{IscNo4(rn*vVY}o{59SS`;8AdRMTTx$zDaqg?hR~w%LM-^
zbB#(Z=cDn&Qim*EJ)Kjgg!`sFQh9kgG|;~gwUzQZ?HRsH;cu7Myq$P+Ogu;07=$yL
zbv$<7p`3vUL2xv)>TrgW$F|%I*L+Z1+@>Wz6N)FBP70;uWRNDOsv*+ICN9%v8u{hX
z^t<I|3HTN5SEs$od9wHcf3171;F#lmiE;La0u>Xh4S=LLF>h|~U;q36j_)pO`)!UO
zxnBqqFziThWCzX){KiLYtvI^J#DTWLIm9^QgDkLO@xYC_Y<k`joPig~#$`-mW4YX^
z<NAmYQ$9T58UID+O=$EQf#Q4KiZkFCW;%ogGS6!=@>4L;gax|i)~6>tIsA1!d2`mC
zziimkgh33Z^IHMROu&RxZZGDWm|>M-OH?X|c;4uBj&Xc_4|k@)ubA>yG9eA_Y)Pqt
z;Mughr9-70NSh^tBb{kepvont9m`7+=nItlqUX2iqhBYeW4R9)NFKo?^~H=6)zlH&
zOnC|<RZiwlhbAJb`NA*L<=EW2`5c%J2+JIusotYJ3ixs252a0poJD5MqBTCW0DZ@!
zn6ntlwlnTrm^1K$SUu#S2LHhm=eNa-P9>ET5dwkIFM0W?#7QkTVHxit#iXnfoU(ZU
z62cxvfYtWE5LCl`UhE5rkFtcELm4_brJ$SG-MpgYS}Kur4juGKJ!L64ep<e@mTw|?
z(Vtg6RSi)Ck*Co!wTH8h1*sl<edI#|-T3_zra{*%YFLzA#vo{$WY@L&R7xAFL4Z=_
z6=jqvhbrDIN)>spwV&`UbcZ2?I1S8-X^KaQA8p8k91bTPD=;iG6r079+j+=vB8?5T
zgZJXvzY7qcON9ftc*PgI?53h^FYnoqp%6!J&aq)YAdV?)pr^IA={B`F!5E8azXeO{
zeopCrX_M2yF|kV2SCl4f-+?FSb+(Cce<0?Jc~A+b>Ms7Kd@O5tb`eEf;<iV$YD!fg
z{_Me(<f2rvSB(TFTX+`ar^M5hRZpk)kPaz!dVUM<n88xEl|)*$rJ+*@sLYy6hZ2xt
zm%C-rzsjF5-Puxiyln!#3@BLurGInjek+dTXByy9K7*CMz_m>oE7Wj#U0_J?b2PHg
zwtX@oW(fghoU6JD;%ULdTx2sEEwmD5nPNmiuPjxy%K2_Dla56r!5b-MUFj;s!!0ll
zM}C5VMEr>og0kk8w;JK#%pOQ6q32xi*D&ci4c#u4@6cvMR1jozDjr8w5pzJG6?pd1
zWr-Vn?cvGHd;6O`CfEOFj~T9ivnSaL<(;{YxfMj-G}NRI6PLOcmX77}ds@N5CDWd+
z5y7>~n5%n(o;7y8Flu8yV9hR*nrA@<1fJSZPdTb?y?yrSQ4Eg7iQPinMg5|xWyzYT
zX2&b|xZVIf(QvUn-j;j;vAstC|CCEbV(U#8(s`c<<Dq2z(D7&7m0ja|e5yF>GT*q%
zw-^~f&y!(R;2LuSSB_1$_`sS7>A0{6)KfZ;L{O~La<SRWtT>wJ#HvMDclsCY6#X2!
z#YW>2j4%I1Qb=G2>L#heVBH&i=UBN(pTgpb)$1^5(m7GG4NO!*5qV}*{Zt=jSE943
zXRK6`t9lOMg4@ocJ`S553LVP!+B9qekTbrTctb*O;DHPij4~mG3y@M_Y@&WR(H-v~
znbnZGR4r9TC0f+WSn-MJI?d0rCR!@INOFRLw_r`v404Yt5s7k~)Of$L+7t?$(W?;)
zO1%113P>3O)CBTK0A8WHV{#8W*!~q1BUh<vl#mAKcZ-0Qi5pCf#Vg4&_w@`28sHV;
z(#&@=Gw!XB)y<pgK0pDg>L58-Y`&;LH7!w=sNAPUN#c~SFs)#ymnc*wJ}f0nazw0P
z?QuW!`yMaIDt_6FOd-j7FlF&?d!2D{<dv8<&~KMvpRYailcx=ey)x1=r2v1y%|vSP
zsbN7crtE|$qslRa;(g@SFoBqqez^9USp_6jyiy>WL9|9UyGB#06LdZm=dCdb%otDy
z>x8*^5314bi_l_;VW{e2{y}tpN49Ahjnko&cr>?9n?sPmpr80v4khyNCN`>MM8+2V
zK#5x`YiotaWCoZm8(unIu|vb514689JKB~e^Ancuf9Y9KAc2l@nrols=nxC%J6H<_
zfapyQDmO6(knRRO++1Nj^W0&koUIy04&J%7Rp?He@J>CQ3s$f)E=dr23J~dJA^hvG
zJ&(iOi1R~ooyQ*<h=X3oETb~N6AA9N091)SeIII6Jf<fUE;9*2UacBFUa5s)U$tWH
z3?oRjR?I;Yxx^R-<UXo4QF4yRo)l<_D+F0wafvwiYW#8})UT8QMH2ETR%DfIWFu^W
zP+Sp0;Ds=|%Gz2)OE(aL@tA=F;<u39$TwGTqh(kFFG<?yGa|K?5s;V}w{9SZFtyn<
z-J=O+4q&xk?il%Q9)wX<sA6RG5s3}D02QOdcvRtLG~6|F=f+WEbyK!&K0RZhBvN$N
z^QA;V{MP=X*qDd1-Ozv9#87k%!oxD>t@hmWuIB+r*_udzNlUu61i;5)ygH7nL|ifJ
zsUiHe=}a8oV+=lQMN3vFYh>^hEodlKRI&cs?_6>jtK!@#IS<-7zUv8}uwQ_*Sh^*3
zPcJq;Xy2(G)?S&{Sf>dfynis*qE2phAa<5=!O~_iw3>gKD6*p(lbiSO(QMiR6UqcK
zNj%WsUoy0R_V;`fmi7KlOKd`_5cFU=RL~gu{6pi0*mCDz#rC4PST@w=7{Px5k8hN*
zec|?^^MQz(Azl{AGd!QK+D|*WK2ph$RQ;CwHnB8PM;N@1CNdIc3IsqOx5@zZe!iI}
zR8l5^IEDIgYuM=wY{E#?=Lcl7I(&rQX;{AT52O!F!bX_>;E8!Za5s&AiY2@}IC7Ou
z4LZz++QV2z?dgQCnhaIqv2g?uUx)q@2Ui^1<s3d&u``n&p}iF1a_^glPX{<^_C4*;
z2m?*$=Py(#k@Hk~SETyArq{LxOcfXEG)&K#C}<7Y7%@G)+O+RnVg7PY{>o?as%<HS
zPU&>y)w$Y+8}5(-<J~T-V>TfgWuGYonl||@58?234)x02HXD&cCYm5cr~xQnel~s4
z(%ez*J65?#CqmG&Y4WXS0(CS*l+@g(7WJF*CiS(*s9`SF!odzaIUhP!MNM)B*oVPr
zb|l(n2?Ltv%%;f&oE7|Kv}A%KuJnq-B|f~aLP;K8hGUC^_D}nDXqJjm`^!{I!nX97
zi6g<^xY0(26NUhW=9-EpWLEL;{B?&fv{qqurW$r*w>D8U^WyV(d`9~X^xKMuD@gve
zr*r|$Ei+!Juv*<;=jdU)zLiy~_dN!YLAqY#pFtKB34PK}n4cQETeOqlm5F2U5l5w0
zYkt(_p9e+!MUYS4ASH!Z8n;CiH;U#laE1<~BHmUD>h(Rp8R`c~R2|CplsDpq|B92<
zKsD(<{NH9%t@w4AZyo1b0sy$0YBn^=cv%%Sl;ZXb6%y+|u;$?bQkEP0SbPo9SS?fp
zi0^U{nhV2B-*;k>sDKR>#qG&vw}Gk%x4aQm&Q@U3qz1f3@D_Y`r4TbgoKx!odBX|F
zxa(&7gUBPR-Hx9(A11&Rti&>5Y?Ed20jjwa3k0Oz+mRXvC3$A%*5r{*$(I$kWs8Hg
zT<*=cDm2%VX4d^se0}|_!T;j{U%i&SZf9?3D{LNu%!`JYBl*6|NI3_op32{nmvMpf
z8@{)gw)yB(JeuWl^T4Xb@L;Y?xH{5}sYo|@#!afSo@~NDeY4Lk>u1B~>o55j!j%*X
zgj6jXJCms<G?utiD|RPK(n!V1n=6cGje+b6;Ov3WVE;+@%odl$yy&1o@>RNzLW)gE
zA5CB=!vJtcMil!25h9y~A^y@ZD*)*$PNxa*It5t79;6DO>UvkmtI(5K83Dt0>e=Q(
zHqFWd;IQU^>};9jBl>zQAU1(qwY)W%&$=A}0R==%BeX2b#<~r^5gtZw_}OoNL-haj
zo%zlTB(87!6WiZPVadUX>({Oa04Bp2phL|YmJ3%!z?jcV0QQ+t$>Wv6MiEpoBrPVR
z8c5gpJLIf*gPavL%%Y{v)|)13f(V82t1OeH#^Y|IWY9dG&*7V<6RWX#z-kXXPd?Ok
z9vb)0uW#Mr!IR3mznv>CY*^`GdfK3rJm45A5)J2rHnag;tyq6NIiry#%1%m6MZ0?L
zq}Xqys#t|EtYdR62SV4P8GB<Ia1vuOxEWztdyOJ^{1DC(--{ZGdbWdUb7uYpP#S2!
z|M?&FAQa!g`_PgVm{?Q^8sfc65QWI_sS;{U5D1^O;vv)>#;5Of@ZGTmCb8`s#o}>j
zA#ZTZQ&I!>3e6{T@GE)efCtN`@rVtJ&_V(b*IVY#42W+(`U&(Y+fD>LtoXD?r$MR_
zPlG|91~fZ~?T2}d1jm5g<MPaIO;k(qCPCE#&}kD9WjI&4nVgoN>Ex+*VWuO_YOMKA
zn}_Ay7-qJlK{~%g+<X&j%|O+3GVwhtYg=xnqq3&l>CRfqS?e*?8ek!fF^$ap`GQGA
zL!lDOPiNG*Hwd2$+KIlFH7swij*3&HBr!VG^9BtARtkq<9fee7pnX!@)2o&L_@Dl7
z_1Ja4@WDX8jRE2?A7v=4&>u5`n4#`Kh);K;?2y7lbqP?2F|Xq$90w>5PQ@UNOygy8
zCj25ZZ3LtOsmE;tg_aJi^a#WLhAc79py4kyP<6@6ijA;34OIv%4GrzLq+QTM&+bvx
zn^_Be4BeQ%1@^hII721dHGeU&pzGw?VUTJliyy`U8&))Q|1#{VhbfL?$#|SLL*%yP
z;S2Y)vOfA!mW^)i$D<ist>H4te`D1g{z9Cffyrm#f&oHH%zT)xrGEI@Q|4SMvBW?k
zBK=3<`kmqEWk=%Ypku4q_9fsZ&&S8e9+@-xs9y7t90>>7<MgKpfR^<u+5HOvBrwIa
zDzskY)QIEXz0OCN+VQkVCbu!b&Lj1Q;JG@B-`XbPVJ2(8L+{>p(VvaA4MwcxE4TW_
zmX9qLdmCt}?hO^_Gt@GCI4iD_d-<@pWhyK3c(~q9BUK{oydIeSq|MNX52I!h{|JMa
z4IsgAD^#gXs~PoP)IjW{j)%j$&8MLD$@N!;LL&AON#X+_krV=;;d!8vD02E_XDC0$
z!Wm)m+^XwVJkUdg+juj7q*TDeqXtYW*FG|x%0xC48GbJ}LhqoJX~f;Vcp-=08%c)S
zPUO6Pl1O7q4WJ@(1qpne`=^eRL#DqfhRY13-%j(0#}(hvrF~j*hDmX<;%okV5i?)|
zj)Ogj7U)T8YCu8S8@DV5Act2=Re<zKz)dcm6p&FV(03p{=Dv`O(NPlVjeAiiP6pTX
z(ZV*ALT&3^C>tv@mFlKlt1W~inG~Z;wMn%gkbXkvmu*YA^8FqR02(9PPPdYMD-6gD
zT`M&t{*N}_m<o84|A3uKu1&-LgGuQN)W8ejJgy{rn4-!i2XRC8S!l?iWWrErVm;;X
zfdjJOFli+dYZx|N&+jh6%xp=FgF~e>KaJ-}EzDAGl9;>bQFT^6DZ2XP#W1#r%x}sq
zxtNPjC!sw~r6+a@0PAYn#;1Fq>6m7+;8F;c@4+ksYX(wpd*Mxb4JNX##pk!xzQ9;*
z@X{kXgno7}vhM=qk5tFwtJg7u!%U}4SokUPMd6xvvUwN;Zt9nbuH1ja%1yJRl0I{>
z+DW>0Saz;cwaT?$s=)n*@R=!IRy*pCs`$8xsjsg<5$ps3<F)mx-2N*7B$g6VX8G)G
zLwrdziEI;6d2c8fr=K6fTgG3)dpWm)K~-4d<~>r`g8j;f;swf_SfZS*xQ$0z<*}@k
z0omX4!X9x>T-Kzznb^Q8plMj)069wh%NOqW=OK%5e9-%tAmFI(vq3<wxrJD4$}(qv
z{CJ?A7W`S$&y*dwqFjTm(^QlE`W!J*jv`TEVx#a95xmITjVo<|ckb#a;DeOJBKD-B
zYH>w@PP~ko8e2EcAjau(*WhnZWnQhUMoRgD;zfFrAsB8YPjLjC-leLbuiC@7bez^3
zW~wmSo~$ebk*JqcxYM(mJIs)}XEnP4r8|tzk`eH|5l>qNWYxn`S1EeQA)-DRB|AIj
zb0Sxsz5l^@S!aUFDWKvFP*lf#;^tdEL(%9}j#3^bO`G*=I@2=4tHrMoS9@M>LC<iw
z>#vYcEn@7`W4PJ2Du~+5u`;v*WJmaTqS(hx!Lc7CTP|ElT){v0D#GkhR-e{vQf8_`
z94Ts#Q7~$e_gBlP5h(1W@qFjO%6iBg2rt~N&OGGx1=eV3A)yBJXYz`{`OoAe>11DY
zY3I@=4@8Pf<2>Q7T!ITw{9Qe8zi!ZyRnVE9EGwm*(rF$yW2on!7y)%}NbRWH6R{Mp
zeW5Ci8CjBs{7Rh5CnvS(ndGAX!h8Mc`~UU}Y7&ugq<fZ%qR%pRf=Ef0f=(~GF}x5j
zRgY<D9C@&S69%yQ+!_e4e%n<FK9fQrRu`T&ZUt?mp%*Q2L&t=sJiP3f@ToMr64ma;
z90i~ZIk7~O8$U3W&=?KLS1W$$4y;~azv&*9dS9G7q8;M2dmd#8*NS-<cY)GnIx@+f
zLM!aw7QhhbY_UtBmFAm#(R*1;%K!zyr*JC~h9T<O-&`PtWqCvZ2%F4xZ_9c#uaj_X
zQl=M+t`L+}wz66(G$tVm`h;;~d|KP2QftZ%fJ5}E{2${Dh`88UAqC9@S)E8OuUrgM
z#D)<G26h}r(3`5^SG-}9P~kw!lF?8BD66EY^tI35DJDzYR;xP5>*wbzhF<Y_T#kxU
zh(qfT_F#U9nSpem{5&Z3fH%H*>uC=>eCQ)4_%J8-;gNV-84Y*Cwva+i&)n2Pl<Ks#
zkMN<`KW!TE;lLqM#J3mDA}0*mmp^S7+cd}rYNQwY5<~!>ASKtAwT;;m*UVNqJPX^B
znAz^9C!)|m(&>G7KN)KIBT86pa9lUlaBzC>n43g0u0dE!8Bx>7gIr=sU0}3~5S@cD
zwll4Wu<eKGnhTr6MpnbgP%`<<Ued{@7)~`(a^tLr^{c&C1PT^MEuONV+9;}?r&XyG
z885YO75G!zijn^f;0k#H&)n*^8a0}#$;R@tM`HOQEpFN1KZ!PP<x9)ALJ!~elxG8f
zM2)`Wi<V-oN8x&B36+iF?WK%8Z(`457Z!^sY64gbDqG@#ip7>?0Wb=^@_Q*bCm+m@
zrI7dn`Aj7OIsiC{HRS|Fd+s_(%6uNM1MgXa5DD$vnlRc9qH7`<(D!fgLAEdTa-FS1
zDk9D>kEBK65_|(A`8>KdUOzL=D%rWHRsb(<wasE>G^pljve}{uzc(%QS@pSKsS#`9
zgnVPlzGXdMU1IW=_<UN2t&9SY&ZS@!a@tZ4<n?%9kp?g(htMIWkcG8MpE&INJ&*UR
zJ<!kZ#l*58;LrXZ<~IFfUHVfPL3vea_0|1^62(C^QP~*fQ!%i5Kp{1Z`>$;OpMST<
z?DaqYZfAM7bU9<Dq`$~X##dL*(<lqB>2g~jm4;JN)flDc>pIlJxO?ydJZ#$P5!#Qf
zO9ge?pYFyeSJ!c8<JNzxbe5`~8T#`V1}ECA;lX;}ND5(QnN1s@6(iB%AzB#^chOFz
z*)<b9{{S_r)mZHdz2A8)OXAcxy7sl||HuFJZ@<?%4b2IgyA{(}hJ9MKZ&V(`$W=$&
zhmeBPTl2*?Ow(113ty;50pR>&B}sAccNJ+v$+QgcsCmOC1D5Ru*P42*=4%W2oT4%g
zICFDv7(t@$Z35{wY6sFLrgB;=@}JV4_hll4IB=eGV6bJnw!|#bZB4&db5bTB4XK)f
zIgPTdQgpyfxPKC&@p2Epi#Qpq{8XvOT|l$GK-YLkx5SFJrXOY5!>QS>z+JW`TQ+@_
zU5uK36YOUx1%E22u&v^Y^fQNKjc7T(KN-H+=u-;pe(xFE?|26%@GFv(9?7^Ne*%M?
zf)h}jscb9FiGhTAQNzfiH1bjRbpIe(5Go-3)6g0mHkQfl_K<Ub4|bRbPkBRk81PC}
z6<Y=kb~&MLA}#;a>&(Vo0)3<G$v;_OjW<b4lNMV`1PO3{^%cL`>Rem=r{as!X!V1a
zM|I-Lt@j1(tV=Nsh8pn?^3-pWamMlpS_*i;vFJFx89thZ3BfoyP9)!y#D~?ZL=rBi
zVJUlRSQisA*n$CJ7nVc`TRNZ*TorFvFTw<9WFK-X3Gd{m0s;DX7;PRyD^c4r(E=ze
zGXRpcEdiaXC<uPv(}0!*Z+UI{Y_?z#7(V!LTD!OT%1XdqSxBYp6dEG2NUoY7A$u6o
zVw3$%!xsPX6<gqv;n(TeM%Eat?*4wS4&`l5fO1X_Tq>}6Ivj3xy-hwAnN5Zw{h2>C
zRfoYsrup?1yb_Nby1~thun=TQy_uWuX^Cd^(vIHR3T>n|v_@YYzz$ie1S=M&(~6C@
zx&G-ws~5rY#yTp%Sx)TF{>hA}5_ZNK+_Z3mTnLd5uh$%b5hum!^9$?t#X4w6;?(_+
zmc=lPObD1(S*~f_<&@Qh^QMFb^^Y~ul=ij>IA^tcGPA1gEXY9ch6Sjtjz0Qd{J=ke
zdWy9+kKG7}=eI$+Oz4j)PH5>DYR);Il8RN-4bY9O;oqL%ifdAopgd_q`1(7Y-<rru
ziKF==o~y!6ga0^GM8gm}UTZy(F>p?`5DiR%GYg1_3h#UH!aT{O@0BY{*jM$XGLW!&
z<+}LLR?eBd58vNYGmFk|<3k93%Vziq=GF?+h_~lL5v@JrV&a+o>WEZ;N?M8RWW{>z
z1SwrlYdpGwg<CR^jy$`7)z%&J;xvOYn9Ao?IUk>$)mk4@p6DcN4Vew@`Qx4=F9tOt
z`mboyhAiI0^dI&ij30e)lOUH2GbkX!kx)>R0E=n0GQw%T#NEnUwyF^?nfNvqi~`6w
zGd1#0y?162U<qcP)?49W1zQ;*ju=C{#==&mK)#ZjPyLih%2=kH0!n|JOt?}7%~X9-
z%S2F0f~FJU?<wQXS4~uLLlUabT&QW?pvn7SvL>vo*_t)AU@J%XC<6I4IieDev<-fh
z$81hiC4J#;ClMGX#@XYB91SLw#Ribe6r*>jL-qSzzwxMatMTlJFimT}OjD|wC<xJe
z6H7>NlrJbkuCJ{|Q6fyN@N@)7P&$VL0^e>w@Gi;|5`)|C!(R{96EXoW=eKlo;BeQ3
zh#_0!SMq4{id%V>makyIBYj)SAhq*HMgp0ETH>G&8&HVR)WeVX>y+y44SY?5nTbPC
zE@aj%3Va&oD#;YA)}vc=D53I#dl!sWLiT}c7+-<cBddajKFz!-7a|W=Ys@d+LD_rG
zLS;1pg(C?{lfrZ$XjR@AMj`y!bwiDql1E4{u3{X3DbdL8*V2-(56h{BP+F26U;v_n
zlo&iAdf6+Iu%38><)uv&{R4aXIEaP@%tvfm?y9~5H|oq=wOq12Q;lHipZ8vQ-a3Ej
zI_9xsUHM(h<JN>f4TuNA7;(J?aAm6{{E&J4aN9rydCeCli5Z#j_SCRYd?g+*bckka
zbsrjG=C_GeGn)50W{C(jwHHYx+4|A|CI^QonaqWB6yrm$F{y##BT}Pk5o}6Rk)e;f
z?6H#Kh0F+GgTJ8ORZAgiaUM!Xz8IuMeXypbSg}H`8i}xpNBpa8*l61C7>l^J7U|JK
ziwiOYdv8NVt><N?6nho#*GuDjcBx8Y`Vxjr*QHVS{<BG9)(nFmvI3448&1@c@nhTH
zNZndnf-|hMf5y={eYXh;;vH(#4%3M&NFug#qqv`N&0Xf=(vhBTjzU#KwURtDhJog8
z#OtTmlLW}HB51OXBcOvU5(=w}c;oXIot-w{@H(#%2q~HN`XQ~dbiHS`Bk>hAJXH6s
zZ8@Gx*jUs8Iu9p&x&h7Wy<wC|e<B;2yRUCEd+Y3=u+-;2g4+6Ml$XP>v?~3zjGG)f
z6-q24%iSe^=#+$QU%W<{+gibv(u$+Tafi-+jzlVHl>=$>E5KY+&fPpq#R_Zmwi<(z
zaazW4#Lf40m#L>>(J4K)()qzS)i7a|+8&zp{a!f>lQAV2mk(B)uoQ&kf01az3|>ml
z9-16d1sy*>?kt-+6@i+0V75^WgC6I=LjjOaQ)%99tWsPzId(~+sA2QQQj0j0&+S>+
zFN$z3_Fk5l3HEq68EKPqh!~jB0a1NW*-oPhM}U#((ERUgUi|}t?s7m-y3&T^nq-?a
zSYlTVn*^!tl8+_X!9(Kj1-nr4A3xeO^-tUtu{{9M_#wlvxDPNW#SrX8uyKWxrkYo&
zYNte*EEG>OYF(hqEZ|$xs=fXr3k}XoZAEMN#1_&zf95&4X}{$%<9qa@iCES8vV7L&
zvjeZ9gE&Tp5?6)^nO<GZOaoH*H27ppA8+6<IdC3kH;_rmHb`X?SoViiI%eEh^EMsG
za&=_0s&8#A7VW_o*D_>fwG>-WM095TIstlL2i=P9jLT(Lhx~3@2IXI&>@=AC;dri5
zVj*==Da*Cpbytcf*vjVKX3Y8{tHf8i0xDz}PuS(n*#B*q=yLHZyo@KeN^1xIp&2td
zoqjb1Q<rAkB*Qkg1{af|HSN6(kHl7`7~qb2JIjpf-?xk-Vd2;cpca}#I+-*F_)?;z
z+d^d=fXVAv0^S<Bahh^F<~>)kDJjg-KCO8pv7@$U)3DU?0Q&~NQZw`t4W_dAhAx&x
zHZW9Q{Db<q)jbCX5w-}oVq=a#I2sB4?}H7?H=iMzxx`TqkAN$v0=LOzR(ilw-~;$v
zvw-YhIDrMjww-*yMv5F;mw`6&d)d{CFS73aXuti>i7hmoXOfR!xkZTyj6cW)eOs7@
zO{1D03U|0Nq&IxVdpCH2-^Se3kpA{+FsvG~%j1j4?4QLO2q8v=x61`mHF&9_rA#hB
za!7h`a;TQqRSgy0VUwb6rfpc4O;sR-VqzFlG>vxt_`9rZS8)f%Yv4&~STVeBNIrx;
zhz$=sB)3JSP=A$1_CB1ckQ=LWK5KK9^4tcRwk`~!s$}=zWdrDDEzV>`8tsSbP*=2U
zK}om-{my^mDUEu3o|>YZzzy@^6_0&*16$AxCE(AoaQYdhKB*4jHeCL=Yv>-y5Q&OF
zu+w^5;m@?}42wO2Fj(+Kp15Io-e5`LcI#gvq8iAz21_^ia_=5;qN}`Nm$EvC#i7zd
zEl|%!5Lhm%%m_hVS3EF+Zp%&tL+4~!EK600$fE3H34KG!q-Am9jLZ9{x6?eCn^Hpw
z`DB=lfLW6VrVF7$D0`P!>WFb*2Aql|ahVp60vwvij_rdm{}s<A&ZAl>Gys`Q)PiWK
zv^#rBZj2IIvR6K)$_xV$Ia|o4x>fy}B?}3#s*aVEZO-GzCvS>cxh|%g1oHH1h!ra?
zAo)jwu60w`H_aWwh`UXWpOH!B@sDj$QwsgpKn8eqDc|Of`B2N^sm$6X8a}oegmIR4
zs{^}{DOxIgJRt)vQ(<}qR<K9NTh^mn9wi2vITZ}!LyTQb1Y1DXfgpjlyX@53l~zjL
zmMTU5X$=5;2EubPh`r&q16eNFq@E;PAF5UmzN`x1h-nVJy`T7@dqTf5F9Jns)nLHH
zyjonHOb7sIMx&YeMy`K;G%7u16a|Tjx8(IaMknD@Fv#KqgO;vLS#931uYxGmPIR4p
z8wh(^GZv%T#^pa~lej@^lbsFL=kTaFrkZ&YlvQ)WOodvGyjA{A8I)HEOf5T0_UV=d
zEGiCrDdu;z>aINsG2bPOs_KB$9Ws$wEhy;xB6V4ek0Y&;V!DE6hC^n`*DCOW>lu~v
zr{JgIP6CPVCzH))#UFHM+~mz^-$@}rA9~sAYW5~oPB}KfFEzdv<2>{Xf->Bz*r(w)
zR%r2lw*gDn@f?qf=$Ww6ruDU95#^Q~$w(>Fx3B|zQP@A^_Fv6`8L7~o*@h+y&PJ%>
zY5oGy;jsqv4G<%q+;JmENZ4t=S=Uik&DlR@@!$SACJbbcak9;I3aaR(_-i>*;>7?8
z>-_bX;$heQ5so@VMNir}k?5B#ij_hJ|C!C1l*bFc$=&k7R8(4sp_f-<WehQ$gv?-a
zQ&gJMRP$sIsZfUp2Tu>}X`v-$uK7CSf?RDqr+7%gS>^^_cv^Y9T#1qAM;hTojuiW^
z$=Pd9<6k8$wbsmU7+Mq|P$O>pYOpv6Ajs_$J)EC4DFOxl!_Y~n0a{$qtijZ_fH8cQ
zr#Pd>P!ge*4TPz648~1^%40740G%n;swvur$Ug2|81shz@I(A)BfDI&+BY>{xX&e_
zm*Vrb*GBd)n(dxD;1)J__+h=RHIl4m5k_S*i=Be0>MJ3NsiMM2+S$jXXhVeHS#{Kw
zY$abjpEhNCG-O^pGyXY$N9R)tdMHf52UJ3+IbHn*7z?X*k9yxJtzrzLhmP>IB(N-Q
zQdqRj>UIiD7UNS;jC9WOLY&jh!}Tg6Rxe@=|5QDUtvQ}q0h-t{^RA>16+_4USTBW9
zi*-Gi0Q)1R68RU8YRm4gXA{5VS(P~y<u8gGNOaa^s$6p|qhpEJ=TC0eg2WlereEk5
zK!a?P1;?#4B1Z{UK;1bZpDrJvJ{_MwZ3?wmrW*(3W6U;r9cO2M-y^KmS)d7GWy(EK
zYgnCd^U4P`Q?9POAReBARo9@k7`RuiNl9$OYto)=NLk1L?3Za@9ts|`aUs#ar<hTC
zFDh^zUu8{A#q5l#wmj-zh}V+nsv=afAx6bgU>7!<Zb}NJF~(p)lgc8TW%j1guS6L)
z9(ylloIUNvH?*Cqev&<^X@!wbr72sYL~29Jxk5F1aQ-w6a~QS9*G<)-F27Cg9$}}d
z1e6d_)s9s=<#B4iEf_$S>ya0(0b%g9^0a=yHKPd^TYNX0SrYZ`S;HBY*($M*CHHrw
zoa(-(WHw<d4|2W_3UBwX@<65O^19y>!#qie`*Ry9>27_g_4U#C-6P2azff<td|gn%
zG>yuid`VW7V6mMzC0_$8iF49siSY26y*GWNY}yrVC6^$@bs7hzGr=%`NW}2{mLT>$
zS&RhnK_1xGz-2vjIBx1RYxsmCcPB0IU=8eVcVnTOS@>tb8)0IsIPeN(EEp!$z~8WZ
zWU_?G_W)zuz<%WUZwCdl*~@H!LC4cR=bu;LTy(7OL{{<Zw$#=D?<hoe_XI!D0RcP~
z2Qe4`S^LgpOoa)peqhlgTZ2f;GYHtJyLOauc4_7(87T*HkVJAcmmfJtri}|WRyZ^2
zVu~RppVVSV*8bTqewCb4rSHM17_vI=72{wr<W>nC+lw=RWq9ycpr#l7w{|M`Y^$7D
zKSO+-;LM&amEt`Q9R*I3W`Rgf6*5ot*yJPGD#86(;6!JQA7ecCS0M*kvSH6EXoAJc
zb<|jsfO7LDj7b`?YTL?*!fG@wxN_8%H7vi1FPWd=6d#!cDcA4MFN|_=i=cYmBn!fN
znBu1<v4~~xEmD;6ejRLwhXjY=JMtGJn3uwk*GI>kUu-2v)Yl%^oHxz}#BNr9wxG%8
zycgz(%U-5wKsTAaWSI?W{QCrl?w6=_tXAXERvunaRk3JN7>Yw$;8rh~k|@U~{d4Gn
z*B~un2|#BM6yQZsh0(F>;$X_jC3C49Fc>P7lpacW?CdP7M{q3{vM0HD5Gt+6K7kdZ
zf}lJv4LGV~Z?ilGc)17qGHTTOGlK$6f9g;|eh0F%L3E3Z+f?g>(Sl$EezpaPlmTiT
z3#Wr>`Ch|`ezr~j#ITrXSgz<_veTo6Vk9CA;*qp_AJcv66~sU%QDiHum4Ja#LpAWL
z-oJ{pi3<T{;5>(ZCJO6GU1Z&dY{4~@@*`N%TfMwjlBM>Ddus%1&xC2@?(hDf)VkPK
z-cpShl#$A9YGl~{@Wr686}0)x2HJ+n3p3_a`YG9Da~&3pcI!M$d)@DU=u^)ceJ+yT
zVPcFW5_m%yT(qqOU0%XMlqckC1mj*%1<lK$DV^<2YBfw;CqQU|5R(}|<vL`n(c{oT
zB-r9eA<o@4Q&3)>)UcAL{y<Y<MG^RF^o=Ri>QK$YnhIA?kk?js^0jgkMonSUeaRC+
z2C2@AGemd|!(+9|vKqyQL>!HLL_aLL!2BzL1K;VT=X6>pokqRRbfIf{F0^%P^+V#b
z*sVmaF~;sIYc75_aQNC@`-BB|PmPNEJ^RA072%_rovvAU5z9`w8TMfPb)85)!64U*
zlujdJ^O=9v(X52DDS+Wj79ZpqcaQLUUDRN#<=1D)sUF?UPw`i4c*0cOHtm2+R%LE$
z5A@sE1}HDRU8>uSo1pH(mCL%Yqy<he3~g3cunHuvta4R+Bw1$x7urrZ@T3985XLEc
ziu*GY2*{AH*pdd>oHq5=-`o&y8`t=Upy^>M7xZVDhBv=jEyp=!KBeqM4YuGuHcEcW
zG$Bm(<g%k&*bX+-wh@6X7iHta%j!_V#KJicR8P5;O=?S5r3#srq9rEum{cs8T1G>Z
zw)jLw*XSBWgRG9HhK?=Nf!$_hp$S1@QOk75f0Hk$irTtcD%yzZLV=;Wrk&$V!`Tls
z;_(TG<SEFv{~113=|@MV%f(|bjK(;hj7caYx(``r7TU}ouL$b6W#(-?fbW#bOl^<N
zr2U>XYv+{Z%*#tW;Fy-;)EvV+HBT7B@yg#w*%m4(q&=R4C9A&VjpL+;x0hri51;2_
zOmqgpG9T4MKS|bfa#|+Db6Th)K%z4@<Zl9|)eunXhfJ2Fwi~Z1VU>dVfH!Zfg~qt_
z#lFPJI+A?O$lW&IP0q7`%v#)dks+m$eX2&?Q$EN!FfE(D?inoLaDv@NQT9_eVX>`K
z;!ShL17PfNXHy=4*DYqLRTG?L(A`LMN5E0CFaSG!xjV8pSw~%aNRxf-c*D61<o;5{
zh*zM@9sBv~*V3@*T6LeFLn@Rts?pLnrkR13h*1e*YX3ECL!hBZ*ZXyYW?FG?d*1TF
z>V%(oQfCzC9;tGL!cy#a+suvR?`;Q*aG|_>k@T<0bkd9Y=l3yoXbNw62^kM{l{ETv
z$h4mxL#py^%sR3>w7nu#Y==s(v46+}lpWG%w1&?ofj+~gd*IG!4PdwidwdyTz_LTc
zuQARX2#gvfM0Re|NIUzB7NZy0R5(ci$~q~Z;P1i@RyA?uE7?cp$>X^MQpU1#TQc@&
zIMS?!@{_7scuZzYCD}ie<r0Hc9UiO>hL#gi_Gl$RJ;0-+Ux>dbIu{xKE_g9`;`pLo
zxe57x#e;V+`Qp>EC+p^qmw4XUg{d$?NLFoG{CMS@{vcTzLUkifz0>07@uQt$QLZWg
zA6jox+&{bZ#!N9FUs+US>YEiM=AgffePeEN;d+q<{*~SK!W;vV)0gR^^a*9?>tv7H
z=_-p_&@hoJV@V*45u?*hEK<dO`?Tnx3-(h^J=*|JfX1W-NG)xOJ<@hfmdfmYl{BQ_
z<Of=ixn>+?6H^DK+Djx~Y3{`CTy^a%+QV0=O=J04fBG^ARE$y};BK#2`wel0C&8*V
zz7BroFCvT#5qOkg>;Vr73JOm}!Z0H*NiYz)P0bAg_ISvAX{s`^$=?(?zJv0LGf*p<
z9c4ur`PTD4j5eqoR-?SDJ`4FMrJ$~s*@ktrl7_EU7*n5E7Gf%$agF1*uGqQLZ%QVG
z%QV*g%b#sHCelBml7fi#DOWG3@Xd3oIIKzu4-ozrI&SOQIVMJI($H~W_!ox}<K!=1
z(<K#4dH3FA!0`zP6B!+$u8O_glVY8ud70@n*i%YNftwE8A`aMZalMLv6w!n%7!2Et
zMiM}&C~M8DvwBX9mb0;XKulMw7j991qb!s}Q4IB+*fqb7X#K0f_Q^c16ROnWFEeuv
zWF*g)x+U98<9gcMJn-@~ezN^sd0{#;#cRLc1d)f$AX#ARq4|QY^l<Nu<!*5<hyfk*
z9aBR|DwVisbN*3FG$r{@xxOihGvENLkq=);jj{lA#S_r_!Qe6+QOSieS6E4dYm2*2
zpXToC>KD-+LbECqKGP67dmsnZwETbbpWh~k9H2>{EEezc?EKGf6qFbYRz@{-m#jmr
zh9UA;xn4@iys3G^U6SJb6a38eW_TM-*-M=n9DYbAdXnQeTlS>isQDg)SN`zqHt1G!
zCE->EvLSMRWyCkK-PP=pT$+>wS8=>oNgZO8;oKZxb0?9ywridzPW6o`MVaQlCvKlP
zJsg&CXkJn=)ZT7~TbC+R=$kf6&yF{;-x^4D_rDT0v?iHH(ECd<0A^Wf1T0TA+-u7h
zI|N$wkk+t=toPg2V`)>9)RUY}oNG#Y4_yaM<b|NQ5g>l@)tJa=X#ZZhYDg(l$RcA_
z_+i{&PnL3iU$V78LG6FB9K8)QhA2AtN1S+QXie3`kY|z27pMeIebU<DA}W$~wJtJ|
z<w5{$;-hHi>nJN@q(L^{jMErEDLu-6pjbwx;Pm4~A=&pmB8(e|0pylY-hA;oTaHtf
zaH-U<0~OkDrLbsNQBVpTJMn??ZCmk5gNa5k%TJiTNYsB%ip*FdSj+_j{aN6~i2d*v
zfws-K|IRl3p;$CrHqDhgsr#=~@$RY#RI1Gi%T-F@24*Z~<|FL{+)WSr{xbRl6>rkr
z6lXE0Bc9K|m$~rUA{iQ$1a34nmA(JN5r{^hY`35dm{Jt~kU*f}5uh?B2QChY+o^oN
z1o*d2ccIxsoKp|_dkB=%!kg(T=1bB(po-Qjx;!mtlg=r*W%xU*?idS?Hf-8|i)osB
zL5}H3q#O?!r<*Rivv-!(VOrtMG55``dL2u(bEa61{L;&9@Xo1^7>2%9@Y|(DiL2zd
zT8&;SfR&PE>Nu$Ja6^k!Xt5EHe~(@LWMLC3VTsxUpDAI=pgo%`YT-NSUl?h+1WRil
zAzY*89il9GI+HKp&K&K%Z@S^AZ>h?)P<f`Q`Dw0fHNJ0|{`%7!M4&JtzvF27mfd9r
z5VtBurV_!I+)%4Gg%w1@#7M38K8v-G4GY2`Wat@2JT2qQ3Tsq6bCJJpMW}{QTgC}O
zzz^Zqn7UDc5_jyw5j;v>jS{eG`07u({{yT5xf|uWPguhjq}$oJqO1_5#3tP@3kzpn
zo{Yb7T@xLFL)-EX>ixDwPl#^*Mg&>dGTksm1lJ#vn&Zx@NU5dS%s;_Bzvg$H6|D!r
z=@zk+{Ee)8>Msg<oMfTx6qUmZW<E77H--fo)F9zA*$7Wf#GXDB@L+4m+!>NCQ~-fr
zE+Z7*OCqGUok>@%2z5*AOQubMhqj=XY9zc$b%&eJw3L7~!u%<}DDattEh_@0@_=6R
zb$-fe{>n%hr>=#3IO!R=30eSMd>}1w3nZJeCh0BX+VbmalvNOtiQ%5N3dY|tLCqt)
z*);E?JV8(dL>0KhniGgZ)<MwyA&)&sUhy}5YTIKkZq%{|iYtIimu(Yz;`+#ifoJ%j
zrR;5S4J~IiBPcMEj{79Bq^uC*9_buKb}C^K$);6jTq~$9f~6sao0shLmQk5uMIK!^
zKUP0#)#u#mVMKdIep!I+pZ%vege8;y^CxqXf@W!P)Nj=v<v(eWns0nhV-gTBNft1H
z&khg%cLn3Dtdo^J;qgmL6ybFOj7dHlW$>6?DkR>Vb!G(M7LS}ikHO_|`hs3%XGlE(
z@!yKA5gs;u_s<be4ac;#qPbRb`A4Gp*vJhQ?#^!^v8VbtaCayIwxrHDT+WZ?Eoj}Z
z%kZx$GQ<aOP?W;{^KAhos(xBf8}`OA)#tp<H^SLpI-i<0@T3Y4-rioQSD%k7uGutB
zO%)%~UZoaKUioKnQbx}U3_oGf*7!zA1G`_egvSc&^>MjY?bDK_RGxd{-OLiUou0PC
zQ59FSJ?iO-MYpRsy-tzhs+5w+s$eR1QD-p;j9Vya28O52-s^Ps3YPMZ<PpV(i$+{r
zW?A`0m{?@JVRhWppmL5pGG%S;d|;KV3nQZZ<$n}i=KTN@h6D6fAbxGBdt!E}*36iG
zdTtG{MQP$n?EydC%s;nWnsFMu2X%-wq!UNx%y?5wS(FInJC#S@+GM2K9;P3BaZT5<
zMlM1Uj`%ph6g&IeQg0aYVg<b*!<5U?_#yLphzp8~`YjGu^F0X~PB{;C!cO@_D>QG-
zgZo2^Ze89qa3Ik-hIUjXc0K4$&ZCyOFIPdh_S%zGc#lx-ctmOlSRF>J2Q|Q?^*L{r
z-7dlE^(;cbL69%KlC8&Q4RR3ZvKoMN96C34>OjBEDui$XI1RR}mxlG#XhrAs&v>u0
z47%KU&=)|pLa=G6?zpj%km;dConwhR4gz|mYP6km#(SMDCNjSj2e&E0#wg5}^lDaY
zP$C>URWWsKc}}cT*4CL?d<~KBBpPrH7-D8XlFBmPval^>u*SofEJVk-IXNXKQ(60%
z@H?X!TjAQRNcE?QZO40b@*_VDxX$3H!{p5NR)miTbE>hk3_;VjLs}w}2$*`9zh;wH
zSksAXDe3`NO*B!P&r07PksH7{DTx=RK;oOL&Y8E}JHqY9pw!9K*;8*1$kn+5c^Sp3
zv&8^OzTL>51h9fo<V}4t9I+@%LbO;KDrBo-w`qYL69*(%;3EFQ&~Za5TfWF!=%t!B
zEMX?K4JfIkavzD*+-EmeF(1vXN_7Pti_cgD-80xG*S*dYd#p;{pW?erUZ~J1vwdR}
z)5Jsf#zLYGf8bVS!@n9bcC^T7jk$tY$I#RComD@=5+&O1@ieij6jwqiD<@6`vkYYi
z5zU()PDJJohB5PqXRrx9z{KihaY44QmNB!++-2}u*-<EEw1vlQwccLGtY&s9k1Cyj
zt!)qj5ziCMZJ&5lRB&5D!7@wH_J#>CEW1=MhfYt#gG9ig66`EYoIYSsn!~DkN&nhC
zQ`-=N)b@}?N!F{Wo&yDPEJ&0;cH$aJ3t(d}6N6Q;CmL>kpyI$332qeaW4?rM_WcIm
zq+oK`1C;^W(8;uImEg4b1sCxOs)b2D$SC>A31Th#q4bgybI;zIGOlu+`DaVxrp++c
zOs-}+>KUc!DEP4FlcS)#BE6>&^LXCCYc&f8bfKSG{D)-!`$<D1WA@BCv6>U}7LP~T
zgf|ycbApZlVx1$<?z*5H3O`7s#*uH7VemwnNORK`Nn_UItI*BzSa#$`LojrNo;|i2
zl@7U&taHReu|3W=yjd<T<drMQ60)m_dS)d)4|^c-ax%T*aRb@TzcFS&1m>uU)qsLx
z{*YaaSTQ5kCz#(+p3|#lweCK;;ij*mymP2}-8aEh$1AC1PuXy`q|Paclx2YtNVE!`
z_gCx2Bk=_k(KpFQ<kMTxQ`Hhfa9dGj8jRayb56Gt2)%LYRvN~tlO!1!PMX5tbBI2l
z+REp6XE0{0fDcdxo(hM|#a21}tJw^hUx*K-|Dx?nEtQnNg^Wr=K$+1${WNqQBx#kK
zQ!p@H3s6nJ^*8+-Hnm{#`VFPU(1>~-H!CcAmk;lbGJs3elHD*3p^oghpa-C}z!a)W
zDyRH8^u%>&;(X)0E@A%IsG?BL5gZBWFs<-j3##+<Ldh=1nLA`m6Ax<OzgqEA6|C0K
zX$pP-(6yKjx>Jy|_8@b_Vqisv1;}q|z=-$y1-E1eq9;&ta&UX^pUx95YqLVJAI2hc
z%nzu8Maa?O4T`*22&dx2xMFS4!+D&M?++Hgc-Csa(vSEeUIBA`BcCClBBaowz<-fj
zy7bg=294&SmgV@F?J_6l7-foi5Ft=#i(Gr#SynvA+rLuqkD^SLmFs&-WTZ-BlP!o&
z6d^5ljaG#P!OsJfSxFVydAM=5o~Bp&JR@!|ht*^32{cjY1?6L8aHT_19zLk7Q^o0l
zU(BZFJMO7;)ax8Gy4Sf4H_9ZF%C<xOvIBfo06ONPYXC+oBsGnKFPl{(k;x?2Pz$h$
zYl}IX>=VvURS&)H6W{zY92jV_O&{`$Rq9;lZ9213j4`fDg0ortfMK>S2h}n!v2n3M
zor8-xf%44eF|1Xr$=ilck$idOo0?U}bMCvx(N6=L5?J7NQIBL67Im7E(Zy8ua8#L-
zrM09nj!iZWc>BAdO!-aj={N7w!Lrj4%fLtKunzAfI^Ss8Uz??_^SV+pc=W~fj}c9|
zQasv{XW>=t2iLj8naY$4XGJ7yS19_(Gh<mL8X5{i@lO^KV&Mj}MR6NFBfLx%21W9U
zesN;0*Oie*%-Gkx({cw`zxw)hr8q52^|d7@C$oYIT}~}oIm}x4vy~{K%^&<_e)EtG
zCD7O+1$Lm;Kfo^u*xee|^ZmX^Y35$8w{#ECcpkTLl#9P#)fB|WRTPV5eZx!&Xt2qm
zdAcDDv1=&#!I}<VN4=UIwYr$KemTMfu16wXTEdz}>iA%JmEyv>n=EGi9OwJ@_Fr))
zSSPy(rJ+J~_F$AdNHbr{EUMt8;o8}tO7(I%K^qfXKXsI(8S0X=IB7|z#Fwzj1Gao0
zC-f^d-#%jD=-0_O7VcEZw`8x2p=SlJNc!*)1fqd(tA1!sYbSH2xp!%by4PNo@MXa*
zRro6@$93ONFU4)yrGB(rU$X8MXBf&xktk2Td5*ikIN7iDw0?CaRf4;B76Beehb)iS
z3+MUKKKjchD{*TW9qY+eD&6F6F+O8yvtXG^v?1E*<5I96{=cAKmcS5nMyHv-SM!F|
zQ?0NlhB{&X)O^tzcq$#9v=8fL$ZSqKUFon`hO2lr?(xTfUFM80N+nM6Xf(&l$w}kd
z47Q)jS)h$^vnWu}O&Sp$%P_1Lck^$A2qs-ZopUy%2=FIbvKp4o-1eYUnxiu`QuS^2
zd<&Mc-v%U%5~#xVgxBwG5-T0jSOL)FjZjnhpZ~*u;D`R-p3ZHhVIYd44+TL8^iTAw
z{E`~66j7tCctHe9z4gHhqA$g-87KJ**WPEwmvS;q(&=PoaxVLv&8PH7b6xgF9o|Cy
zi-(TA27yC%x)5S%sD;k|1Ni|exTKCv7)2$9Gv+Xv)SL!&51Ix9yO1YbhZYn&8Z8V3
zw?*%DVf3*TU{(Jh3C~s*C}M$+2wE1PJ!d-ETysXCq%mco5%_l%0;jvhqr_SUFX$nK
z+rZpYrc%@cy_u?S_EO@X2pgWg0y!Q5LdD>8`B}T9T3qu{Likt7<#EOapYcx0Kdr$>
zczxHbFRnuHIwd7cE&O6FJ=ds(WT=AZEQfXXD2H^?@g01sauI2)NYN6*14ijfdcw8Q
zkUu54QeufZR_S`9Jd={5TCgsK3ofa+lx{DE?~fr!z-TN6zlxa@PRI%R!DHd3q0trm
zt?_Fur1?JIdZ%R=RcbdKYL0<x!whuC6r1JD$WwG4ZH=6SyNBSdl~1naOpU-)5f3JV
z)iA(Hi#*qy@9zS0LT<|Xcg8(2^p-Y}M#bTGFCU>RM_~D@JWY9lodkg_yV-gFwPbE{
zZN7Soz)s%+ePQCkJ0tB;eGse_OK3*RE_!M(Kq+byz`+Ax&CERY35CUx7phLKBIyIy
zQLIe8u}oc<PbqAIs=6IQq_qd=f?w@7QjwL+Cc9-zo<O+)n-H+eO8M>XG93fhu-3DP
wTo3}U5<C@}ylTOMvs%jg2_i@HUSS@GkS3q$RaSj|-?)dAj8a)=oxiv92ZW0^<^TWy

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list_threshold.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list_threshold.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..2be681d6f0afc07c75c8e972b3a847f9acdb86dd
GIT binary patch
literal 41289
zcmd6wEtDl!6@?L~=xDIPV9;S?$ATVY(7^_SL1RFGV@>qvXt0AG1B;nV<~PFv6Lk8$
z<bB<1zBfDj+^X*6slCqGd!JKPw{G34dawI;Km60r|N4XD=U0Dx@8Ztq4-TJytb&(+
zK79Uiap&2mzxd+!Kl=3I&Wo?jqaiA)69JG4z+@tTAm1w$YQ5I(btJSAoP+Ag<$<_7
z8u1WVzE;aV8zXLm2;kYTkDqfJ2*+)Nl*vS1z5mg@hwr?9aVN-~as<NwMq?xRgw&|9
z5t*VvwU)x!AQ%R+RbTw<_=!h37?QFxshb0P6%9Tw4(Gr}pPW16*shx!y>9IF$B!@W
zzx&&3{g^{z4Oo-E`0{UmxclD!hr=fH6WU<+22OA$wNZ{w1E>XPRB)_vgx!Mb)#V|%
zJTBM91}T7js?_C}`}pw(=bVEf&K|U+0`LYxKCHOr7kcCYgv|*l8@}`&I~<$4Ke@IK
zzc@@S_a0t5iT-uG<o@Ow$Ic<*4#PeSDf@(?A+_eSrg9sNPvC@<YlWR@a)^wI1|Roh
zXCel6es#F)TwZ(5+&7&W>JJW|v-eiiatTL9WXp-*^YS=xyuP*-q#y&l4@nsWACnyk
zwQkOtHkgQrYy{1fs42nfD|E>I0W{96dm%qPtZ2Ucdp9<?*&h|9mO=0#m4Gfc-ar@i
zsN+gaEvtyUI6nFK<+VLVssaBi9+)&f<}}TRZ4Y<Zu0@2R08#)R77=McFbpYgV5k**
zg377jLkbY&frH8%B7hOudyJC^Lj$WqQBnYA5+WM`9d7iwIk`J@<V<y~pU-d}96o0k
z9wTzq&#6{JHs|%y=HLTprWDA4wY~BBw8_Xf8sg)vz?rLzPe$YbR96vQ8GXiC6>c!u
z2>@y};Q8?${cd;BbYqlob@mUO0C4u+XjIg~C#XV-1zzAd6^@R{MBM6=)5A|VyMyP)
zyQcf+4)@cy-|V|p8e`8B31sft;4}c|5x^N0Lli)24Y-7T;INc6hsfwY#iNB<29y9a
z{XoW$2*4>v)=n!*eE&hXA6#obou|iVjX%D)GdhB6ZJ^rglwk(I`MU)kt9$ih=di>w
znKqicKOY@OFdP9I10Z*VQ~+x#40#JoqyZEKjP?(zP;10#$e~t~q67dH5dbLzug4gW
z?8C;#&Zq$9yczj0e^Vx$Va?MWH{!^f9i3ZAp#oUdq&9L~)R9oa<h6)Pgh)uay6@7=
z8NCfa9xko!5LEy{#c34eYpGBaLvtK~JOYhR!}0!URH4-{OKfxwMM;GcA?53ycMBx~
za0y2&Hz!mNyVuUa2ePY^8iA%3hI<voFo2D-jc-1lo^BB+>UH;RZlJ5vSO6TawN@Yb
zeU84YM0H94NNogu$Gg!Gd1%!sA^?00-~a;0sSvRt5diYFVPhW**8t-Xxl8yl7-WEb
zNCn_pCWnW(LGF|z)EYhG^o)d0C;_Jdx8So@0+j*VwKfuv<4gp=Iw5eb$fqyS&<_C4
zUf`;qw@<><f(!^F>c#21pU8Iyw1G1b19ONNASV?LfyvFZgL43W+~``!Lkdm<>@!+A
zx&)8USx^B?9!0`IX?(n;jS9|a?nN~M8#^4;f=p2WslC9>In!Y%5mLwj4cQz=FdTu#
z#~dPn-HY0gS^#s1m_q~r84yx%0tj+aYg0FJsyjj|fJPN2y^x}4;560o0a)b-<OVeI
z-RE9bGE@(@IdIlCXC$XYP&KLMhSxrh&`$s(6=VR;ywe~8Fqu>Us92B(@>+@#mNUjk
zaWV##$wV4}bJ&?9AILOMWCUmo6%hcbHLy0B`?h_re%K1{I+nz)WpeY@EYu=pKpOya
zh|sFvM#32Z3>(#mjA5gKV}K99H%F-807$KkOnd+s5&@86{Csx$t+qKt4DfMk6bU{d
zH7a2;+fIoqps#!)hePD%KvN42XC)CDt0Ko`)Ab)VAqUuplmY!X6(Xp@63cQo#=$jt
zEka|+Dau0(QUEwa0MFk!o^t2DjQ|HZz$qdIR;_WY3XyODYWlIWRpH?JFxiku@PPwx
z8>B`6AHX;A!#8D~9eyEs|J`dp=ndYZ_M2~B_<9wpzuEq|f$U3JqiQ&zvAt4Wl8>DC
z+KM$F#s+dCYy~L*DF7*eQ$#ibN&svaQYMcgI0Hs<$jAYt0HahOV;BH(0Axo<-2!N;
z!vWBk=N3{Xhsy%zg;f|DnC!<+x9}Z$nx{iIMhajRsqYqSY#uqF*_;vi77Rl_O$rX+
z=6VjNRZ|qss8v)OK~t2c7bX)Kt(rFWye6mBAiue-aP{zn)nrF*VfqTUbpJX1=#Ifp
zhk&r3NinpD1Q47<3PWEj8x`^ftQiKCm)~n&3~!o;6Z(PE0LD)+Bn7}ZJdZaS6)AvI
zL;~RavQJQjo`Z^%4>u_T8uLs-$||<YsSpXzGlD9lt~w0gtD5|_|H|;e;gkP(0Y8Qh
z9g?zh6KQ5P`%u&%kVDEoO~i&_WSZQR7;#*}k&)quyn8JhI?{A#m8XJ{4_9+kE<q}Q
zwMhoUwW7!kb~mI%a31Z1HJ_jgDVhh|9GMdA4cGGfWK<@TGVsF(ikkerjf{F8?H@MT
zDn}p(fC}K|9SMd$f`RN5Z8#MoE{YETxg$-AHyV%|u>0L;XujzOPIznQ5<~#_jKF9h
zg<ALkAOo7lb|*K64Tt9c`QY%mTnSsX`_Q5&fT93W2IhpwqqqOTM60Mp3P7p>YpTY1
zjpN@+(jfzT%^4Mj0H=uT4k!xPt3>ln&!$#r%&Cp{gEsU4XnKZYLs9{Rp|#P_a-P>Y
zMTa2;2N0TvGXkUPjdNqTarBOE@v44KUlR)>+&=u(Hc|ku$C_OShy8;gDS$_UAFBpA
zJ~szk{mmDt-|{zQMy*lJ03ZL>j#LBuXWtrC;Lxf$Ybks<MS@SWfMS$|GlQbkYIYRP
z$PL1G2dQOMNSQo}G|tU-QPhSZLJAOkNEuiThwBv)Kv2OM0So~n`RB!x^^LDGX8k-k
ze6ByU58D+|O;N@ez^aWUMyH;yS5yZyk98q?bDD_dO~linRl5%)mss0CtL)5+FH!)@
ziNG;{(_BxHqjH4QUVsvS-JOGBm_@CkLx4F%8V+QDRgORo02R{?>s(UEi2z7#1#f@d
zN-Y4jnzw&eAzuTbXk!@q2`QYztQF+VN{<o-4E|8UswRRVpiwoP#<2O`eN$`1X&T$>
z)f2S<ZZ)J#W|nZuKFlO0I})5tc7*C4>ZGjN7`ZuTH%6^yZ3EdpMAmk=ogJY>04y)f
z35N6>I2bk@$PK`dHX2o!v616U<mSMe4{Xl;Hptbv1UR)<B<zHZ*W6Kuv;nX@q)cui
zqY<@Nb^AEdG!JK^93Oyx`AjO*A_YJd0AIfIy_Hp;o&Fi?s02Q!sO1z91LUMe;|G-i
zCmJPiCIYa$iHzid98$E=a3HS%mvAIFH{^yBN^ng1ABZEv5t%mDR3rI+Pyc*}TE4U~
zdT|DL9YxB(J|p=V!=)SEu$EvaxK&q&98!UUs^LHm03QQXKDeX|{NBtQN2u<UBO|iq
zqexTB97md>r~pB=aas82-r=8Qe{yaK+EfpXZIt6|pKw<T`SFo7{evCI0kp9OMn2)&
zb5zF=Kq^c-CKGAiDnh1~fjOi3*>Xzo)^+1h$B@f0kr8mKj*JWg+0Rk}`Rl)a{K3T?
zuW3=)&=JUhuz_nSoEyNv20S@TTK68Fo3(5hrVEqzig>pW0Z_{kQX^oW=KbKt5|(qA
zc$4MEhycRMiV{o(L;&~%@Fw|NzA3|Ypfhka-tm$Os^;Vi8*S(2fV;6Ka;Dbl&#gPP
z7I7QF2eLs>-9n0fsMSQ8;g4Yh!svBBjUk-9<V+7E=dfM)0K8;C4uE_HoW2edPOH84
zhN}nXkg`t`85u%e8?<51GbGHRs5ggaflf6MCfG0=hw9BbH^`d~;m|6PTL3g~;OY&h
zS%BknRpRXrY~Y{@CE$3B;bVXb0B7Stu_4X?{$c_VfaTsUH~{1$uu<OB@_#>Z(XfF{
z2{`uQTy{}Zfba$t&MknZRoZBt;b9m6h6ds86Ee<308Pr5b<2Z~$wUko5>7eN6h&pS
zBai`b4B!O7D~P}esf}}8aBfm;av;O;*-Z+$Bcw*4nF^@o(d)Ak@~Dk9GPMl64=E8w
z0zQxdqyVH0IQ2|+#PSdc@{n3H3@TE-zn~44#E?h8gAI8|!2z(;2hbs$09cPTDu!0m
zVPhB?+nAy{yJcjEyisv7q5>f2$DiSg8<78f_jrGM?lG#JQLUvKIh+7Ob*oG!0@&;E
RFoV`@G?8z<{PN*XzXGC--R1xQ

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/multimodal/ram/gradio_demo.py b/mmpretrain/models/multimodal/ram/gradio_demo.py
new file mode 100644
index 00000000..206e6b40
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/gradio_demo.py
@@ -0,0 +1,109 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+
+import gradio as gr
+import torch
+
+from mmpretrain.registry import MODELS, TRANSFORMS
+from .config.ram_swin_large_14m import get_ram_cfg, test_transforms_cfg
+from .run.inference import inference
+
+parser = argparse.ArgumentParser(
+    description='RAM (Recognize Anything Model) demo')
+parser.add_argument(
+    'ram_ckpt', type=str, help='pretrained RAM checkpoint (absolute path)')
+parser.add_argument(
+    'clip_ckpt',
+    type=str,
+    help='CLIP ViT-base-p16 pretrained checkpoint (absolute path)')
+args = parser.parse_args()
+
+if torch.cuda.is_available():
+    devices = [
+        torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())
+    ]
+elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+    devices = [torch.device('mps')]
+else:
+    devices = [torch.device('cpu')]
+
+
+def get_free_device():
+    if hasattr(torch.cuda, 'mem_get_info'):
+        free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices]
+        select = max(zip(free, range(len(free))))[1]
+    else:
+        import random
+        select = random.randint(0, len(devices) - 1)
+    return devices[select]
+
+
+device = get_free_device()
+
+
+def ram_inference(image, tag_list, mode, threshold):
+    test_transforms = TRANSFORMS.get('Compose')(transforms=test_transforms_cfg)
+    model = MODELS.build(get_ram_cfg(mode=mode))
+    model.load_state_dict(torch.load(args.ram_ckpt))
+    model.device = device
+
+    if mode == 'openset':
+        categories = tag_list
+        if categories != '':
+            categories = categories.strip().split()
+        else:
+            categories = None
+        model.set_openset(
+            categories=categories,
+            clip_ckpt=args.clip_ckpt,
+            threshold=threshold)
+
+    sample = dict(img=image)
+    result = inference(sample, model, test_transforms, mode=mode)
+    tag, tag_chinese, logits =  \
+        result.get('tag_output')[0][0], result.get('tag_output')[1][0],\
+        result.get('logits_output')[0]
+
+    def wrap(tags, logits):
+        if tags is None:
+            return 'Openset mode has no Chinese tags'
+        tag_lst = tags.split('|')
+        rt_lst = []
+        for i, tag in enumerate(tag_lst):
+            tag = tag.strip()
+            rt_lst.append(tag + f': {logits[i]:.2f}')
+        return ' | '.join(rt_lst)
+
+    return [wrap(tag, logits), wrap(tag_chinese, logits)]
+
+
+def build_gradio():
+    inputs = [
+        gr.components.Image(label='image'),
+        gr.components.Textbox(
+            lines=2,
+            label='tag_list',
+            placeholder=
+            'Please input the categories, separated by spaces:',
+            value=''),
+        gr.components.Radio(['normal', 'openset'],
+                            label='mode',
+                            value='normal'),
+        gr.components.Slider(
+            minimum=0, maximum=1, value=0.68, step=0.01, label='threshold')
+    ]
+    return gr.Interface(
+        fn=ram_inference,
+        inputs=inputs,
+        outputs=[
+            gr.components.Textbox(),
+            gr.components.Textbox(info='Translated from the English tags')
+        ])
+
+
+def main():
+    build_gradio().launch()
+
+
+if __name__ == '__main__':
+    main()
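+
+# Example launch (a sketch; the checkpoint paths below are placeholders and
+# the demo is assumed to be run as a module so the relative imports resolve):
+#   python -m mmpretrain.models.multimodal.ram.gradio_demo \
+#       /path/to/ram_swin_large_14m.pth /path/to/clip_vit_base_p16.pth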
diff --git a/mmpretrain/models/multimodal/ram/openset_utils.py b/mmpretrain/models/multimodal/ram/openset_utils.py
new file mode 100644
index 00000000..5fa0f52e
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/openset_utils.py
@@ -0,0 +1,212 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmpretrain.registry import MODELS
+
+
+def article(name):
+    return 'an' if name[0] in 'aeiou' else 'a'
+
+
+def processed_name(name, rm_dot=False):
+    # _ for lvis
+    # / for obj365
+    res = name.replace('_', ' ').replace('/', ' or ').lower()
+    if rm_dot:
+        res = res.rstrip('.')
+    return res
+
+
+single_template = ['a photo of a {}.']
+
+multiple_templates = [
+    'There is {article} {} in the scene.',
+    'There is the {} in the scene.',
+    'a photo of {article} {} in the scene.',
+    'a photo of the {} in the scene.',
+    'a photo of one {} in the scene.',
+    'itap of {article} {}.',
+    'itap of my {}.',  # itap: I took a picture of
+    'itap of the {}.',
+    'a photo of {article} {}.',
+    'a photo of my {}.',
+    'a photo of the {}.',
+    'a photo of one {}.',
+    'a photo of many {}.',
+    'a good photo of {article} {}.',
+    'a good photo of the {}.',
+    'a bad photo of {article} {}.',
+    'a bad photo of the {}.',
+    'a photo of a nice {}.',
+    'a photo of the nice {}.',
+    'a photo of a cool {}.',
+    'a photo of the cool {}.',
+    'a photo of a weird {}.',
+    'a photo of the weird {}.',
+    'a photo of a small {}.',
+    'a photo of the small {}.',
+    'a photo of a large {}.',
+    'a photo of the large {}.',
+    'a photo of a clean {}.',
+    'a photo of the clean {}.',
+    'a photo of a dirty {}.',
+    'a photo of the dirty {}.',
+    'a bright photo of {article} {}.',
+    'a bright photo of the {}.',
+    'a dark photo of {article} {}.',
+    'a dark photo of the {}.',
+    'a photo of a hard to see {}.',
+    'a photo of the hard to see {}.',
+    'a low resolution photo of {article} {}.',
+    'a low resolution photo of the {}.',
+    'a cropped photo of {article} {}.',
+    'a cropped photo of the {}.',
+    'a close-up photo of {article} {}.',
+    'a close-up photo of the {}.',
+    'a jpeg corrupted photo of {article} {}.',
+    'a jpeg corrupted photo of the {}.',
+    'a blurry photo of {article} {}.',
+    'a blurry photo of the {}.',
+    'a pixelated photo of {article} {}.',
+    'a pixelated photo of the {}.',
+    'a black and white photo of the {}.',
+    'a black and white photo of {article} {}.',
+    'a plastic {}.',
+    'the plastic {}.',
+    'a toy {}.',
+    'the toy {}.',
+    'a plushie {}.',
+    'the plushie {}.',
+    'a cartoon {}.',
+    'the cartoon {}.',
+    'an embroidered {}.',
+    'the embroidered {}.',
+    'a painting of the {}.',
+    'a painting of a {}.',
+]
+
+openimages_rare_unseen = [
+    'Aerial photography', 'Aircraft engine', 'Ale', 'Aloe', 'Amphibian',
+    'Angling', 'Anole', 'Antique car', 'Arcade game', 'Arthropod',
+    'Assault rifle', 'Athletic shoe', 'Auto racing', 'Backlighting',
+    'Bagpipes', 'Ball game', 'Barbecue chicken', 'Barechested', 'Barquentine',
+    'Beef tenderloin', 'Billiard room', 'Billiards', 'Bird of prey',
+    'Black swan', 'Black-and-white', 'Blond', 'Boating', 'Bonbon',
+    'Bottled water', 'Bouldering', 'Bovine', 'Bratwurst', 'Breadboard',
+    'Briefs', 'Brisket', 'Brochette', 'Calabaza', 'Camera operator', 'Canola',
+    'Childbirth', 'Chordophone', 'Church bell', 'Classical sculpture',
+    'Close-up', 'Cobblestone', 'Coca-cola', 'Combat sport', 'Comics',
+    'Compact car', 'Computer speaker', 'Cookies and crackers',
+    'Coral reef fish', 'Corn on the cob', 'Cosmetics', 'Crocodilia',
+    'Digital camera', 'Dishware', 'Divemaster', 'Dobermann', 'Dog walking',
+    'Domestic rabbit', 'Domestic short-haired cat', 'Double-decker bus',
+    'Drums', 'Electric guitar', 'Electric piano', 'Electronic instrument',
+    'Equestrianism', 'Equitation', 'Erinaceidae', 'Extreme sport', 'Falafel',
+    'Figure skating', 'Filling station', 'Fire apparatus', 'Firearm',
+    'Flatbread', 'Floristry', 'Forklift truck', 'Freight transport',
+    'Fried food', 'Fried noodles', 'Frigate', 'Frozen yogurt', 'Frying',
+    'Full moon', 'Galleon', 'Glacial landform', 'Gliding', 'Go-kart', 'Goats',
+    'Grappling', 'Great white shark', 'Gumbo', 'Gun turret', 'Hair coloring',
+    'Halter', 'Headphones', 'Heavy cruiser', 'Herding', 'High-speed rail',
+    'Holding hands', 'Horse and buggy', 'Horse racing', 'Hound',
+    'Hunting knife', 'Hurdling', 'Inflatable', 'Jackfruit', 'Jeans', 'Jiaozi',
+    'Junk food', 'Khinkali', 'Kitesurfing', 'Lawn game', 'Leaf vegetable',
+    'Lechon', 'Lifebuoy', 'Locust', 'Lumpia', 'Luxury vehicle', 'Machine tool',
+    'Medical imaging', 'Melee weapon', 'Microcontroller', 'Middle ages',
+    'Military person', 'Military vehicle', 'Milky way', 'Miniature Poodle',
+    'Modern dance', 'Molluscs', 'Monoplane', 'Motorcycling', 'Musical theatre',
+    'Narcissus', 'Nest box', 'Newsagent\'s shop', 'Nile crocodile',
+    'Nordic skiing', 'Nuclear power plant', 'Orator', 'Outdoor shoe',
+    'Parachuting', 'Pasta salad', 'Peafowl', 'Pelmeni', 'Perching bird',
+    'Performance car', 'Personal water craft', 'Pit bull', 'Plant stem',
+    'Pork chop', 'Portrait photography', 'Primate', 'Procyonidae',
+    'Prosciutto', 'Public speaking', 'Racewalking', 'Ramen',
+    'Rear-view mirror', 'Residential area', 'Ribs', 'Rice ball',
+    'Road cycling', 'Roller skating', 'Roman temple', 'Rowing', 'Rural area',
+    'Sailboat racing', 'Scaled reptile', 'Scuba diving', 'Senior citizen',
+    'Shallot', 'Shinto shrine', 'Shooting range', 'Siberian husky', 'Sledding',
+    'Soba', 'Solar energy', 'Sport climbing', 'Sport utility vehicle',
+    'Steamed rice', 'Stemware', 'Sumo', 'Surfing Equipment', 'Team sport',
+    'Touring car', 'Toy block', 'Trampolining', 'Underwater diving',
+    'Vegetarian food', 'Wallaby', 'Water polo', 'Watercolor paint', 'Whiskers',
+    'Wind wave', 'Woodwind instrument', 'Yakitori', 'Zeppelin'
+]
+
+
+def get_clip_model():
+    model = dict(
+        type='CLIPZeroShot',
+        vision_backbone=dict(
+            type='VisionTransformer',
+            arch='base',
+            img_size=224,
+            patch_size=16,
+            drop_rate=0.,
+            layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
+            pre_norm=True,
+        ),
+        projection=dict(
+            type='CLIPProjection', in_channels=768, out_channels=512),
+        text_backbone=dict(
+            type='CLIPTransformer',
+            width=512,
+            layers=12,
+            heads=8,
+            attn_mask=True,
+        ),
+        tokenizer=dict(
+            type='AutoTokenizer',
+            name_or_path='openai/clip-vit-base-patch16',
+            use_fast=False),
+        vocab_size=49408,
+        transformer_width=512,
+        proj_dim=512,
+        context_length=77,
+        data_preprocessor=dict(
+            type='MultiModalDataPreprocessor',
+            mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
+            std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
+            to_rgb=False,
+        ),
+    )
+    return MODELS.build(model)
+
+
+def build_openset_label_embedding(categories=None, clip_ckpt_path=''):
+    if categories is None:
+        print('Categories is None; using openimages_rare_unseen categories')
+        categories = openimages_rare_unseen
+    model = get_clip_model()
+    model.load_state_dict(torch.load(clip_ckpt_path))
+    templates = multiple_templates
+
+    run_on_gpu = torch.cuda.is_available()
+
+    with torch.no_grad():
+        openset_label_embedding = []
+        for category in categories:
+            texts = [
+                template.format(
+                    processed_name(category, rm_dot=True),
+                    article=article(category)) for template in templates
+            ]
+            texts = [
+                'This is ' + text
+                if text.startswith('a') or text.startswith('the') else text
+                for text in texts
+            ]
+            texts = model.tokenize(texts)  # tokenize
+            if run_on_gpu:
+                texts = texts.cuda()
+                model = model.cuda()
+            text_embeddings = model.extract_text_feat(texts)
+            text_embeddings /= text_embeddings.norm(dim=-1, keepdim=True)
+            text_embedding = text_embeddings.mean(dim=0)
+            text_embedding /= text_embedding.norm()
+            openset_label_embedding.append(text_embedding)
+        openset_label_embedding = torch.stack(openset_label_embedding, dim=1)
+        if run_on_gpu:
+            openset_label_embedding = openset_label_embedding.cuda()
+
+    openset_label_embedding = openset_label_embedding.t()
+    return openset_label_embedding, categories
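+
+# Usage sketch (mirrors `RAMOpenset.set_openset`; the CLIP checkpoint path is
+# a placeholder). The returned embedding has shape (num_categories, 512) and
+# serves as the label queries of the tagging head:
+#   embedding, cats = build_openset_label_embedding(
+#       categories=['cat', 'dog'],
+#       clip_ckpt_path='/path/to/clip_vit_base_p16.pth')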
diff --git a/mmpretrain/models/multimodal/ram/ram.py b/mmpretrain/models/multimodal/ram/ram.py
new file mode 100644
index 00000000..c5d22f07
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/ram.py
@@ -0,0 +1,332 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import pickle
+from abc import abstractmethod
+from typing import List, Optional
+
+import numpy as np
+import torch
+import torch.nn as nn
+from mmengine.model import BaseModel
+
+from mmpretrain.registry import MODELS, TOKENIZER
+from mmpretrain.structures import DataSample
+from .bert import BertConfig, BertLMHeadModel, BertModel
+from .openset_utils import build_openset_label_embedding
+from .utils import tie_encoder_decoder_weights
+
+
+def get_path(path):
+    if os.path.isabs(path):
+        return path
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
+
+
+class RAM(BaseModel):
+    """The implementation of `RAM <https://arxiv.org/abs/2306.03514>`_."""
+
+    def __init__(self,
+                 tokenizer: dict,
+                 vision_backbone: dict,
+                 tag_encoder: dict,
+                 tagging_head: dict,
+                 text_decoder: dict,
+                 device: str = 'cpu',
+                 vision_width: int = 1536,
+                 prompt='a picture of ',
+                 threshold=0.68,
+                 delete_tag_index=[],
+                 tag_list='./data/ram_tag_list.pickle',
+                 tag_list_chinese='./data/ram_tag_list_chinese.pickle',
+                 data_preprocessor: Optional[dict] = None,
+                 init_cfg: Optional[dict] = None):
+        if data_preprocessor is None:
+            data_preprocessor = {}
+        data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor')
+        data_preprocessor = MODELS.build(data_preprocessor)
+
+        super().__init__(
+            data_preprocessor=data_preprocessor, init_cfg=init_cfg)
+
+        self.device = device
+        # build the visual encoder
+        self.visual_encoder = MODELS.build(vision_backbone)
+
+        # build the tokenizer
+        self.tokenizer = TOKENIZER.build(tokenizer)
+        self.tokenizer.add_special_tokens({'bos_token': '[DEC]'})
+        self.tokenizer.add_special_tokens(
+            {'additional_special_tokens': ['[ENC]']})
+        self.tokenizer.enc_token_id = \
+            self.tokenizer.additional_special_tokens_ids[0]
+
+        # build the tag encoder
+        # encoder_config = BertConfig.from_json_file(med_config)
+        # encoder_config.encoder_width = 512
+        encoder_config = BertConfig.from_dict(tag_encoder)
+        self.tag_encoder = BertModel(
+            config=encoder_config, add_pooling_layer=False)
+
+        # build image-tag-text decoder
+        # decoder_config = BertConfig.from_json_file(med_config)
+        decoder_config = BertConfig.from_dict(text_decoder)
+        self.text_decoder = BertLMHeadModel(config=decoder_config)
+
+        self.delete_tag_index = delete_tag_index
+        self.prompt = prompt
+        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1
+
+        # load tag list
+        self.tag_list = self.load_tag_list(get_path(tag_list))
+        self.tag_list_chinese = self.load_tag_list(get_path(tag_list_chinese))
+
+        # create image-tag recognition decoder
+        self.threshold = threshold
+        self.num_class = len(self.tag_list)
+        # q2l_config =  \
+        #               BertConfig.from_json_file(f'{CONFIG_PATH}/configs/q2l_config.json')
+        # q2l_config.encoder_width = 512
+        q2l_config = BertConfig.from_dict(tagging_head)
+        self.tagging_head = BertModel(
+            config=q2l_config, add_pooling_layer=False)
+        self.tagging_head.resize_token_embeddings(len(self.tokenizer))
+        self.label_embed = nn.Parameter(
+            torch.zeros(self.num_class, q2l_config.encoder_width))
+
+        if q2l_config.hidden_size != 512:
+            self.wordvec_proj = nn.Linear(512, q2l_config.hidden_size)
+        else:
+            self.wordvec_proj = nn.Identity()
+
+        self.fc = nn.Linear(q2l_config.hidden_size, 1)
+
+        self.del_selfattention()
+
+        # share the weights of the lowest 2 layers of the
+        # "image-tag interaction encoder" with
+        # the "image-tag recognition decoder"
+        tie_encoder_decoder_weights(self.tag_encoder, self.tagging_head, '',
+                                    ' ')
+        self.image_proj = nn.Linear(vision_width, 512)
+        # self.label_embed = nn.Parameter(torch.load(
+        #   f'{CONFIG_PATH}/data/textual_label_embedding.pth',
+        #   map_location='cpu').float())
+
+        # adjust thresholds for some tags
+        self.class_threshold = torch.ones(self.num_class) * self.threshold
+        ram_class_threshold_path = get_path(
+            './data/ram_tag_list_threshold.pickle')
+        with open(ram_class_threshold_path, 'rb') as f:
+            ram_class_threshold = pickle.load(f)
+        for key, value in enumerate(ram_class_threshold):
+            self.class_threshold[key] = value
+
+    def load_tag_list(self, tag_list_file):
+        with open(tag_list_file, 'rb') as f:
+            tag_list = pickle.load(f)
+        tag_list = np.array(tag_list)
+        return tag_list
+
+    # delete the self-attention layers of the image-tag recognition decoder
+    # to reduce computation, following Query2Label
+    def del_selfattention(self):
+        del self.tagging_head.embeddings
+        for layer in self.tagging_head.encoder.layer:
+            del layer.attention
+
+    def get_label_embed(self):
+        return torch.nn.functional.relu(self.wordvec_proj(self.label_embed))
+
+    def extract_visual_feature(self, images):
+        image_embeds = self.visual_encoder(images)[0]
+        image_embeds = image_embeds.flatten(2, 3)
+        attn_pool = nn.AdaptiveAvgPool1d(1)
+        cls_token = attn_pool(image_embeds).permute(0, 2, 1).contiguous()
+        image_embeds = image_embeds.permute(0, 2, 1).contiguous()
+        image_embeds = torch.cat([cls_token, image_embeds], dim=1)
+        image_embeds = self.image_proj(image_embeds)
+        image_atts = torch.ones(
+            image_embeds.size()[:-1], dtype=torch.long).to(images.device)
+        return image_embeds, image_atts
+
+    def image2tag(self, label_embed, image_embeds, image_atts):
+        # recognize image tags using the image-tag recognition decoder
+        # image_cls_embeds = image_embeds[:, 0, :]
+        image_spatial_embeds = image_embeds[:, 1:, :]
+
+        bs = image_spatial_embeds.shape[0]
+        label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1)
+        tagging_embed = self.tagging_head(
+            encoder_embeds=label_embed,
+            encoder_hidden_states=image_embeds,
+            encoder_attention_mask=image_atts,
+            return_dict=False,
+            mode='tagging',
+        )
+
+        logits = self.fc(tagging_embed[0]).squeeze(-1)
+        return logits
+
+    def forward(
+        self,
+        images: torch.Tensor,
+        data_samples: Optional[list] = None,
+        mode: str = 'predict',
+        **kwargs,
+    ):
+        if mode == 'predict':
+            return self.predict(images, data_samples, **kwargs)
+        else:
+            raise RuntimeError(f'Invalid mode "{mode}".')
+
+    @abstractmethod
+    def predict(self,
+                images: torch.Tensor,
+                data_samples: DataSample = None) -> DataSample:
+        raise NotImplementedError
+
+
+@MODELS.register_module()
+class RAMNormal(RAM):
+
+    def __init__(self,
+                 tokenizer: dict,
+                 vision_backbone: dict,
+                 tag_encoder: dict,
+                 tagging_head: dict,
+                 text_decoder: dict,
+                 device: str = 'cpu',
+                 vision_width: int = 1536,
+                 prompt='a picture of ',
+                 threshold=0.68,
+                 delete_tag_index=[],
+                 tag_list='./data/ram_tag_list.pickle',
+                 tag_list_chinese='./data/ram_tag_list_chinese.pickle',
+                 data_preprocessor: Optional[dict] = None,
+                 init_cfg: Optional[dict] = None):
+        super().__init__(
+            tokenizer,
+            vision_backbone,
+            tag_encoder,
+            tagging_head,
+            text_decoder,
+            device,
+            vision_width,
+            prompt,
+            threshold,
+            delete_tag_index,
+            tag_list,
+            tag_list_chinese,
+            data_preprocessor,
+            init_cfg,
+        )
+
+    def tag_process(self, logits):
+        targets = torch.where(
+            torch.sigmoid(logits) > self.class_threshold.to(logits.device),
+            torch.tensor(1.0).to(logits.device),
+            torch.zeros(self.num_class).to(logits.device))
+
+        tag = targets.cpu().numpy()
+        tag[:, self.delete_tag_index] = 0
+        tag_output = []
+        tag_output_chinese = []
+        logits_output = []
+
+        bs = logits.shape[0]
+        for b in range(bs):
+            index = np.argwhere(tag[b] == 1)
+            token = self.tag_list[index].squeeze(axis=1)
+            logits_output.append(
+                torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy())
+            tag_output.append(' | '.join(token))
+            token_chinese = self.tag_list_chinese[index].squeeze(axis=1)
+            tag_output_chinese.append(' | '.join(token_chinese))
+
+        return [(tag_output, tag_output_chinese), logits_output]
+
+    def predict(self,
+                images: torch.Tensor,
+                data_samples: DataSample = None) -> DataSample:
+        self.eval()
+        self.to(self.device)
+        images = images.to(self.device)
+        label_embed = self.get_label_embed()
+        image_embeds, image_atts = self.extract_visual_feature(images)
+        logits = self.image2tag(label_embed, image_embeds, image_atts)
+        tag_output, logits_output = self.tag_process(logits)
+        data_samples.set_field(logits_output, 'logits_output')
+        data_samples.set_field(tag_output, 'tag_output')
+        return data_samples
+
+
+@MODELS.register_module()
+class RAMOpenset(RAMNormal):
+
+    def __init__(self,
+                 tokenizer: dict,
+                 vision_backbone: dict,
+                 tag_encoder: dict,
+                 tagging_head: dict,
+                 text_decoder: dict,
+                 device: str = 'cpu',
+                 vision_width: int = 1536,
+                 prompt='a picture of ',
+                 threshold=0.68,
+                 delete_tag_index=[],
+                 tag_list='./data/ram_tag_list.pickle',
+                 tag_list_chinese='./data/ram_tag_list_chinese.pickle',
+                 data_preprocessor: Optional[dict] = None,
+                 init_cfg: Optional[dict] = None):
+        super().__init__(
+            tokenizer,
+            vision_backbone,
+            tag_encoder,
+            tagging_head,
+            text_decoder,
+            device,
+            vision_width,
+            prompt,
+            threshold,
+            delete_tag_index,
+            tag_list,
+            tag_list_chinese,
+            data_preprocessor,
+            init_cfg,
+        )
+
+    def set_openset(self,
+                    categories: List[str] = None,
+                    clip_ckpt: str = '',
+                    threshold: float = 0.68):
+        openset_label_embedding, openset_categories = \
+                            build_openset_label_embedding(
+                                categories, clip_ckpt
+                            )
+        self.tag_list = np.array(openset_categories)
+        self.label_embed = nn.Parameter(openset_label_embedding.float())
+        self.num_class = len(openset_categories)
+
+        # the threshold for unseen categories is often lower
+        self.class_threshold = torch.ones(self.num_class) * threshold
+
+    def tag_process(self, logits):
+        targets = torch.where(
+            torch.sigmoid(logits) > self.class_threshold.to(logits.device),
+            torch.tensor(1.0).to(logits.device),
+            torch.zeros(self.num_class).to(logits.device))
+
+        tag = targets.cpu().numpy()
+        tag[:, self.delete_tag_index] = 0
+
+        bs = logits.shape[0]
+        tag_output = []
+        logits_output = []
+        for b in range(bs):
+            index = np.argwhere(tag[b] == 1)
+            token = self.tag_list[index].squeeze(axis=1)
+            logits_output.append(
+                torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy())
+            tag_output.append(' | '.join(token))
+
+        return [(tag_output, [None]), logits_output]
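+
+# Openset usage sketch (mirrors gradio_demo.py; the checkpoint paths and the
+# `get_ram_cfg` config helper are taken from the demo, not defined here):
+#   from .config.ram_swin_large_14m import get_ram_cfg
+#   model = MODELS.build(get_ram_cfg(mode='openset'))
+#   model.load_state_dict(torch.load('/path/to/ram_swin_large_14m.pth'))
+#   model.set_openset(categories=['cat', 'dog'],
+#                     clip_ckpt='/path/to/clip_vit_base_p16.pth',
+#                     threshold=0.5)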
diff --git a/mmpretrain/models/multimodal/ram/run/__init__.py b/mmpretrain/models/multimodal/ram/run/__init__.py
new file mode 100644
index 00000000..ef101fec
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/run/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) OpenMMLab. All rights reserved.
diff --git a/mmpretrain/models/multimodal/ram/run/inference.py b/mmpretrain/models/multimodal/ram/run/inference.py
new file mode 100644
index 00000000..da5afcf5
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/run/inference.py
@@ -0,0 +1,29 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+
+def inference_ram(sample, model):
+
+    with torch.no_grad():
+        result = model.test_step(sample)
+
+    return result
+
+
+def inference_ram_openset(sample, model):
+    with torch.no_grad():
+        result = model.test_step(sample)
+
+    return result
+
+
+def inference(sample, model, transforms, mode='normal'):
+    sample = transforms(sample)
+    if sample['inputs'].ndim == 3:
+        sample['inputs'] = sample['inputs'].unsqueeze(dim=0)
+    assert mode in ['normal', 'openset'
+                    ], 'mode of inference must be "normal" or "openset"'
+    if mode == 'normal':
+        return inference_ram(sample, model)
+    else:
+        return inference_ram_openset(sample, model)
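+
+# Minimal usage sketch (mirrors gradio_demo.py; the transform config import
+# and the checkpoint path are assumptions, not part of this module):
+#   from mmpretrain.registry import MODELS, TRANSFORMS
+#   from ..config.ram_swin_large_14m import get_ram_cfg, test_transforms_cfg
+#   transforms = TRANSFORMS.get('Compose')(transforms=test_transforms_cfg)
+#   model = MODELS.build(get_ram_cfg(mode='normal'))
+#   model.load_state_dict(torch.load('/path/to/ram_swin_large_14m.pth'))
+#   result = inference(dict(img=image), model, transforms, mode='normal')
+#   tags, logits = result.get('tag_output'), result.get('logits_output')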
diff --git a/mmpretrain/models/multimodal/ram/utils.py b/mmpretrain/models/multimodal/ram/utils.py
new file mode 100644
index 00000000..32cb115b
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/utils.py
@@ -0,0 +1,87 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List
+
+from torch import nn
+
+
+def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module,
+                                base_model_prefix: str, skip_key: str):
+    uninitialized_encoder_weights: List[str] = []
+    if decoder.__class__ != encoder.__class__:
+        print(f'{decoder.__class__} and {encoder.__class__} are not equal. '
+              'In this case make sure that all encoder weights are '
+              'correctly initialized.')
+
+    def tie_encoder_to_decoder_recursively(
+        decoder_pointer: nn.Module,
+        encoder_pointer: nn.Module,
+        module_name: str,
+        uninitialized_encoder_weights: List[str],
+        skip_key: str,
+        depth=0,
+    ):
+        assert isinstance(decoder_pointer, nn.Module) and isinstance(
+            encoder_pointer, nn.Module
+        ), f'{decoder_pointer} and {encoder_pointer}' + \
+            'have to be of type torch.nn.Module'
+        if hasattr(decoder_pointer, 'weight') and skip_key not in module_name:
+            assert hasattr(encoder_pointer, 'weight')
+            encoder_pointer.weight = decoder_pointer.weight
+            if hasattr(decoder_pointer, 'bias'):
+                assert hasattr(encoder_pointer, 'bias')
+                encoder_pointer.bias = decoder_pointer.bias
+            print(module_name + ' is tied')
+            return
+
+        encoder_modules = encoder_pointer._modules
+        decoder_modules = decoder_pointer._modules
+        if len(decoder_modules) > 0:
+            assert (len(encoder_modules) >
+                    0), f'''Encoder module {encoder_pointer}
+            does not match decoder module {decoder_pointer}'''
+
+            all_encoder_weights = set([
+                module_name + '/' + sub_name
+                for sub_name in encoder_modules.keys()
+            ])
+            encoder_layer_pos = 0
+            for name, module in decoder_modules.items():
+                if name.isdigit():
+                    encoder_name = str(int(name) + encoder_layer_pos)
+                    decoder_name = name
+                    if not isinstance(
+                            decoder_modules[decoder_name],
+                            type(encoder_modules[encoder_name])) and len(
+                                encoder_modules) != len(decoder_modules):
+                        # this can happen if the name corresponds to
+                        # the position in a nn.ModuleList of layers;
+                        # in this case the decoder has added a
+                        # cross-attention layer that the encoder does not
+                        # have, so skip this step and subtract one
+                        # layer position from the encoder
+                        encoder_layer_pos -= 1
+                        continue
+                elif name not in encoder_modules:
+                    continue
+                elif depth > 500:
+                    raise ValueError(
+                        '''Max depth of recursive function `tie_encoder_to_decoder` reached.
+                        It seems that there is a circular dependency
+                        between two or more `nn.Modules` of your model.''')
+                else:
+                    decoder_name = encoder_name = name
+                tie_encoder_to_decoder_recursively(
+                    decoder_modules[decoder_name],
+                    encoder_modules[encoder_name],
+                    module_name + '/' + name,
+                    uninitialized_encoder_weights,
+                    skip_key,
+                    depth=depth + 1,
+                )
+                all_encoder_weights.remove(module_name + '/' + encoder_name)
+
+            uninitialized_encoder_weights += list(all_encoder_weights)
+
+    # tie weights recursively
+    tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix,
+                                       uninitialized_encoder_weights, skip_key)
diff --git a/mmpretrain/models/utils/tokenizer.py b/mmpretrain/models/utils/tokenizer.py
index 5b8a324b..fddda432 100644
--- a/mmpretrain/models/utils/tokenizer.py
+++ b/mmpretrain/models/utils/tokenizer.py
@@ -12,6 +12,7 @@ from .huggingface import register_hf_tokenizer
 
 register_hf_tokenizer(AutoTokenizer)
 register_hf_tokenizer(LlamaTokenizer)
+register_hf_tokenizer(BertTokenizer)
 
 
 @register_hf_tokenizer()
diff --git a/tools/model_converters/ram2mmpretrain.py b/tools/model_converters/ram2mmpretrain.py
new file mode 100644
index 00000000..5ee3b476
--- /dev/null
+++ b/tools/model_converters/ram2mmpretrain.py
@@ -0,0 +1,117 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import os.path as osp
+from collections import OrderedDict
+from copy import deepcopy
+
+import mmengine
+import torch
+from mmengine.runner import CheckpointLoader
+
+
+def convert_swin(ckpt):
+    new_ckpt = OrderedDict()
+    convert_mapping = dict()
+
+    def correct_unfold_reduction_order(x):
+        out_channel, in_channel = x.shape
+        x = x.reshape(out_channel, 4, in_channel // 4)
+        x = x[:, [0, 2, 1, 3], :].transpose(1,
+                                            2).reshape(out_channel, in_channel)
+        return x
+
+    def correct_unfold_norm_order(x):
+        in_channel = x.shape[0]
+        x = x.reshape(4, in_channel // 4)
+        x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
+        return x
+
+    for k, v in ckpt.items():
+        if 'attn_mask' in k:
+            continue
+        if k.startswith('head'):
+            continue
+        elif k.startswith('layers'):
+            new_v = v
+            if 'attn.' in k:
+                new_k = k.replace('attn.', 'attn.w_msa.')
+            elif 'mlp.' in k:
+                if 'mlp.fc1.' in k:
+                    new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
+                elif 'mlp.fc2.' in k:
+                    new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
+                else:
+                    new_k = k.replace('mlp.', 'ffn.')
+            elif 'downsample' in k:
+                new_k = k
+                if 'reduction.' in k:
+                    new_v = correct_unfold_reduction_order(v)
+                elif 'norm.' in k:
+                    new_v = correct_unfold_norm_order(v)
+            else:
+                new_k = k
+            new_k = new_k.replace('layers', 'stages', 1)
+        elif k.startswith('patch_embed'):
+            new_v = v
+            if 'proj' in k:
+                new_k = k.replace('proj', 'projection')
+            else:
+                new_k = k
+        elif k.startswith('norm'):
+            new_v = v
+            new_k = k.replace('norm', 'norm3')
+        else:
+            new_v = v
+            new_k = k
+
+        new_ckpt[new_k] = new_v
+        convert_mapping[k] = new_k
+
+    return new_ckpt, convert_mapping
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Convert keys in official pretrained RAM models to '
+        'MMPretrain style.')
+    parser.add_argument('src', help='src model path or url')
+    # The dst path must be a full path of the new checkpoint.
+    parser.add_argument('dst', help='save path')
+    args = parser.parse_args()
+
+    checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
+    if 'state_dict' in checkpoint:
+        state_dict = checkpoint['state_dict']
+    elif 'model' in checkpoint:
+        state_dict = checkpoint['model']
+    else:
+        state_dict = checkpoint
+
+    visual_ckpt = OrderedDict()
+    for key in state_dict:
+        if key.startswith('visual_encoder.'):
+            new_key = key.replace('visual_encoder.', '')
+            visual_ckpt[new_key] = state_dict[key]
+
+    new_visual_ckpt, convert_mapping = convert_swin(visual_ckpt)
+    new_ckpt = deepcopy(state_dict)
+    for key in state_dict:
+        if key.startswith('visual_encoder.'):
+            if 'attn_mask' in key:
+                del new_ckpt[key]
+                continue
+            del new_ckpt[key]
+            old_key = key.replace('visual_encoder.', '')
+            new_ckpt[key.replace(old_key,
+                                 convert_mapping[old_key])] = deepcopy(
+                                     new_visual_ckpt[key.replace(
+                                         old_key,
+                                         convert_mapping[old_key]).replace(
+                                             'visual_encoder.', '')])
+
+    mmengine.mkdir_or_exist(osp.dirname(args.dst))
+    torch.save(new_ckpt, args.dst)
+
+
+if __name__ == '__main__':
+    main()
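+
+# Example (a sketch; the source and destination paths are placeholders):
+#   python tools/model_converters/ram2mmpretrain.py \
+#       /path/to/ram_swin_large_14m.pth ram_swin_large_14m_converted.pth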