diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..895343e6f6fee35557fa74f481a6cb6a02715fcb 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,57 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.0.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.0.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.1.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.10.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.11.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.12.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.13.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.13.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.14.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.14.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.15.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.16.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.16.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.16.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.18.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.19.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.2.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.2.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.21.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.21.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.22.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.23.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.24.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.24.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.25.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.26.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.29.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.29.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.3.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.30.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.31.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.4.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.5.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.8.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
+triton_models/weights/layers.9.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
diff --git a/service_docker_up.sh b/service_docker_up.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d45345e6165857b5bf210cb4ad261029143508a9
--- /dev/null
+++ b/service_docker_up.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+show_help() {
+  echo "Usage: $0 [-h|--help] [--lib-dir <dir>]"
+  echo
+  echo "Options:"
+  echo "  -h, --help       Show this help message and exit"
+  echo "  --lib-dir <dir>  Specify the directory of the turbomind backend libraries"
+}
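+# Example usage (paths are illustrative):
+#   bash service_docker_up.sh                          # use the turbomind backend bundled in the image
+#   bash service_docker_up.sh --lib-dir /path/to/libs  # mount a locally built turbomind backend instead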
+
+# check if '-h' or '--help' in the arguments
+for arg in "$@"
+do
+  if [ "$arg" == "-h" ] || [ "$arg" == "--help" ]; then
+    show_help
+    exit 0
+  fi
+done
+
+
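+# Tensor-parallel degree: GPUs 0..TP-1 are exposed to the container via
+# docker's --gpus '"device=..."' syntax assembled below.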
+TP=1
+DEVICES="0"
+for ((i = 1; i < ${TP}; ++i)); do
+    DEVICES="${DEVICES},$i"
+done
+DEVICES="\"device=${DEVICES}\""
+
+
+SCRIPT_DIR="$(dirname "$0")"
+SCRIPT_ABS_DIR="$(realpath "$SCRIPT_DIR")"
+
+
+if [ -z "$1" ]; then
+    docker run \
+        --gpus $DEVICES \
+        --rm \
+        -v "${SCRIPT_ABS_DIR}":/workspace/models \
+        --shm-size 16g \
+        -p 33336:22 \
+        -p 33337-33400:33337-33400 \
+        --cap-add=SYS_PTRACE \
+        --cap-add=SYS_ADMIN \
+        --security-opt seccomp=unconfined \
+        --name lmdeploy \
+        -it --env NCCL_LAUNCH_MODE=GROUP openmmlab/lmdeploy:latest \
+        tritonserver \
+        --model-repository=/workspace/models/model_repository \
+        --allow-http=0 \
+        --allow-grpc=1 \
+        --grpc-port=33337 \
+        --log-verbose=0 \
+        --allow-metrics=1
+fi
+
+for ((i = 1; i <= $#; i++)); do
+  arg=${!i}
+  case "$arg" in
+    --lib-dir)
+    if [ "$i" -eq "$#" ]; then
+        show_help
+        exit -1
+    fi
+    LIB_PATH=${@:i+1:1}
+      docker run \
+        --gpus $DEVICES \
+        --rm \
+        -v "${LIB_PATH}":/opt/tritonserver/backends/turbomind \
+        -v ""${SCRIPT_ABS_DIR}"":/workspace/models \
+        --shm-size 16g \
+        -p 33336:22 \
+        -p 33337-33400:33337-33400 \
+        --cap-add=SYS_PTRACE \
+        --cap-add=SYS_ADMIN \
+        --security-opt seccomp=unconfined \
+        --name lmdeploy \
+        -it --env NCCL_LAUNCH_MODE=GROUP openmmlab/lmdeploy:latest \
+        tritonserver \
+        --model-repository=/workspace/models/model_repository \
+        --allow-http=0 \
+        --allow-grpc=1 \
+        --grpc-port=33337 \
+        --log-verbose=0 \
+        --allow-metrics=1
+    break
+    ;;
+  esac
+done
diff --git a/triton_models/interactive/1/placeholder b/triton_models/interactive/1/placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/triton_models/interactive/config.pbtxt b/triton_models/interactive/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..ae0423c7d4905b4fc058e72ba23aaf81391316d5
--- /dev/null
+++ b/triton_models/interactive/config.pbtxt
@@ -0,0 +1,285 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+name: "turbomind"
+backend: "turbomind"
+default_model_filename: "weights"
+max_batch_size: 1
+
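+# Decoupled mode allows the backend to send multiple responses per request,
+# which is required for token-by-token streaming over gRPC.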
+model_transaction_policy {
+  decoupled: True
+}
+
+instance_group [
+  {
+    # max concurrent instances
+    count: 48
+    kind: KIND_CPU
+  }
+]
+
+input [
+  {
+    name: "input_ids"
+    data_type: TYPE_UINT32
+    dims: [ -1 ]
+    # allow_ragged_batch: true
+  },
+  {
+    name: "input_lengths"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+  },
+  {
+    name: "request_output_len"
+    data_type: TYPE_UINT32
+    dims: [ -1 ]
+  },
+  {
+    name: "step"
+    data_type: TYPE_INT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "session_len"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "runtime_top_k"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "runtime_top_p"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "beam_search_diversity_rate"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "temperature"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "len_penalty"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "repetition_penalty"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "random_seed"
+    data_type: TYPE_UINT64
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "is_return_log_probs"
+    data_type: TYPE_BOOL
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "beam_width"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "start_id"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "end_id"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "bad_words_list"
+    data_type: TYPE_INT32
+    dims: [ 2, -1 ]
+    optional: true
+  },
+  {
+    name: "stop_words_list"
+    data_type: TYPE_INT32
+    dims: [ 2, -1 ]
+    optional: true
+  },
+  {
+    name: "prompt_learning_task_name_ids"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "top_p_decay"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "top_p_min"
+    data_type: TYPE_FP32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "top_p_reset_ids"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "START"
+    data_type: TYPE_INT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "END"
+    data_type: TYPE_INT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "STOP"
+    data_type: TYPE_INT32
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  },
+  {
+    name: "CORRID"
+    data_type: TYPE_UINT64
+    dims: [ 1 ]
+    reshape: { shape: [ ] }
+    optional: true
+  }
+]
+output [
+  {
+    name: "output_ids"
+    data_type: TYPE_UINT32
+    dims: [ -1, -1 ]
+  },
+  {
+    name: "sequence_length"
+    data_type: TYPE_UINT32
+    dims: [ -1 ]
+  },
+  {
+    name: "cum_log_probs"
+    data_type: TYPE_FP32
+    dims: [ -1 ]
+  },
+  {
+    name: "output_log_probs"
+    data_type: TYPE_FP32
+    dims: [ -1, -1 ]
+  }
+]
+
+parameters {
+  key: "pipeline_para_size"
+  value: {
+    string_value: "1"
+  }
+}
+parameters {
+  key: "data_type"
+  value: {
+    string_value: "fp16"
+  }
+}
+parameters {
+  key: "model_type"
+  value: {
+    string_value: "Llama"
+  }
+}
+
+parameters {
+  key: "enable_custom_all_reduce"
+  value: {
+    string_value: "0"
+  }
+}
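+# NOTE: tensor_para_size should match the TP value the model was converted
+# with and the number of GPUs exposed by service_docker_up.sh.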
+parameters {
+  key: "tensor_para_size"
+  value: {
+    string_value: "1"
+  }
+}
+parameters {
+  key: "model_name"
+  value: {
+    string_value: "internlm-chat-7b"
+  }
+}
diff --git a/triton_models/postprocessing/1/model.py b/triton_models/postprocessing/1/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..20de97595195da5dedc044a31c6086c1f49892da
--- /dev/null
+++ b/triton_models/postprocessing/1/model.py
@@ -0,0 +1,132 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os.path as osp
+from pathlib import Path
+
+import numpy as np
+import triton_python_backend_utils as pb_utils
+
+# This tokenizer comes from `lmdeploy/turbomind/tokenizer.py`. Before an LLM
+# can be served by Triton Inference Server, it has to be converted by running
+# `python lmdeploy/serve/turbomind/deploy.py`, which copies
+# `lmdeploy/turbomind/tokenizer.py` to `tokenizer/tokenizer.py`.
+from .tokenizer.tokenizer import Tokenizer
+
+
+class TritonPythonModel:
+    """Your Python model must use the same class name.
+
+    Every Python model that is created must have "TritonPythonModel" as the
+    class name.
+    """
+
+    def initialize(self, args):
+        """`initialize` is called only once when the model is being loaded.
+        Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+        Parameters
+        ----------
+        args : dict
+          Both keys and values are strings. The dictionary keys and values are:
+          * model_config: A JSON string containing the model configuration
+          * model_instance_kind: A string containing model instance kind
+          * model_instance_device_id: A string containing model instance device
+          ID
+          * model_repository: Model repository path
+          * model_version: Model version
+          * model_name: Model name
+        """
+        # Parse model configs
+        self.model_config = model_config = json.loads(args['model_config'])
+
+        # Parse model output configs
+        output_config = pb_utils.get_output_config_by_name(
+            model_config, 'OUTPUT')
+
+        # Convert Triton types to numpy types
+        self.output_dtype = pb_utils.triton_string_to_numpy(
+            output_config['data_type'])
+
+        cur_folder = Path(__file__).parent
+
+        self.tokenizer = Tokenizer(
+            osp.join(
+                cur_folder, self.model_config['parameters']['tokenizer_path']
+                ['string_value']))
+
+    def execute(self, requests):
+        """`execute` must be implemented in every Python model. `execute`
+        function receives a list of pb_utils.InferenceRequest as the only
+        argument. This function is called when an inference is requested
+        for this model. Depending on the batching configuration (e.g. Dynamic
+        Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+        pb_utils.InferenceRequest in `requests`. If there is an error, you can
+        set the error argument when creating a pb_utils.InferenceResponse.
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for idx, request in enumerate(requests):
+            # Get input tensors
+            tokens_batch = pb_utils.get_input_tensor_by_name(
+                request, 'TOKENS_BATCH').as_numpy()
+            sequence_length = pb_utils.get_input_tensor_by_name(
+                request, 'sequence_length').as_numpy()
+
+            # Postprocessing output data.
+            outputs = self._postprocessing(tokens_batch.tolist(),
+                                           sequence_length)
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            output_tensor = pb_utils.Tensor(
+                'OUTPUT',
+                np.array(outputs).astype(self.output_dtype))
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #    output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(
+                output_tensors=[output_tensor])
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+
+        Implementing `finalize` function is optional. This function allows the
+        model to perform any necessary clean ups before exit.
+        """
+        print('Cleaning up...')
+
+    def _postprocessing(self, tokens_batch, sequence_length):
+        """decode token ids into texts."""
+        outputs = []
+        for beam_tokens, beam_len in zip(tokens_batch, sequence_length):
+            for tokens, _len in zip(beam_tokens, beam_len):
+                output = self.tokenizer.decode(tokens, _len)
+                output = output.encode('utf8')
+                outputs.append(output)
+        return outputs
diff --git a/triton_models/postprocessing/config.pbtxt b/triton_models/postprocessing/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..a4c3fd1041dcd03dc5c18b3fc28533cb82ac5653
--- /dev/null
+++ b/triton_models/postprocessing/config.pbtxt
@@ -0,0 +1,38 @@
+name: "postprocessing"
+backend: "python"
+max_batch_size: 1
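+# Decodes TOKENS_BATCH (generated token ids, beam x sequence per request) into
+# UTF-8 strings returned in OUTPUT.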
+input [
+  {
+    name: "TOKENS_BATCH"
+    data_type: TYPE_UINT32
+    dims: [ -1, -1 ]
+  },
+  {
+    name: "sequence_length"
+    data_type: TYPE_UINT32
+    dims: [ -1 ]
+  }
+]
+output [
+  {
+    name: "OUTPUT"
+    data_type: TYPE_STRING
+    dims: [ -1, -1 ]
+  }
+]
+
+instance_group [
+    {
+        count: 16
+        kind: KIND_CPU
+    }
+]
+
+parameters {
+  key: "tokenizer_path"
+  value: {
+    string_value: "tokenizer/tokenizer.model"
+  }
+}
diff --git a/triton_models/preprocessing/1/model.py b/triton_models/preprocessing/1/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..77f51bfb3d03e4ccd1eee656eada1744ae19805a
--- /dev/null
+++ b/triton_models/preprocessing/1/model.py
@@ -0,0 +1,153 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os.path as osp
+from pathlib import Path
+
+import numpy as np
+import torch
+import triton_python_backend_utils as pb_utils
+from torch.nn.utils.rnn import pad_sequence
+
+# This tokenizer comes from `lmdeploy/turbomind/tokenizer.py`. Before an LLM
+# can be served by Triton Inference Server, it has to be converted by running
+# `python lmdeploy/serve/turbomind/deploy.py`, which copies
+# `lmdeploy/turbomind/tokenizer.py` to `tokenizer/tokenizer.py`.
+from .tokenizer.tokenizer import Tokenizer
+
+
+class TritonPythonModel:
+    """Your Python model must use the same class name.
+
+    Every Python model that is created must have "TritonPythonModel" as the
+    class name.
+    """
+
+    def initialize(self, args):
+        """`initialize` is called only once when the model is being loaded.
+        Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+        Parameters
+        ----------
+        args : dict
+          Both keys and values are strings. The dictionary keys and values are:
+          * model_config: A JSON string containing the model configuration
+          * model_instance_kind: A string containing model instance kind
+          * model_instance_device_id: A string containing model instance device
+          ID
+          * model_repository: Model repository path
+          * model_version: Model version
+          * model_name: Model name
+        """
+        # Parse model configs
+        self.model_config = model_config = json.loads(args['model_config'])
+
+        # Parse model output configs and convert Triton types to numpy types
+        input_names = [
+            'INPUT_ID', 'REQUEST_INPUT_LEN', 'BAD_WORDS_IDS', 'STOP_WORDS_IDS'
+        ]
+        for input_name in input_names:
+            setattr(
+                self,
+                input_name.lower() + '_dtype',
+                pb_utils.triton_string_to_numpy(
+                    pb_utils.get_output_config_by_name(
+                        model_config, input_name)['data_type']))
+
+        cur_folder = Path(__file__).parent
+        self.tokenizer = Tokenizer(
+            osp.join(
+                cur_folder, self.model_config['parameters']['tokenizer_path']
+                ['string_value']))
+        self.start_id = self.tokenizer.bos_token_id
+        self.end_id = self.tokenizer.eos_token_id
+
+    def execute(self, requests):
+        """`execute` must be implemented in every Python model. `execute`
+        function receives a list of pb_utils.InferenceRequest as the only
+        argument. This function is called when an inference is requested
+        for this model. Depending on the batching configuration (e.g. Dynamic
+        Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+        pb_utils.InferenceRequest in `requests`. If there is an error, you can
+        set the error argument when creating a pb_utils.InferenceResponse.
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for idx, request in enumerate(requests):
+            # Get input tensors
+            query = pb_utils.get_input_tensor_by_name(request,
+                                                      'QUERY').as_numpy()
+            request_output_len = pb_utils.get_input_tensor_by_name(
+                request, 'REQUEST_OUTPUT_LEN').as_numpy()
+
+            # Preprocessing input data.
+            input_id, request_input_len = self._create_request(query)
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            input_id_tensor = pb_utils.Tensor(
+                'INPUT_ID',
+                np.array(input_id).astype(self.input_id_dtype))
+            request_input_len_tensor = pb_utils.Tensor(
+                'REQUEST_INPUT_LEN',
+                np.array(request_input_len).astype(
+                    self.request_input_len_dtype))
+            request_output_len_tensor = pb_utils.Tensor(
+                'REQUEST_OUTPUT_LEN', request_output_len)
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #    output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(output_tensors=[
+                input_id_tensor, request_input_len_tensor,
+                request_output_len_tensor
+            ])
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+
+        Implementing `finalize` function is optional. This function allows the
+        model to perform any necessary clean ups before exit.
+        """
+        print('Cleaning up...')
+
+    def _create_request(self, query):
+        """Tokenize prompts and return the token ids and their length.
+
+        Args:
+            query (List[str]): a list of prompts
+        Returns:
+            tuple: token ids and their length
+        """
+        start_ids = [
+            torch.IntTensor(self.tokenizer.encode(s[0].decode()))
+            for s in query
+        ]
+        start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
+        start_ids = pad_sequence(start_ids,
+                                 batch_first=True,
+                                 padding_value=self.end_id)
+        return start_ids, start_lengths
diff --git a/triton_models/preprocessing/config.pbtxt b/triton_models/preprocessing/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..a87abd98df1e193849122f0b1f3979f20eef3bbf
--- /dev/null
+++ b/triton_models/preprocessing/config.pbtxt
@@ -0,0 +1,74 @@
+name: "preprocessing"
+backend: "python"
+max_batch_size: 1
+
+input [
+    {
+        name: "QUERY"
+        data_type: TYPE_STRING
+        dims: [ -1 ]
+    },
+    {
+        name: "BAD_WORDS_DICT"
+        data_type: TYPE_STRING
+        dims: [ -1 ]
+        optional: true
+    },
+    {
+        name: "STOP_WORDS_DICT"
+        data_type: TYPE_STRING
+        dims: [ -1 ]
+        optional: true
+    },
+    {
+        name: "REQUEST_OUTPUT_LEN"
+        data_type: TYPE_UINT32
+        dims: [ -1 ]
+    }
+]
+output [
+    {
+        name: "INPUT_ID"
+        data_type: TYPE_UINT32
+        dims: [ -1 ]
+    },
+    {
+        name: "REQUEST_INPUT_LEN"
+        data_type: TYPE_UINT32
+        dims: [ 1 ]
+    },
+    {
+        name: "BAD_WORDS_IDS"
+        data_type: TYPE_INT32
+        dims: [ 2, -1 ]
+    },
+    {
+        name: "STOP_WORDS_IDS"
+        data_type: TYPE_INT32
+        dims: [ 2, -1 ]
+    },
+    {
+        name: "REQUEST_OUTPUT_LEN"
+        data_type: TYPE_UINT32
+        dims: [ -1 ]
+    },
+    {
+        name: "PROMPT_LEARNING_TASK_NAME_IDS"
+        data_type: TYPE_UINT32
+        dims: [ 1 ]
+    }
+]
+
+instance_group [
+    {
+        count: 4
+        kind: KIND_CPU
+    }
+]
+
+parameters {
+  key: "tokenizer_path"
+  value: {
+    string_value: "tokenizer/tokenizer.model"
+  }
+}
diff --git a/triton_models/tokenizer/config.json b/triton_models/tokenizer/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d7912f1fc3b568622959d9a11d725525bf46d9c3
--- /dev/null
+++ b/triton_models/tokenizer/config.json
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "/nvme/shared_data/InternLM/internlm-chat-7b",
+  "architectures": [
+    "InternLMForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_internlm.InternLMConfig",
+    "AutoModel": "modeling_internlm.InternLMForCausalLM",
+    "AutoModelForCausalLM": "modeling_internlm.InternLMForCausalLM"
+  },
+  "bias": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "model_type": "internlm",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-06,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.33.1",
+  "use_cache": false,
+  "vocab_size": 103168
+}
diff --git a/triton_models/tokenizer/configuration_internlm.py b/triton_models/tokenizer/configuration_internlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..298f91319529e9b3034bcb74bb428d610534a0ba
--- /dev/null
+++ b/triton_models/tokenizer/configuration_internlm.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" InternLM model configuration"""
+
+from transformers.utils import logging
+from transformers.configuration_utils import PretrainedConfig
+
+
+logger = logging.get_logger(__name__)
+
+INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class InternLMConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of an [`InternLMModel`]. It is used to instantiate an InternLM
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the InternLM-7B.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 103168):
+            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`InternLMModel`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+    Example:
+
+    ```python
+    >>> from transformers import InternLMModel, InternLMConfig
+
+    >>> # Initializing a InternLM internlm-7b style configuration
+    >>> configuration = InternLMConfig()
+
+    >>> # Initializing a model from the internlm-7b style configuration
+    >>> model = InternLMModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "internlm"
+    _auto_class = "AutoConfig"
+
+    def __init__(
+        self,
+        vocab_size=103168,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        bias=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.bias = bias
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
diff --git a/triton_models/tokenizer/generation_config.json b/triton_models/tokenizer/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d91279883b380cc7513492e8dfb095e65f7a58af
--- /dev/null
+++ b/triton_models/tokenizer/generation_config.json
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.33.1"
+}
diff --git a/triton_models/tokenizer/modeling_internlm.py b/triton_models/tokenizer/modeling_internlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9078b2b0579f6c319c0536448b71ad07eb71f70
--- /dev/null
+++ b/triton_models/tokenizer/modeling_internlm.py
@@ -0,0 +1,970 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch InternLM model."""
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.generation.streamers import BaseStreamer
+from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_internlm import InternLMConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "InternLMConfig"
+
+# Copied from transformers.models.bart.modeling_bart._make_causal_mask
+def _make_causal_mask(
+    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+    """
+    Make the causal mask used for auto-regressive (uni-directional) self-attention.
+    """
+    bsz, tgt_len = input_ids_shape
+    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
+    mask_cond = torch.arange(mask.size(-1), device=device)
+    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+    mask = mask.to(dtype)
+
+    if past_key_values_length > 0:
+        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+# Copied from transformers.models.bart.modeling_bart._expand_mask
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+    """
+    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+    """
+    bsz, src_len = mask.size()
+    tgt_len = tgt_len if tgt_len is not None else src_len
+
+    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+    inverted_mask = 1.0 - expanded_mask
+
+    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+class InternLMRMSNorm(nn.Module):
+    def __init__(self, hidden_size, eps=1e-6):
+        """
+        InternLMRMSNorm is equivalent to T5LayerNorm
+        """
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.variance_epsilon = eps
+
+    def forward(self, hidden_states):
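+        # RMSNorm: x * rsqrt(mean(x^2) + eps) * weight; the variance is computed
+        # in fp32 and the result is cast back to half precision below if needed.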
+        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+        # convert into half-precision if necessary
+        if self.weight.dtype in [torch.float16, torch.bfloat16]:
+            hidden_states = hidden_states.to(self.weight.dtype)
+
+        return self.weight * hidden_states
+
+
+class InternLMRotaryEmbedding(torch.nn.Module):
+    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+        super().__init__()
+        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
+        self.register_buffer("inv_freq", inv_freq)
+
+        # Build here to make `torch.jit.trace` work.
+        self.max_seq_len_cached = max_position_embeddings
+        t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
+        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+        # The layout differs from the paper, but an equivalent permutation yields the same result.
+        emb = torch.cat((freqs, freqs), dim=-1)
+        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
+        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
+
+    def forward(self, x, seq_len=None):
+        # x: [bs, num_attention_heads, seq_len, head_size]
+        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
+        if seq_len > self.max_seq_len_cached:
+            self.max_seq_len_cached = seq_len
+            t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
+            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+            # The layout differs from the paper, but an equivalent permutation yields the same result.
+            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+            self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
+            self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
+        return (
+            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+        )
+
+
+def rotate_half(x):
+    """Rotates half the hidden dims of the input."""
+    x1 = x[..., : x.shape[-1] // 2]
+    x2 = x[..., x.shape[-1] // 2 :]
+    return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
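+    # Rotary position embedding (RoPE): rotate channel pairs of q and k by an
+    # angle proportional to each token's position id.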
+    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
+    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
+    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
+    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    q_embed = (q * cos) + (rotate_half(q) * sin)
+    k_embed = (k * cos) + (rotate_half(k) * sin)
+    return q_embed, k_embed
+
+
+class InternLMMLP(nn.Module):
+    def __init__(
+        self,
+        hidden_size: int,
+        intermediate_size: int,
+        hidden_act: str,
+    ):
+        super().__init__()
+        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+        self.act_fn = ACT2FN[hidden_act]
+
+    def forward(self, x):
+        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+class InternLMAttention(nn.Module):
+    """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+    def __init__(self, config: InternLMConfig):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.num_heads = config.num_attention_heads
+        self.head_dim = self.hidden_size // self.num_heads
+        self.max_position_embeddings = config.max_position_embeddings
+
+        if (self.head_dim * self.num_heads) != self.hidden_size:
+            raise ValueError(
+                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                f" and `num_heads`: {self.num_heads})."
+            )
+        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
+        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
+        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
+        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
+        self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
+
+    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: bool = False,
+        use_cache: bool = False,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        bsz, q_len, _ = hidden_states.size()
+
+        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            kv_seq_len += past_key_value[0].shape[-2]
+        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+        # [bsz, nh, t, hd]
+
+        if past_key_value is not None:
+            # reuse k, v, self_attention
+            key_states = torch.cat([past_key_value[0], key_states], dim=2)
+            value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+        past_key_value = (key_states, value_states) if use_cache else None
+
+        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+            raise ValueError(
+                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                f" {attn_weights.size()}"
+            )
+
+        if attention_mask is not None:
+            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                raise ValueError(
+                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                )
+            attn_weights = attn_weights + attention_mask
+            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
+
+        # upcast attention to fp32
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+        attn_output = torch.matmul(attn_weights, value_states)
+
+        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+            raise ValueError(
+                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                f" {attn_output.size()}"
+            )
+
+        attn_output = attn_output.transpose(1, 2)
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+
+class InternLMDecoderLayer(nn.Module):
+    def __init__(self, config: InternLMConfig):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        self.self_attn = InternLMAttention(config=config)
+        self.mlp = InternLMMLP(
+            hidden_size=self.hidden_size,
+            intermediate_size=config.intermediate_size,
+            hidden_act=config.hidden_act,
+        )
+        self.input_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.post_attention_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+        """
+        Args:
+            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            use_cache (`bool`, *optional*):
+                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                (see `past_key_values`).
+            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+        """
+
+        residual = hidden_states
+
+        hidden_states = self.input_layernorm(hidden_states)
+
+        # Self Attention
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+        )
+        hidden_states = residual + hidden_states
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (self_attn_weights,)
+
+        if use_cache:
+            outputs += (present_key_value,)
+
+        return outputs
+
+
+INTERNLM_START_DOCSTRING = r"""
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`InternLMConfig`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
+    INTERNLM_START_DOCSTRING,
+)
+class InternLMPreTrainedModel(PreTrainedModel):
+    config_class = InternLMConfig
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["InternLMDecoderLayer"]
+    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
+
+    def _init_weights(self, module):
+        std = self.config.initializer_range
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if isinstance(module, InternLMModel):
+            module.gradient_checkpointing = value
+
+
+INTERNLM_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+            it.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+            `past_key_values`).
+
+            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+            information on the default strategy.
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+            config.n_positions - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
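+
+# A minimal usage sketch for the inputs documented above (comments only, not executed at import time;
+# it assumes the same placeholder paths used in the docstring example further below):
+#
+#     from transformers import AutoTokenizer
+#     tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER, trust_remote_code=True)
+#     model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+#     enc = tokenizer(["Hello InternLM"], return_tensors="pt", padding=True)
+#     # enc["input_ids"] / enc["attention_mask"] have shape (batch_size, sequence_length)
+#     outputs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"], use_cache=True)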
+
+
+@add_start_docstrings(
+    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
+    INTERNLM_START_DOCSTRING,
+)
+class InternLMModel(InternLMPreTrainedModel):
+    """
+    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`InternLMDecoderLayer`]
+
+    Args:
+        config: InternLMConfig
+    """
+    _auto_class = "AutoModel"
+
+    def __init__(self, config: InternLMConfig):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+        self.norm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.embed_tokens = value
+
+    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
+    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+        # create causal mask
+        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+        combined_attention_mask = None
+        if input_shape[-1] > 1:
+            combined_attention_mask = _make_causal_mask(
+                input_shape,
+                inputs_embeds.dtype,
+                device=inputs_embeds.device,
+                past_key_values_length=past_key_values_length,
+            )
+
+        if attention_mask is not None:
+            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                inputs_embeds.device
+            )
+            combined_attention_mask = (
+                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+            )
+
+        return combined_attention_mask
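+
+    # Shape sketch for the combination above, assuming the usual additive-mask helpers where attended
+    # positions hold 0 and masked positions hold the dtype minimum (illustrative values only):
+    #
+    #     # seq_length = 5 new tokens, past_key_values_length = 2 cached tokens
+    #     mask = self._prepare_decoder_attention_mask(
+    #         attention_mask=torch.ones(2, 7), input_shape=(2, 5),
+    #         inputs_embeds=inputs_embeds, past_key_values_length=2,
+    #     )
+    #     mask.shape  # torch.Size([2, 1, 5, 7]) -> added to the attention scores in each layer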
+
+    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape
+        elif inputs_embeds is not None:
+            batch_size, seq_length, _ = inputs_embeds.shape
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        seq_length_with_past = seq_length
+        past_key_values_length = 0
+
+        if past_key_values is not None:
+            past_key_values_length = past_key_values[0][0].shape[2]
+            seq_length_with_past = seq_length_with_past + past_key_values_length
+
+        if position_ids is None:
+            device = input_ids.device if input_ids is not None else inputs_embeds.device
+            position_ids = torch.arange(
+                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+            )
+            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+        else:
+            position_ids = position_ids.view(-1, seq_length).long()
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+        # embed positions
+        if attention_mask is None:
+            attention_mask = torch.ones(
+                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+            )
+        attention_mask = self._prepare_decoder_attention_mask(
+            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+        )
+
+        hidden_states = inputs_embeds
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        next_decoder_cache = () if use_cache else None
+
+        for idx, decoder_layer in enumerate(self.layers):
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        # None for past_key_value
+                        return module(*inputs, output_attentions, None)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(decoder_layer),
+                    hidden_states,
+                    attention_mask,
+                    position_ids,
+                    None,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    position_ids=position_ids,
+                    past_key_value=past_key_value,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = next_decoder_cache if use_cache else None
+        if not return_dict:
+            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+
+
+class InternLMForCausalLM(InternLMPreTrainedModel):
+    _auto_class = "AutoModelForCausalLM"
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = InternLMModel(config)
+
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        r"""
+        Args:
+            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, InternLMForCausalLM
+
+        >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
+        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+        >>> # Generate
+        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+        ```"""
+
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = outputs[0]
+        logits = self.lm_head(hidden_states)
+
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+    ):
+        if past_key_values:
+            input_ids = input_ids[:, -1:]
+
+        position_ids = kwargs.get("position_ids", None)
+        if attention_mask is not None and position_ids is None:
+            # create position_ids on the fly for batch generation
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past_key_values:
+                position_ids = position_ids[:, -1].unsqueeze(-1)
+
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {"inputs_embeds": inputs_embeds}
+        else:
+            model_inputs = {"input_ids": input_ids}
+
+        model_inputs.update(
+            {
+                "position_ids": position_ids,
+                "past_key_values": past_key_values,
+                "use_cache": kwargs.get("use_cache"),
+                "attention_mask": attention_mask,
+            }
+        )
+        return model_inputs
+
+    @staticmethod
+    def _reorder_cache(past_key_values, beam_idx):
+        reordered_past = ()
+        for layer_past in past_key_values:
+            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
+        return reordered_past
+    
+    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = []):
+        prompt = ""
+        for record in history:
+            prompt += f"""<s><|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
+        if len(prompt) == 0:
+            prompt += "<s>"
+        prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
+        return tokenizer([prompt], return_tensors="pt")
+    
+    @torch.no_grad()
+    def chat(self, 
+             tokenizer, 
+             query: str,
+             history: List[Tuple[str, str]] = [], 
+             streamer: Optional[BaseStreamer] = None,
+             max_new_tokens: int = 1024,
+             do_sample: bool = True,
+             temperature: float = 0.8,
+             top_p: float = 0.8,
+             eos_token_id = (2, 103028),
+             **kwargs):
+        inputs = self.build_inputs(tokenizer, query, history)
+        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
+        outputs = self.generate(**inputs, 
+                                streamer=streamer, 
+                                max_new_tokens=max_new_tokens, 
+                                do_sample=do_sample, 
+                                temperature=temperature, 
+                                top_p=top_p, 
+                                eos_token_id=list(eos_token_id),
+                                **kwargs)
+        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]):]
+        response = tokenizer.decode(outputs, skip_special_tokens=True)
+        response = response.split("<eoa>")[0]
+        history = history + [(query, response)]
+        return response, history
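+
+    # Usage sketch for `chat` (assuming the same placeholder checkpoint paths as in the docstring example above):
+    #
+    #     model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+    #     tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+    #     response, history = model.chat(tokenizer, "Hello!", history=[])
+    #     response, history = model.chat(tokenizer, "Summarize that in one sentence.", history=history)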
+    
+    @torch.no_grad()
+    def stream_chat(self, 
+                    tokenizer,
+                    query: str,
+                    history: List[Tuple[str, str]] = [], 
+                    max_new_tokens: int = 1024,
+                    do_sample: bool = True,
+                    temperature: float = 0.8,
+                    top_p: float = 0.8,
+                    eos_token_id = (2, 103028),
+                    **kwargs):
+        class ChatStreamer(BaseStreamer):
+            def __init__(self, tokenizer) -> None:
+                super().__init__()
+                self.tokenizer = tokenizer
+                
+            def put(self, value):
+                if len(value.shape) > 1 and value.shape[0] > 1:
+                    raise ValueError("ChatStreamer only supports batch size 1")
+                elif len(value.shape) > 1:
+                    value = value[0]
+                token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
+                if token.strip() != "<eoa>":
+                    print(token, end="")
+                
+            def end(self):
+                print("")
+            
+        return self.chat(
+            tokenizer=tokenizer,
+            query=query,
+            streamer=ChatStreamer(tokenizer=tokenizer),
+            history=history, 
+            max_new_tokens=max_new_tokens,
+            do_sample=do_sample,
+            temperature=temperature,
+            top_p=top_p,
+            eos_token_id=eos_token_id,
+            **kwargs
+        )
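+
+    # `stream_chat` behaves like `chat` but prints tokens through the local `ChatStreamer` as they are
+    # generated, e.g. (with the same setup as the sketch above):
+    #
+    #     response, history = model.stream_chat(tokenizer, "Tell me a short story.")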
+                
+
+@add_start_docstrings(
+    """
+    The InternLM Model transformer with a sequence classification head on top (linear layer).
+
+    [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+    (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it also takes the last value in each row of
+    the batch in that case (see the usage sketch at the end of this class).
+    """,
+    INTERNLM_START_DOCSTRING,
+)
+class InternLMForSequenceClassification(InternLMPreTrainedModel):
+    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.model = InternLMModel(config)
+        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        transformer_outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+        logits = self.score(hidden_states)
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]
+        else:
+            batch_size = inputs_embeds.shape[0]
+
+        if self.config.pad_token_id is None and batch_size != 1:
+            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+        if self.config.pad_token_id is None:
+            sequence_lengths = -1
+        else:
+            if input_ids is not None:
+                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
+            else:
+                sequence_lengths = -1
+
+        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+        loss = None
+        if labels is not None:
+            labels = labels.to(logits.device)
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(pooled_logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(pooled_logits, labels)
+        if not return_dict:
+            output = (pooled_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutputWithPast(
+            loss=loss,
+            logits=pooled_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
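+
+    # Usage sketch (assuming a tokenizer with a defined pad token, so the last non-padding token can be
+    # located for pooling as described in the class docstring; `PATH_TO_CONVERTED_WEIGHTS` is the same
+    # placeholder used in the causal-LM example above):
+    #
+    #     model = InternLMForSequenceClassification.from_pretrained(PATH_TO_CONVERTED_WEIGHTS, num_labels=2)
+    #     enc = tokenizer(["great movie", "terrible movie"], return_tensors="pt", padding=True)
+    #     logits = model(**enc).logits        # (batch_size, num_labels), pooled from the last real token
+    #     predictions = logits.argmax(dim=-1)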
diff --git a/triton_models/tokenizer/placeholder b/triton_models/tokenizer/placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/triton_models/tokenizer/special_tokens_map.json b/triton_models/tokenizer/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..9bfed7513d3b1b65445af10c4571256f4a19b290
--- /dev/null
+++ b/triton_models/tokenizer/special_tokens_map.json
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "</s>",
+  "unk_token": "<unk>"
+}
diff --git a/triton_models/tokenizer/tokenization_internlm.py b/triton_models/tokenizer/tokenization_internlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6a348959c94afcc41f02caacf47a8bf23078dca
--- /dev/null
+++ b/triton_models/tokenizer/tokenization_internlm.py
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for InternLM."""
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+class InternLMTokenizer(PreTrainedTokenizer):
+    """
+    Construct an InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    model_input_names = ["input_ids", "attention_mask"]
+    _auto_class = "AutoTokenizer"
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="</s>",
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        decode_with_prefix_space=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.decode_with_prefix_space = decode_with_prefix_space
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        self._no_prefix_space_tokens = None
+
+        """ Initialisation"""
+
+    @property
+    def no_prefix_space_tokens(self):
+        if self._no_prefix_space_tokens is None:
+            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
+        return self._no_prefix_space_tokens
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    @property
+    def bos_token_id(self) -> Optional[int]:
+        return self.sp_model.bos_id()
+
+    @property
+    def eos_token_id(self) -> Optional[int]:
+        return self.sp_model.eos_id()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def _maybe_add_prefix_space(self, tokens, decoded):
+        if tokens and tokens[0] not in self.no_prefix_space_tokens:
+            return " " + decoded
+        else:
+            return decoded
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (strings) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for token in tokens:
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        out_string = self.clean_up_tokenization(out_string)
+        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+        return out_string[1:]
+
+    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple(str)`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        if self.add_bos_token:
+            bos_token_ids = [self.bos_token_id]
+        else:
+            bos_token_ids = []
+
+        output = bos_token_ids + token_ids_0
+
+        if token_ids_1 is not None:
+            output = output + token_ids_1
+
+        if self.add_eos_token:
+            output = output + [self.eos_token_id]
+
+        return output
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
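+
+    # A quick sketch of the mask layout produced above (p, q, r, s stand for arbitrary token ids):
+    #
+    #     tokenizer.get_special_tokens_mask([p, q, r])        # -> [1, 0, 0, 0, 1]
+    #     tokenizer.get_special_tokens_mask([p, q], [r, s])   # -> [1, 0, 0, 1, 1, 0, 0, 1]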
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does
+        not make use of token type ids, therefore a list of zeros is returned.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of zeros.
+        """
+        eos = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return len(token_ids_0 + eos) * [0]
+        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
\ No newline at end of file
diff --git a/triton_models/tokenizer/tokenizer.model b/triton_models/tokenizer/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..24f4d0607b1f6a966a5d653bb255813638de0bec
--- /dev/null
+++ b/triton_models/tokenizer/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aab622d98c98677a1a51f969e25765154487bf3e85c7819db105db2fcacba83f
+size 1658691
diff --git a/triton_models/tokenizer/tokenizer.py b/triton_models/tokenizer/tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..98db9c2b61d0b7cd7ae48eacbf29abcf80148af7
--- /dev/null
+++ b/triton_models/tokenizer/tokenizer.py
@@ -0,0 +1,290 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os.path as osp
+from typing import Optional, Sequence, Union
+
+import torch
+
+
+class SentencePieceTokenizer:
+    """Tokenizer of sentencepiece.
+
+    Args:
+        model_file (str): the path of the tokenizer model
+    """
+
+    def __init__(self, model_file: str):
+        from sentencepiece import SentencePieceProcessor
+        self.model = SentencePieceProcessor(model_file=model_file)
+        self._no_prefix_space_tokens = None
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size()
+
+    @property
+    def bos_token_id(self):
+        """beginning of the sentence token id."""
+        return self.model.bos_id()
+
+    @property
+    def eos_token_id(self):
+        """end of the sentence token id."""
+        return self.model.eos_id()
+
+    @property
+    def no_prefix_space_tokens(self):
+        """tokens without prefix space."""
+        if self._no_prefix_space_tokens is None:
+            vocab = self.model.IdToPiece(list(range(self.vocab_size)))
+            self._no_prefix_space_tokens = {
+                i
+                for i, tok in enumerate(vocab) if not tok.startswith('▁')
+            }
+        return self._no_prefix_space_tokens
+
+    def _maybe_add_prefix_space(self, tokens, decoded):
+        """maybe add prefix space for incremental decoding."""
+        if len(tokens) and tokens[0] not in self.no_prefix_space_tokens:
+            return ' ' + decoded
+        else:
+            return decoded
+
+    def encode(self, s: str):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        add_bos = False
+        add_eos = False
+        if s.find('<BOS>') != -1:
+            s = s.replace('<BOS>', '')
+            add_bos = True
+        if s == '<EOS>':
+            s = ''
+            add_eos = True
+        return self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: the decoded text
+        """
+        if isinstance(t, torch.Tensor):
+            t = t.tolist()
+        t = t[offset:]
+        out_string = self.model.Decode(t)
+        if offset:
+            out_string = self._maybe_add_prefix_space(t, out_string)
+        return out_string
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        import addict
+        add_bos = False
+        add_eos = False
+
+        input_ids = self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
+        return addict.Addict(input_ids=input_ids)
+
+
+class HuggingFaceTokenizer:
+    """Tokenizer of huggingface transformers.
+
+    Args:
+        model_dir (str): the directory of the tokenizer model
+    """
+
+    def __init__(self, model_dir: str):
+        from transformers import (AutoTokenizer, CodeLlamaTokenizerFast,
+                                  LlamaTokenizerFast)
+        model_file = osp.join(model_dir, 'tokenizer.model')
+        backend_tokenizer_file = osp.join(model_dir, 'tokenizer.json')
+        model_file_exists = osp.exists(model_file)
+        if not osp.exists(backend_tokenizer_file) and model_file_exists:
+            print('WARNING: Cannot find tokenizer.json. '
+                  'It may take a long time to initialize the tokenizer.')
+        self.model = AutoTokenizer.from_pretrained(model_dir,
+                                                   trust_remote_code=True)
+        self.need_padding = isinstance(self.model, LlamaTokenizerFast) \
+            or isinstance(self.model, CodeLlamaTokenizerFast)
+        self._no_prefix_space_tokens = None
+        # save tokenizer.json to reuse
+        if not osp.exists(backend_tokenizer_file) and model_file_exists:
+            if hasattr(self.model, 'backend_tokenizer'):
+                self.model.backend_tokenizer.save(backend_tokenizer_file)
+
+        if self.model.eos_token_id is None:
+            generation_config_file = osp.join(model_dir,
+                                              'generation_config.json')
+            with open(generation_config_file, 'r') as f:
+                cfg = json.load(f)
+                self.model.eos_token_id = cfg['eos_token_id']
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size
+
+    @property
+    def bos_token_id(self):
+        """beginning of the sentence token id."""
+        return self.model.bos_token_id
+
+    @property
+    def eos_token_id(self):
+        """end of the sentence token id."""
+        return self.model.eos_token_id
+
+    @property
+    def no_prefix_space_tokens(self):
+        """tokens without prefix space."""
+        if self._no_prefix_space_tokens is None:
+            vocab = self.model.convert_ids_to_tokens(
+                list(range(self.vocab_size)))
+            self._no_prefix_space_tokens = {
+                i
+                for i, tok in enumerate(vocab) if not tok.startswith('▁')
+            }
+        return self._no_prefix_space_tokens
+
+    def _maybe_add_prefix_space(self, tokens, decoded):
+        """maybe add prefix space for incremental decoding."""
+        if self.need_padding and len(
+                tokens) and tokens[0] not in self.no_prefix_space_tokens:
+            return ' ' + decoded
+        else:
+            return decoded
+
+    def encode(self, s: str):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        add_special_tokens = False
+        if s.find('<BOS>') != -1:
+            s = s.replace('<BOS>', '<s>')
+        if s == '<EOS>':
+            s = '</s>'
+        if len(s) == 0:
+            add_special_tokens = True
+        return self.model.encode(s, add_special_tokens=add_special_tokens)
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: the decoded text
+        """
+        skip_special_tokens = True
+        t = t[offset:]
+        out_string = self.model.decode(t,
+                                       skip_special_tokens=skip_special_tokens)
+        if offset:
+            out_string = self._maybe_add_prefix_space(t, out_string)
+        return out_string
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        add_special_tokens = False
+        return self.model(s, add_special_tokens=add_special_tokens)
+
+
+class Tokenizer:
+    """Tokenize prompts or de-tokenize tokens into texts.
+
+    Args:
+        model_file (str): the path of the tokenizer model
+    """
+
+    def __init__(self, model_file: str):
+        if model_file.endswith('.model'):
+            model_folder = osp.split(model_file)[0]
+        else:
+            model_folder = model_file
+            model_file = osp.join(model_folder, 'tokenizer.model')
+        tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
+
+        model_file_exists = osp.exists(model_file)
+        config_exists = osp.exists(tokenizer_config_file)
+        use_hf_model = config_exists or not model_file_exists
+
+        if not use_hf_model:
+            self.model = SentencePieceTokenizer(model_file)
+        else:
+            self.model = HuggingFaceTokenizer(model_folder)
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size
+
+    @property
+    def bos_token_id(self):
+        """beginning of the sentence token id."""
+        return self.model.bos_token_id
+
+    @property
+    def eos_token_id(self):
+        """end of the sentence token id."""
+        return self.model.eos_token_id
+
+    def encode(self, s: str):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        return self.model.encode(s)
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: the decoded text
+        """
+        return self.model.decode(t, offset)
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        return self.model(s)
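+
+# Usage sketch (assuming this file sits next to the exported tokenizer files, as in `triton_models/tokenizer/`):
+#
+#     tokenizer = Tokenizer('triton_models/tokenizer')  # dispatches to the HF backend when a tokenizer_config.json exists
+#     ids = tokenizer.encode('Hello InternLM')
+#     text = tokenizer.decode(ids)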
diff --git a/triton_models/tokenizer/tokenizer_config.json b/triton_models/tokenizer/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..857ab9eccafd9682a491c525f5ebdc206c607de7
--- /dev/null
+++ b/triton_models/tokenizer/tokenizer_config.json
@@ -0,0 +1,15 @@
+{
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_internlm.InternLMTokenizer",
+      null
+    ]
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "tokenizer_class": "InternLMTokenizer",
+  "unk_token": "<unk>"
+}
diff --git a/triton_models/weights/layers.0.attention.w_qkv.0.qweight b/triton_models/weights/layers.0.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..a6d8bd363881a6c595c8b393a941c5401bc366e5
--- /dev/null
+++ b/triton_models/weights/layers.0.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3827a6ab0149c9e641de42919a51a1f208586eb7a27260eb345107bf7c7c411a
+size 25165824
diff --git a/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..a96ae4d6882b3c443e34d0bf664b2fee54adeff2
--- /dev/null
+++ b/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cd215d34b2aca208dc6ee86a4df064ed7fdf3ff7c52079262f7f23cb3aa30b9
+size 1572864
diff --git a/triton_models/weights/layers.0.attention.wo.0.bias b/triton_models/weights/layers.0.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..a8f74ac86d4a1e1fd9bd3e54faa404171841b9ef
Binary files /dev/null and b/triton_models/weights/layers.0.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.0.attention.wo.0.qweight b/triton_models/weights/layers.0.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..d44eeb4889f12bfab6aaa2a5d08b0d8ee6fee0d9
--- /dev/null
+++ b/triton_models/weights/layers.0.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c23a5d415c07c7c0dd13800e41becec9010f42c7d031e7095afd0031f5ad906a
+size 8388608
diff --git a/triton_models/weights/layers.0.attention.wo.0.scales_zeros b/triton_models/weights/layers.0.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..d92a9d329b28546b8c83e4ae935c76eb364bfb02
Binary files /dev/null and b/triton_models/weights/layers.0.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.0.ffn_norm.weight b/triton_models/weights/layers.0.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..2574ebc3a31fa347963695b858854f3be3d3ecb7
Binary files /dev/null and b/triton_models/weights/layers.0.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.1.attention.w_qkv.0.qweight b/triton_models/weights/layers.1.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..a0e1fed4ce82bd998222a061b52e85bcd3ae9e87
--- /dev/null
+++ b/triton_models/weights/layers.1.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f824cf40b3ca3adc59b6ebcd218a953c1a84d0386025e4dd1fd29908b24e96
+size 25165824
diff --git a/triton_models/weights/layers.1.attention.wo.0.scales_zeros b/triton_models/weights/layers.1.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..d284d353463e21faadc3e513b4c48f6195f10965
Binary files /dev/null and b/triton_models/weights/layers.1.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..695306475c1363b7a122a0db31641064d0557241
--- /dev/null
+++ b/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70e37d60fbc0641121371b2eaa517b2ab00257a70c8fc36494b444fc90bcd1f0
+size 1409024
diff --git a/triton_models/weights/layers.1.ffn_norm.weight b/triton_models/weights/layers.1.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..74da0d454241fd3270e6d382a2db41e4e31bc580
Binary files /dev/null and b/triton_models/weights/layers.1.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..a960b90a3417ed2933e64761ea17c09ffe88d59d
--- /dev/null
+++ b/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077aa0912226ff7fb9acc7c4b40b4d13725861e23ec09cb231b776e57e2d678e
+size 1572864
diff --git a/triton_models/weights/layers.10.attention.wo.0.scales_zeros b/triton_models/weights/layers.10.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..838304f3c947a1a2fb56a90109184613fdfd3728
Binary files /dev/null and b/triton_models/weights/layers.10.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.10.feed_forward.w13.0.qweight b/triton_models/weights/layers.10.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..976285dd4515d5fb08f60331f3b522a89fc79228
--- /dev/null
+++ b/triton_models/weights/layers.10.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8821f0c6176d9ec59cdeb94793cd0160383e04a6279558cdf3b8b94d87539c56
+size 45088768
diff --git a/triton_models/weights/layers.11.attention.wo.0.scales_zeros b/triton_models/weights/layers.11.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..d6a89ed23b6f93df7a9303cbf737cb280fb240bc
Binary files /dev/null and b/triton_models/weights/layers.11.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.11.feed_forward.w13.0.qweight b/triton_models/weights/layers.11.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..f2b17f0679973934ca0eea44c4b3897145d8f7ba
--- /dev/null
+++ b/triton_models/weights/layers.11.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acbf7a75466069cbdf40e474371e4b752d4225c915cc4da1e3064aca817c9b25
+size 45088768
diff --git a/triton_models/weights/layers.12.attention.w_qkv.0.bias b/triton_models/weights/layers.12.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..b19e9b3162f3cb8d3e9af1d9c110666bd5fac28a
Binary files /dev/null and b/triton_models/weights/layers.12.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.12.attention.wo.0.qweight b/triton_models/weights/layers.12.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..6487bc0a8c1a175fffc4a2044b9e1ff3cc9e302b
--- /dev/null
+++ b/triton_models/weights/layers.12.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85efcdf14534ce13cb5efeff1e29a7ffba9d9eb9d62f9aba733a7c46a5ed6c13
+size 8388608
diff --git a/triton_models/weights/layers.13.attention.w_qkv.0.bias b/triton_models/weights/layers.13.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..9dcd14ca6e13f420968d5e09674336bd5eacee7c
Binary files /dev/null and b/triton_models/weights/layers.13.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.13.attention.wo.0.scales_zeros b/triton_models/weights/layers.13.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..2180ce3bf966ce31040b63a28a6293d0867f3f08
Binary files /dev/null and b/triton_models/weights/layers.13.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.13.feed_forward.w13.0.qweight b/triton_models/weights/layers.13.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..4b7273946b129cc2f8905bddb7484cccb74829bb
--- /dev/null
+++ b/triton_models/weights/layers.13.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f87466cf9734d8896ba1f62405ec89e221432ac04fe6e4cc3cf947d9d4cdb008
+size 45088768
diff --git a/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..8584623ce7140a73e9a8d1d5d55fb3ff5d91371d
--- /dev/null
+++ b/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e381ca5b8410eb0f4e35baafa862a25bfbf6b4f7ac6d58535f1517dd6ed7ee56
+size 2818048
diff --git a/triton_models/weights/layers.13.feed_forward.w2.0.qweight b/triton_models/weights/layers.13.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..b8ddacd17cbb0eb1a65d89a992d69a68c23a80a4
--- /dev/null
+++ b/triton_models/weights/layers.13.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d86c606b9f58c9a1d1fe4c62166a848b7346451f109bc4f92110344c8f6e5e14
+size 22544384
diff --git a/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..ace973921b4467001504665660a950c414d266a5
--- /dev/null
+++ b/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb8979d01c3adb98f40baaa31c64aab65a2b2f20217418f40eeef8c9cbb479c2
+size 1572864
diff --git a/triton_models/weights/layers.14.attention.wo.0.qweight b/triton_models/weights/layers.14.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..30f16eb5f4423daf3781ca1139dc89c2e15cef2e
--- /dev/null
+++ b/triton_models/weights/layers.14.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ed60486dcf2b9a2f6353ffc3d9770fefcb7b1536d1a0d6bbe0e73f1314b509a
+size 8388608
diff --git a/triton_models/weights/layers.14.attention.wo.0.scales_zeros b/triton_models/weights/layers.14.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..48d9a6a1e531781d05cdd4ed2492901e739a7ab5
Binary files /dev/null and b/triton_models/weights/layers.14.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.14.feed_forward.w2.0.qweight b/triton_models/weights/layers.14.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..7ff1668c319bb9f5acf45de07021544deacf6539
--- /dev/null
+++ b/triton_models/weights/layers.14.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5283e8d9834135c3a91261f7f5a23a33b12bdcdfa4da64701855761d41b7e2f1
+size 22544384
diff --git a/triton_models/weights/layers.14.ffn_norm.weight b/triton_models/weights/layers.14.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..139999168dbb22af0fecf697336d5cea75b2b743
Binary files /dev/null and b/triton_models/weights/layers.14.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.15.attention.w_qkv.0.qweight b/triton_models/weights/layers.15.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..3fd8665d50dcc36a273dec492b19431fcc2bb21f
--- /dev/null
+++ b/triton_models/weights/layers.15.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9da979d36ca2cf6d7fa23886a477609f88e03dc6ae4b27689cc37c9aec2d9a0e
+size 25165824
diff --git a/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..269482a01f5f0aff47ed502048f9e188220670f1
--- /dev/null
+++ b/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:932a4c5302ddc985c3ad90e697ad2b2c554db1c99b6a72635c55c82f8b161d7d
+size 1572864
diff --git a/triton_models/weights/layers.15.attention_norm.weight b/triton_models/weights/layers.15.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..b420377c9f715f1cf442a609fbd4ed4adc433023
Binary files /dev/null and b/triton_models/weights/layers.15.attention_norm.weight differ
diff --git a/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..4158679a52a5088851d1251f97ca236851f5d924
--- /dev/null
+++ b/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1c9cd36454fdb977e3fdd95b6a56f68cabe3fdb631c79f4f99594f73be47d76
+size 1409024
diff --git a/triton_models/weights/layers.16.attention.w_qkv.0.qweight b/triton_models/weights/layers.16.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..6b3ebe59e060254ba305dd479f66cc5a052eef53
--- /dev/null
+++ b/triton_models/weights/layers.16.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8b4ed1cf80e4810ff8e4be00484a0f4d7f0373d42f3959510b60c9ac683c643
+size 25165824
diff --git a/triton_models/weights/layers.16.attention.wo.0.bias b/triton_models/weights/layers.16.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..091845ba1027ee54d412e2475c07583dd4d863ae
Binary files /dev/null and b/triton_models/weights/layers.16.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.16.attention_norm.weight b/triton_models/weights/layers.16.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..39da53b76ee51d48d687b2657f776929077aa47b
Binary files /dev/null and b/triton_models/weights/layers.16.attention_norm.weight differ
diff --git a/triton_models/weights/layers.16.feed_forward.w13.0.qweight b/triton_models/weights/layers.16.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2a00b5834de5e8436e59c12e151f7c96f1b7dc84
--- /dev/null
+++ b/triton_models/weights/layers.16.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d0e85614928a2f345c90ab431667afcdd01d4a4570a4617e7adfbf01d203c06
+size 45088768
diff --git a/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..e0472664ee5e3bfb146b9724206fce54fd35a501
--- /dev/null
+++ b/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d04e2e9940a9f325296a9c90bd3bce4a56bac04ce2a243643ebc812b2a96e210
+size 2818048
diff --git a/triton_models/weights/layers.16.feed_forward.w2.0.qweight b/triton_models/weights/layers.16.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..4dcd4ef2aadb596f19d3d7ec9fa9fe82dc073200
--- /dev/null
+++ b/triton_models/weights/layers.16.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:712b28fb566175675b2cec7efec67e4c617381d91a84a8f963faf61379964d71
+size 22544384
diff --git a/triton_models/weights/layers.16.ffn_norm.weight b/triton_models/weights/layers.16.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..eb0cbc1cd772a0261133448703026f0aa079a18d
Binary files /dev/null and b/triton_models/weights/layers.16.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..7dd336f69abf3df0be1010f7530dabb214f4fb40
--- /dev/null
+++ b/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f5168dd0cac87ae326362b77d6cc00e216a4f411c751b0f091193946c245fc8
+size 1572864
diff --git a/triton_models/weights/layers.18.attention.wo.0.qweight b/triton_models/weights/layers.18.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..7ea648919f8ee0a2798adbaca711d38a4f8f0e0d
--- /dev/null
+++ b/triton_models/weights/layers.18.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec9bdf596e9eff99c8e99be120eeae671b9554948fefa2ef6e29fac848f937e
+size 8388608
diff --git a/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..7d41f2e5e935ee0f8aa712fce6615df7380a6379
--- /dev/null
+++ b/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8607f9ad9261ecf9fd2eb13c931adca29626a2f326d58efd333ed5a5989ba540
+size 1409024
diff --git a/triton_models/weights/layers.19.attention.w_qkv.0.qweight b/triton_models/weights/layers.19.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..dd743553892dab19536170ae8d0f16f92ad154cf
--- /dev/null
+++ b/triton_models/weights/layers.19.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2591f9207f9f6e680d497acbf5d65617d363157f115761abd90b70be61f5c2bb
+size 25165824
diff --git a/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..9f3f210d1d74911181643769e7af574cdbc622e0
--- /dev/null
+++ b/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8290d587bf0d552a9bcceb17883a57ed6dee1e19f26cc5a6aa78bee4e5f9caec
+size 1572864
diff --git a/triton_models/weights/layers.19.ffn_norm.weight b/triton_models/weights/layers.19.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..6481efcb2c7fe1a89db40251f8313a7c3c12c3d4
Binary files /dev/null and b/triton_models/weights/layers.19.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.2.attention.wo.0.bias b/triton_models/weights/layers.2.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..3f34950b7cf7a6d0393735dfcaec496e1238d2dc
Binary files /dev/null and b/triton_models/weights/layers.2.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.2.attention.wo.0.qweight b/triton_models/weights/layers.2.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..910e8b8d49187c270b612d6764be521e7208189f
--- /dev/null
+++ b/triton_models/weights/layers.2.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfbc8db4acac27ca6385199e0faf903dba4644ac747b2f192611a9bc30089d08
+size 8388608
diff --git a/triton_models/weights/layers.2.attention_norm.weight b/triton_models/weights/layers.2.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..5f12ab967b6669766bc2114e5d63812b856af25c
Binary files /dev/null and b/triton_models/weights/layers.2.attention_norm.weight differ
diff --git a/triton_models/weights/layers.2.feed_forward.w13.0.qweight b/triton_models/weights/layers.2.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..9577ffa2e1a62a5447b07764915f6ad26de31006
--- /dev/null
+++ b/triton_models/weights/layers.2.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1c1b6595360347f2b63172f4a2c7ea9fafafd67070977230a2737df7186c992
+size 45088768
diff --git a/triton_models/weights/layers.20.attention.wo.0.bias b/triton_models/weights/layers.20.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..740b6ae104e9fea385bf5f69467c552c53f6eacd
Binary files /dev/null and b/triton_models/weights/layers.20.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..96d4f659434fac69407abd6a64840cdc48696b7e
--- /dev/null
+++ b/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85549fd9cf2091919319fb219464020191f58e53bac0350cbf1f504b77b620fc
+size 2818048
diff --git a/triton_models/weights/layers.20.ffn_norm.weight b/triton_models/weights/layers.20.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..62b727bbe1e7a8f93eb4140589f96132a7b6bdc1
Binary files /dev/null and b/triton_models/weights/layers.20.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.21.feed_forward.w13.0.qweight b/triton_models/weights/layers.21.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..925c12f811ec093b575c9d28f09a11b3497a6ca6
--- /dev/null
+++ b/triton_models/weights/layers.21.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1214235b7d85a94a4525ffa148e3b18f1a78508b011f7d57a1709c72f0ebc1a
+size 45088768
diff --git a/triton_models/weights/layers.21.feed_forward.w2.0.qweight b/triton_models/weights/layers.21.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..3b68ed73375b3a8ad006f1b00d274c89a1472dc9
--- /dev/null
+++ b/triton_models/weights/layers.21.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e036f4b5a9d78b23fc9be94f2f07cc1c3c70b3de98a551227807ae98176272e8
+size 22544384
diff --git a/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..219a6b4b584b0c812b7b486f8d82eb12f8618105
--- /dev/null
+++ b/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7adc8834265fc43ad1df2d5bb252d64624581f5a1fb7b12c6709ef67ad863893
+size 1409024
diff --git a/triton_models/weights/layers.21.ffn_norm.weight b/triton_models/weights/layers.21.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..7c27353eea926412fe49f5ae727a6375af9fa600
Binary files /dev/null and b/triton_models/weights/layers.21.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.22.attention.w_qkv.0.qweight b/triton_models/weights/layers.22.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..aa3e9eedc378ae681c135aefaab825daf8a74a21
--- /dev/null
+++ b/triton_models/weights/layers.22.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c60b4568db7524400c9255741eea43995526dd815161060fde82a16a84392d5
+size 25165824
diff --git a/triton_models/weights/layers.22.attention.wo.0.scales_zeros b/triton_models/weights/layers.22.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..88f5d7b03ba2018081aa64d810d9be81fdf44297
Binary files /dev/null and b/triton_models/weights/layers.22.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.22.attention_norm.weight b/triton_models/weights/layers.22.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..13b1a3d88f1622329ad90a51020541e33f425b3d
Binary files /dev/null and b/triton_models/weights/layers.22.attention_norm.weight differ
diff --git a/triton_models/weights/layers.23.attention.w_qkv.0.bias b/triton_models/weights/layers.23.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..32c9d33adafe69725b5d44ef3432405af39b3645
Binary files /dev/null and b/triton_models/weights/layers.23.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.23.feed_forward.w13.0.qweight b/triton_models/weights/layers.23.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..da676761fcb570aa7de797324ebbf8ab9067d905
--- /dev/null
+++ b/triton_models/weights/layers.23.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde79bdeef3936bb4b057a3f2e501e9dfe7fd82608be8e11dee954852a02beb9
+size 45088768
diff --git a/triton_models/weights/layers.23.ffn_norm.weight b/triton_models/weights/layers.23.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..83925ceb5acc6d36f9200c2d13bce043f90bbfe5
Binary files /dev/null and b/triton_models/weights/layers.23.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.24.attention.w_qkv.0.qweight b/triton_models/weights/layers.24.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..d7a99aaa0e038987fdf57d1696da4e03fdb0204b
--- /dev/null
+++ b/triton_models/weights/layers.24.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:490da848dc3abf31e49495d4e33ee823a08b3d3d27580e2764fa0a9f1d8d22af
+size 25165824
diff --git a/triton_models/weights/layers.24.feed_forward.w2.0.qweight b/triton_models/weights/layers.24.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..63c162ae9626da8c9753437615e2b0252316ec08
--- /dev/null
+++ b/triton_models/weights/layers.24.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15330262e7bac9beb91de12582a6cd8c3221af88746a591ddf8b6c69ba1122d5
+size 22544384
diff --git a/triton_models/weights/layers.25.attention.w_qkv.0.qweight b/triton_models/weights/layers.25.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..9858cfc34e8e54186158964311fd114ca695cf9e
--- /dev/null
+++ b/triton_models/weights/layers.25.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9811b3361784a6cd5ad840664d6d598d47ab1cb8e89d482ca5178d4c91f3d150
+size 25165824
diff --git a/triton_models/weights/layers.25.attention.wo.0.bias b/triton_models/weights/layers.25.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..0757b18baa05babedc66d08bc3515e35da521342
Binary files /dev/null and b/triton_models/weights/layers.25.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.25.ffn_norm.weight b/triton_models/weights/layers.25.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..26edf27effcf23a626b5b8d841659b2c56e2e508
Binary files /dev/null and b/triton_models/weights/layers.25.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.26.attention.wo.0.qweight b/triton_models/weights/layers.26.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..e3c6ad1cdcf22c9605aa41cd0f89f9796394522e
--- /dev/null
+++ b/triton_models/weights/layers.26.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cda19c6f7e8b5b4ad9745d5314598a415da216a88d8b5d2f77c896da55f888fc
+size 8388608
diff --git a/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..21acfb31eb8fe4a964e0a777e2cdda34bfe1f06b
--- /dev/null
+++ b/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb9b9b0f527844b27072daee9c275e6a0c37a48059b848f75a20283d7d78f314
+size 2818048
diff --git a/triton_models/weights/layers.27.attention.w_qkv.0.bias b/triton_models/weights/layers.27.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..81e330141e460b450c24cca3603ffe0c9dab3af6
Binary files /dev/null and b/triton_models/weights/layers.27.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..2e3a6d8f8d7d66f3ea178addcb9ecb894aeaff7a
--- /dev/null
+++ b/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:415d0b646c2a6c861de9a3b6ac78006e5733986e6d6777fdd629123aa5ea8f95
+size 2818048
diff --git a/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..a90c17b17a07af7f19ff2be713863e25011a8fb2
--- /dev/null
+++ b/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f253ca0e0f2fee067f487ebb91711e327cd2e2740e43de0970ac01f4d5095394
+size 1572864
diff --git a/triton_models/weights/layers.29.attention.w_qkv.0.qweight b/triton_models/weights/layers.29.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..54da7383430c8c4a674a0f4c68afe8d10bd63dec
--- /dev/null
+++ b/triton_models/weights/layers.29.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4ef02a287706e03dddb233bac4c35fcdead173b1f0611cbc8409ceddc9dc301
+size 25165824
diff --git a/triton_models/weights/layers.29.attention.wo.0.qweight b/triton_models/weights/layers.29.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..9cbf4abc17de5a3d721d15e493c1feede2900d34
--- /dev/null
+++ b/triton_models/weights/layers.29.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd47112e1ca84e42a16eede81385033abcd1c4ae05efb0a4643ebdc78480c73d
+size 8388608
diff --git a/triton_models/weights/layers.29.attention_norm.weight b/triton_models/weights/layers.29.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..2c76397f909d0b335d8ff650b5e3865e35235660
Binary files /dev/null and b/triton_models/weights/layers.29.attention_norm.weight differ
diff --git a/triton_models/weights/layers.3.attention.wo.0.scales_zeros b/triton_models/weights/layers.3.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..2ff2de51ab0c7b90d88ee13290488ea800264cb6
Binary files /dev/null and b/triton_models/weights/layers.3.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.3.feed_forward.w13.0.qweight b/triton_models/weights/layers.3.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..bd4b72b4dc85a79575bdcfd17cc853aa77acd7b9
--- /dev/null
+++ b/triton_models/weights/layers.3.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e633e2daa75d8b186587df2292456c930735e844107073fd626da4c3ff2ec43
+size 45088768
diff --git a/triton_models/weights/layers.30.attention.wo.0.bias b/triton_models/weights/layers.30.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..24aa0b14d73a2babdf0f06110fc49e584a606d60
Binary files /dev/null and b/triton_models/weights/layers.30.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.30.attention_norm.weight b/triton_models/weights/layers.30.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..902d13bc4ebd66f52bfe31684c0585d30a8c5911
Binary files /dev/null and b/triton_models/weights/layers.30.attention_norm.weight differ
diff --git a/triton_models/weights/layers.30.feed_forward.w13.0.qweight b/triton_models/weights/layers.30.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..af3aa26d1c64e5af2a59d1f0c2a37bd5057947d4
--- /dev/null
+++ b/triton_models/weights/layers.30.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dcf5f2e833680f2ed6432faa87fa1bbe2127c43e91941e8e1e817fb2be3ad61
+size 45088768
diff --git a/triton_models/weights/layers.30.ffn_norm.weight b/triton_models/weights/layers.30.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..8e80239434a541ad4901df66276898c4a2256759
Binary files /dev/null and b/triton_models/weights/layers.30.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.31.feed_forward.w2.0.qweight b/triton_models/weights/layers.31.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2267ad8c01c426aaf70e248c8e58cb80a34a9142
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3468684a97568d6f26bda211f43df3f4a748e31787fb0de53fe79ebb89fb5cdc
+size 22544384
diff --git a/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..ff301923ace63a9fd3765cf6e7e6b99c7fea33cf
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e546357675a12754b87115c56000e42098886ea75976bb73a53ca349324d50a
+size 1409024
diff --git a/triton_models/weights/layers.4.attention.w_qkv.0.bias b/triton_models/weights/layers.4.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..09a04eb84458a25f1c0a33b18184a9a4908e5743
Binary files /dev/null and b/triton_models/weights/layers.4.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.4.attention.wo.0.bias b/triton_models/weights/layers.4.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..9d63d131ffc21bf72a1f00cb9592af5356ed5047
Binary files /dev/null and b/triton_models/weights/layers.4.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.4.feed_forward.w13.0.qweight b/triton_models/weights/layers.4.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..edc1be5c976741bf1bd46bce476cb76e3139c59c
--- /dev/null
+++ b/triton_models/weights/layers.4.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfd5b448389e1c265443f125aa678eefb721a25568efff5c6c36341c613812de
+size 45088768
diff --git a/triton_models/weights/layers.5.attention.w_qkv.0.qweight b/triton_models/weights/layers.5.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..c3a686697fcf66e72d055c863648c7621a700b9d
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b4db00f413dd4c2822b98e1f17b2cf1be05f35f413dd1ddb41d8c1979adbdac
+size 25165824
diff --git a/triton_models/weights/layers.5.attention_norm.weight b/triton_models/weights/layers.5.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..e4ffe81e33b61d0a98c07cb6aaf4d03df8a6b27b
Binary files /dev/null and b/triton_models/weights/layers.5.attention_norm.weight differ
diff --git a/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..209ac73948f99e71e70f405458d9e365b611e4cc
--- /dev/null
+++ b/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc86cb69c6e9a0340ab893ce73ca3e5e40b65c9fb94625b5355917c42c13a78e
+size 1409024
diff --git a/triton_models/weights/layers.5.ffn_norm.weight b/triton_models/weights/layers.5.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..97c418b828aaa19bd0357570edb93e5691fa7d16
Binary files /dev/null and b/triton_models/weights/layers.5.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.6.attention_norm.weight b/triton_models/weights/layers.6.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..e7b46e6a81dce743769c84b56c533bc8782f3a62
Binary files /dev/null and b/triton_models/weights/layers.6.attention_norm.weight differ
diff --git a/triton_models/weights/layers.7.attention.w_qkv.0.bias b/triton_models/weights/layers.7.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..584b1a9812c80e4d05d48b881bad5621da3c1eb6
Binary files /dev/null and b/triton_models/weights/layers.7.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.7.attention.wo.0.scales_zeros b/triton_models/weights/layers.7.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..1d52cefd3542c0650548e67ab4c57eadba23d4dd
Binary files /dev/null and b/triton_models/weights/layers.7.attention.wo.0.scales_zeros differ
diff --git a/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..eacc87078de9650ac5181da78b2c901e9bfa476b
--- /dev/null
+++ b/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62a63d3b87c2aeb903cfe0b8070488f0a33151c2dbf6f33ed6f49abbf0cfa26d
+size 1409024
diff --git a/triton_models/weights/layers.7.ffn_norm.weight b/triton_models/weights/layers.7.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..90d7f195319b30a6ecf8050856b9a5fb34999e03
Binary files /dev/null and b/triton_models/weights/layers.7.ffn_norm.weight differ
diff --git a/triton_models/weights/layers.8.attention.w_qkv.0.bias b/triton_models/weights/layers.8.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..6dc25ad2e35f53582b87098a4f70622a0c4cc9c1
Binary files /dev/null and b/triton_models/weights/layers.8.attention.w_qkv.0.bias differ
diff --git a/triton_models/weights/layers.8.attention_norm.weight b/triton_models/weights/layers.8.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..31a75bfa27213efbf8e55629d6cc7ceceb8b899c
Binary files /dev/null and b/triton_models/weights/layers.8.attention_norm.weight differ
diff --git a/triton_models/weights/layers.8.feed_forward.w2.0.qweight b/triton_models/weights/layers.8.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..378dcdeae86de31fec0ffcfd7941afdf7a290fcb
--- /dev/null
+++ b/triton_models/weights/layers.8.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e002716de35236b9deeb72da381486b464f6466801d0631efed052acf497316
+size 22544384
diff --git a/triton_models/weights/layers.9.attention.wo.0.bias b/triton_models/weights/layers.9.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..8e0643f11cb00bee14772b7184810d754473942f
Binary files /dev/null and b/triton_models/weights/layers.9.attention.wo.0.bias differ
diff --git a/triton_models/weights/layers.9.feed_forward.w2.0.qweight b/triton_models/weights/layers.9.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..66755207a189fac5a255235ce41cb60ef41cc821
--- /dev/null
+++ b/triton_models/weights/layers.9.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6c62c12104092dc18f20b8f3285cfaffeb181850ae99e206237bae11af9244f
+size 22544384
diff --git a/triton_models/weights/layers.9.ffn_norm.weight b/triton_models/weights/layers.9.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..3f4febad62f2e0cd14afccabc1b50471f2d5b0e3
Binary files /dev/null and b/triton_models/weights/layers.9.ffn_norm.weight differ