Models#

class tensorrt_llm.models.BaichuanForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of BaichuanConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a BaichuanForCausalLM object from the given arguments
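
For example, a minimal usage sketch based only on the signature documented above (the model directory is a placeholder):

    # Build a TensorRT-LLM Baichuan model object from a Hugging Face checkpoint.
    from tensorrt_llm.models import BaichuanForCausalLM

    model = BaichuanForCausalLM.from_hugging_face(
        "path/to/baichuan_hf_model",  # placeholder directory or HF repo id
        dtype="float16",
    )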

classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
device: str = 'cuda',
calib_dataset: str = 'cnn_dailymail',
calib_batches: int = 512,
calib_batch_size: int = 1,
calib_max_seq_length: int = 512,
random_seed: int = 1234,
tokenizer_max_seq_length: int = 2048,
**kwargs,
)[source]#
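
A hedged sketch of the calibration-based quantization flow using only the parameters shown above; the import paths and the chosen quantization algorithm are assumptions about the library layout, and the directories are placeholders:

    from tensorrt_llm.models.modeling_utils import QuantConfig
    from tensorrt_llm.quantization import QuantAlgo

    from tensorrt_llm.models import BaichuanForCausalLM

    # Illustrative quantization settings (assumption, not a recommendation).
    quant_config = QuantConfig(quant_algo=QuantAlgo.W4A16_AWQ)

    BaichuanForCausalLM.quantize(
        hf_model_dir="path/to/baichuan_hf_model",   # placeholder
        output_dir="path/to/quantized_checkpoint",  # placeholder
        dtype="float16",
        quant_config=quant_config,
        calib_batches=32,  # smaller than the default 512 for a quick run
    )
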
class tensorrt_llm.models.BertForQuestionAnswering(*args, **kwargs)[source]#

Bases: BertBase

forward(
input_ids=None,
input_lengths=None,
token_type_ids=None,
position_ids=None,
hidden_states=None,
max_input_length=None,
)[source]#
class tensorrt_llm.models.BertForSequenceClassification(*args, **kwargs)[source]#

Bases: BertBase

forward(
input_ids,
input_lengths,
token_type_ids=None,
position_ids=None,
hidden_states=None,
max_input_length=None,
)[source]#
class tensorrt_llm.models.BertModel(*args, **kwargs)[source]#

Bases: BertBase

forward(
input_ids=None,
input_lengths=None,
position_ids=None,
token_type_ids=None,
hidden_states=None,
max_input_length=None,
)[source]#
class tensorrt_llm.models.BloomForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

class tensorrt_llm.models.BloomModel(
config: PretrainedConfig,
)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
attention_params=None,
)[source]#
class tensorrt_llm.models.CLIPVisionTransformer(
image_size,
num_channels,
patch_size,
hidden_size,
num_attention_heads,
max_position_embeddings,
norm_epsilon,
intermediate_size,
hidden_act,
num_hidden_layers,
require_ln_f,
mapping: Mapping,
dtype,
)[source]#

Bases: Module

forward(pixel_values)[source]#
class tensorrt_llm.models.ChatGLMConfig(
*,
chatglm_version: str = 'chatglm3',
add_bias_linear: bool = False,
add_qkv_bias: bool = True,
apply_query_key_layer_scaling: bool = False,
apply_residual_connection_post_layernorm: bool = False,
rmsnorm: bool = True,
rotary_pct: float = 0.5,
rotary_base: float = 10000.0,
rotary_scaling: dict | None = None,
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.ChatGLMForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of ChatGLMConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a ChatGLMForCausalLM object from the given arguments

prepare_inputs(*args, **kwargs)[source]#

See PretrainedModel.prepare_inputs for the detailed parameter list

classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
device: str = 'cuda',
calib_dataset: str = 'cnn_dailymail',
calib_batches: int = 512,
calib_batch_size: int = 1,
calib_max_seq_length: int = 512,
random_seed: int = 1234,
tokenizer_max_seq_length: int = 2048,
**kwargs,
)[source]#
class tensorrt_llm.models.ChatGLMModel(
config: ChatGLMConfig,
)[source]#

Bases: Module

forward(
input_ids: Tensor = None,
position_ids: Tensor = None,
use_cache: bool = False,
attention_mask: Tensor = None,
kv_cache_params: KeyValueCacheParams = None,
attention_params: AttentionParams = None,
)[source]#
class tensorrt_llm.models.CogVLMConfig(
*,
mlp_bias: bool = False,
attn_bias: bool = False,
rotary_base: float = 10000.0,
rotary_scaling: dict | None = None,
**kwargs,
)[source]#

Bases: PretrainedConfig

to_dict()[source]#
class tensorrt_llm.models.CogVLMForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM, TopModelMixin

config_class#

alias of CogVLMConfig

default_plugin_config(**kwargs)[source]#

Return the default plugin config for this model when no plugin_config value is provided in the to_trt() call. Users who need a different plugin configuration can start from the returned object and modify it.
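
For instance, one might start from the returned object and override individual fields; the attribute touched below is an assumption about the plugin-config object, not a documented guarantee:

    from tensorrt_llm.models import CogVLMForCausalLM

    # Placeholder checkpoint directory.
    model = CogVLMForCausalLM.from_hugging_face("path/to/cogvlm_hf_model", dtype="float16")

    # Start from the model's defaults and tweak a single plugin setting.
    plugin_config = model.default_plugin_config()
    plugin_config.gemm_plugin = "float16"  # assumed attribute name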

classmethod from_hugging_face(
hf_model_dir,
dtype='float16',
mapping: Mapping | None = None,
quant_mode: QuantMode | None = None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used
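
A sketch of a multi-GPU load, assuming tensorrt_llm.Mapping accepts world_size/tp_size/rank arguments; in practice the rank usually comes from the process launcher, and the model directory is a placeholder:

    from tensorrt_llm import Mapping
    from tensorrt_llm.models import CogVLMForCausalLM

    # Two-way tensor parallelism; rank 0 shown for brevity.
    mapping = Mapping(world_size=2, tp_size=2, rank=0)

    model = CogVLMForCausalLM.from_hugging_face(
        "path/to/cogvlm_hf_model",  # placeholder Hugging Face model directory
        dtype="float16",
        mapping=mapping,
    )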

classmethod quantize(
hf_model_dir,
output_dir,
quant_config: QuantConfig,
*,
dtype='float16',
mapping: Mapping | None = None,
calib_batches=512,
calib_batch_size=1,
random_seed=1234,
tokenizer_max_seq_length=2048,
**kwargs,
)[source]#
class tensorrt_llm.models.CohereForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of CohereConfig

classmethod from_hugging_face(
hf_model_or_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a CohereForCausalLM object from the given arguments

class tensorrt_llm.models.DbrxConfig(
*,
bias: bool = False,
clip_qkv: float | None = None,
rotary_base: float = 500000.0,
rotary_scaling: dict | None = None,
moe: MoeConfig | dict | None = None,
**kwargs,
)[source]#

Bases: PretrainedConfig

to_dict()[source]#
class tensorrt_llm.models.DbrxForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of DbrxConfig

class tensorrt_llm.models.DecoderModel(*args, **kwargs)[source]#

Bases: PretrainedModel

check_config(
config: PretrainedConfig,
)[source]#
forward(
decoder_input_ids: Tensor,
encoder_output: Tensor,
position_ids=None,
token_type_ids=None,
use_cache=False,
attention_mask_params=None,
last_token_ids=None,
kv_cache_params=None,
attention_params=None,
hidden_states=None,
lora_params: LoraParams = None,
cross_kv_cache_gen: Tensor | None = None,
cross_kv_reuse: Tensor | None = None,
language_adapter_routings: Tensor | None = None,
)[source]#
precompute_relative_attention_bias(build_config)[source]#
prepare_inputs(
max_batch_size,
max_beam_width,
max_decoder_input_len,
max_seq_len,
max_encoder_input_len,
gather_context_logits: bool = False,
lora_target_modules: List[str] = None,
use_cache=True,
*args,
**kwargs,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.DeepseekForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of DeepSeekV1Config

classmethod from_hugging_face(
model_dir,
dtype: str = 'auto',
mapping: Mapping | None = None,
override_fields={},
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

class tensorrt_llm.models.DeepseekV2ForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of DeepSeekV2Config

classmethod from_hugging_face(
model_dir,
dtype: str = 'auto',
hf_model: PreTrainedModel | None = None,
use_preloading: bool = False,
use_safetensors_loading: bool = False,
mapping: Mapping | None = None,
override_fields={},
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

class tensorrt_llm.models.DiT(*args, **kwargs)[source]#

Bases: PretrainedModel

check_config(
config: PretrainedConfig,
)[source]#
forward(latent, timestep, label)[source]#

Forward pass of DiT. latent: (N, C, H, W) timestep: (N,) label: (N,)

forward_with_cfg(x, t, y)[source]#

Forward pass with classifier-free guidance.

forward_without_cfg(x, t, y)[source]#

Forward pass without classifier-free guidance.

prepare_inputs(max_batch_size, **kwargs)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

unpatchify(x: Tensor)[source]#
class tensorrt_llm.models.EagleForCausalLM(*args, **kwargs)[source]#

Bases: LLaMAForCausalLM

config_class#

alias of EagleConfig

forward(*args, **kwargs)[source]#
classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create an EagleForCausalLM object from the given arguments

prepare_inputs(*args, **kwargs)[source]#
Inputs needed:

device_request_types: [bs]
draft_tokens: [bs, max_draft_len]
draft_lens: [bs]
spec_decoding_generation_lengths: [bs]
spec_decoding_position_offsets: [bs, max_gen_tokens]
spec_decoding_packed_mask: [bs, max_draft_len, packed_length] **
eagle_temperature: [bs]
rand_data_validation: [bs, max_draft_tokens]

** This mask is tricky because the boolean mask needs to be
packed at runtime. Hence the last dimension will be

packed_length = ceil((max_draft_tokens+1)/32)
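
For example, the packed-mask width follows directly from the draft length:

    import math

    # Width of the packed boolean mask, per the formula above.
    max_draft_tokens = 63
    packed_length = math.ceil((max_draft_tokens + 1) / 32)  # -> 2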

class tensorrt_llm.models.EncoderModel(*args, **kwargs)[source]#

Bases: PretrainedModel

check_config(
config: PretrainedConfig,
)[source]#
forward(
input_ids: Tensor,
input_lengths=None,
position_ids=None,
token_type_ids=None,
hidden_states=None,
max_input_length=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
attention_mask=None,
lora_params: LoraParams = None,
language_adapter_routings: Tensor | None = None,
)[source]#
precompute_relative_attention_bias(build_config)[source]#
prepare_inputs(
max_batch_size,
max_input_len,
prompt_embedding_table_size: int = 0,
lora_target_modules: List[str] = None,
*args,
**kwargs,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

use_prompt_tuning()[source]#

Enable prompt tuning (p-tuning) when building the TRT engine; call this method before to_trt.

class tensorrt_llm.models.FalconConfig(
*,
bias: bool = False,
parallel_attention: bool = False,
num_ln_in_parallel_attn: int | None = None,
new_decoder_architecture: bool = False,
rotary_base: float = 10000.0,
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.FalconForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

check_config(config)[source]#
config_class#

alias of FalconConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a FalconForCausalLM object from the given arguments

class tensorrt_llm.models.FalconModel(config: FalconConfig)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
hidden_states=None,
)[source]#
class tensorrt_llm.models.GPTConfig(
*,
gpt_variant: str = 'gpt2',
bias: bool = True,
q_scaling: float = 1.0,
embedding_scale: float | None = None,
apply_query_key_layer_scaling: bool = False,
rotary_pct: float = 1.0,
rotary_base: float = 10000.0,
rotary_scaling: dict | None = None,
inner_layernorm: bool = False,
norm_before_bmm1: bool = False,
moe: MoeConfig | dict | None = None,
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
classmethod from_nemo(
nemo_ckpt_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.GPTForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of GPTConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a GPTForCausalLM object from the given arguments

classmethod from_nemo(
nemo_ckpt_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
device: str = 'cuda',
calib_dataset: str = 'cnn_dailymail',
calib_batches: int = 512,
calib_batch_size: int = 1,
calib_max_seq_length: int = 512,
random_seed: int = 1234,
tokenizer_max_seq_length: int = 2048,
**kwargs,
)[source]#
use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.GPTJConfig(*, rotary_dim: int = 64, **kwargs)[source]#

Bases: PretrainedConfig

This is the configuration class to store the configuration of a GPTJ model.

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.GPTJForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of GPTJConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config=None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

class tensorrt_llm.models.GPTJModel(config: GPTJConfig)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
)[source]#
class tensorrt_llm.models.GPTModel(config: GPTConfig)[source]#

Bases: Module

forward(
input_ids,
position_ids,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
hidden_states=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
lora_params=None,
spec_decoding_params=None,
)[source]#
class tensorrt_llm.models.GPTNeoXForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

class tensorrt_llm.models.GPTNeoXModel(
config: PretrainedConfig,
)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
)[source]#
class tensorrt_llm.models.GemmaConfig(
*,
architecture: str,
rotary_base: float = 10000.0,
rotary_scaling: dict | None = None,
attn_bias: bool = False,
mlp_bias: bool = False,
position_embedding_type: PositionEmbeddingType = PositionEmbeddingType.rope_gpt_neox,
query_pre_attn_scalar: int | None = None,
final_logit_softcapping: float | None = None,
attn_logit_softcapping: float | None = None,
mapping: Mapping | dict | None = None,
sliding_window_pattern: int = None,
rope_local_base_freq: int = None,
sliding_window: int = None,
**kwargs,
)[source]#

Bases: PretrainedConfig

GEMMA2_ADDED_FIELDS = {'attn_logit_softcapping', 'final_logit_softcapping', 'query_pre_attn_scalar'}#
GEMMA3_ADDED_FIELDS = {'final_logit_softcapping', 'query_pre_attn_scalar', 'rope_local_base_freq', 'sliding_window', 'sliding_window_pattern'}#
GEMMA_ADDED_FIELDS = {'attn_bias', 'inter_layernorms', 'mlp_bias', 'rotary_base', 'rotary_scaling'}#
VERBATIM = {'attn_logit_softcapping', 'final_logit_softcapping', 'hidden_act', 'hidden_size', 'intermediate_size', 'max_position_embeddings', 'num_attention_heads', 'num_hidden_layers', 'query_pre_attn_scalar', 'rope_local_base_freq', 'sliding_window', 'sliding_window_pattern', 'use_parallel_embedding', 'vocab_size'}#
classmethod from_hugging_face(
hf_config_or_dir: HfConfigOrDir,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
) GemmaConfig[source]#
gemma2_config()[source]#
gemma3_config()[source]#
static get_hf_config(config_dir: str | PathLike)[source]#
property is_gemma_2: bool#
property is_gemma_3: bool#
to_dict()[source]#

Serialize the fields added in GemmaConfig

class tensorrt_llm.models.GemmaForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

NATIVE_QUANT_FLOW = {QuantAlgo.W4A16, QuantAlgo.W8A16, QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TENSOR_PLUGIN, QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TOKEN_PLUGIN, QuantAlgo.W8A8_SQ_PER_TENSOR_PER_TOKEN_PLUGIN, QuantAlgo.W8A8_SQ_PER_TENSOR_PLUGIN}#
classmethod assert_valid_quant_algo(
quant_algo: QuantAlgo | None,
)[source]#
config_class#

alias of GemmaConfig

classmethod from_hugging_face(
hf_model_dir: HfConfigOrDir,
dtype='float16',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
load_model_on_cpu: bool = True,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'float16',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
gemma_config_kwargs: Dict[str, Any] = None,
**quantize_kwargs: Dict[str, Any],
)[source]#
use_lora(
lora_config: LoraConfig,
) None[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.LLaMAConfig(
*,
mlp_bias: bool = False,
attn_bias: bool = False,
rotary_base: float = 10000.0,
rotary_scaling: dict | None = None,
residual_mlp: bool = False,
disable_weight_only_quant_plugin: bool = False,
moe: MoeConfig | dict | None = None,
remove_duplicated_kv_heads: bool = False,
embedding_multiplier: float = 1.0,
attention_multiplier: float = 1.0,
residual_multiplier: float = 1.0,
output_multiplier_scale: float = 1.0,
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
classmethod from_meta_ckpt(
meta_ckpt_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.LLaMAForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of LLaMAConfig

default_plugin_config(**kwargs)[source]#

Return the default plugin config for this model when no plugin_config value is provided in the to_trt() call. Users who need a different plugin configuration can start from the returned object and modify it.

classmethod from_hugging_face(
hf_model_or_dir: str | PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a LLaMAForCausalLM object from the given arguments
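
A minimal conversion sketch that pairs this method with PretrainedModel.save_checkpoint (documented further below); both directories are placeholders:

    from tensorrt_llm.models import LLaMAForCausalLM

    # Load a Hugging Face LLaMA checkpoint and write it back out
    # as a TensorRT-LLM checkpoint directory.
    model = LLaMAForCausalLM.from_hugging_face("path/to/llama_hf_model", dtype="auto")
    model.save_checkpoint("path/to/trtllm_checkpoint", save_config=True)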

classmethod from_meta_ckpt(
meta_ckpt_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
device: str = 'cuda',
calib_dataset: str = 'cnn_dailymail',
calib_batches: int = 512,
calib_batch_size: int = 1,
calib_max_seq_length: int = 512,
random_seed: int = 1234,
tokenizer_max_seq_length: int = 2048,
**kwargs,
)[source]#
use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration
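
A hedged sketch of attaching LoRA weights; the LoraConfig import path and field names are assumptions about the library, and the adapter directory is a placeholder:

    from tensorrt_llm.lora_manager import LoraConfig
    from tensorrt_llm.models import LLaMAForCausalLM

    model = LLaMAForCausalLM.from_hugging_face("path/to/llama_hf_model", dtype="float16")

    # Assumed LoraConfig fields: adapter location and target modules.
    lora_config = LoraConfig(
        lora_dir=["path/to/lora_adapter"],
        lora_target_modules=["attn_q", "attn_k", "attn_v"],
    )
    model.use_lora(lora_config)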

class tensorrt_llm.models.LLaMAModel(config: LLaMAConfig)[source]#

Bases: Module

forward(
input_ids,
position_ids=None,
use_cache=False,
attention_mask=None,
spec_decoding_params=None,
kv_cache_params=None,
attention_params=None,
hidden_states=None,
hidden_states_for_embed=None,
prompt_embedding_table: Tensor | None = None,
prompt_tasks: Tensor | None = None,
prompt_vocab_size: Tensor | None = None,
lora_params=None,
)[source]#
class tensorrt_llm.models.LlavaNextVisionConfig(
*,
image_size: int,
patch_size: int,
text_hidden_size: int,
projector_hidden_act: str = 'gelu',
num_channels: int = 3,
vision_model_type: str = 'clip_vision_model',
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
class tensorrt_llm.models.LlavaNextVisionWrapper(*args, **kwargs)[source]#

Bases: PretrainedModel

forward(pixel_values, position_ids=None)[source]#
classmethod from_hugging_face(
hf_model_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a LlavaNextVisionWrapper object from the given arguments

prepare_inputs(max_batch_size, **kwargs)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

save_checkpoint(output_dir, save_config=True)[source]#
class tensorrt_llm.models.MLLaMAForCausalLM(*args, **kwargs)[source]#

Bases: PretrainedModel

config_class#

alias of MLLaMAConfig

forward(
decoder_input_ids: Tensor,
encoder_output: Tensor,
use_cache=False,
attention_mask_params=None,
last_token_ids=None,
kv_cache_params=None,
attention_params=None,
hidden_states=None,
lora_params: LoraParams = None,
cross_kv_cache_gen: Tensor | None = None,
cross_kv_reuse: Tensor | None = None,
prompt_embedding_table: Tensor | None = None,
prompt_tasks: Tensor | None = None,
prompt_vocab_size: Tensor | None = None,
skip_cross_attn_blocks: Tensor | None = None,
)[source]#
classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create a MLLaMAForCausalLM object from the given arguments

prepare_inputs(
max_batch_size,
max_beam_width,
max_decoder_input_len,
max_seq_len,
max_encoder_input_len,
gather_context_logits: bool = False,
gather_generation_logits: bool = False,
lora_target_modules: List[str] = None,
prompt_embedding_table_size: int = 0,
use_cache=True,
*args,
**kwargs,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.MPTForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

check_config(config)[source]#
class tensorrt_llm.models.MPTModel(config: PretrainedConfig)[source]#

Bases: Module

forward(
input_ids,
position_ids,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
)[source]#
class tensorrt_llm.models.MambaForCausalLM(*args, **kwargs)[source]#

Bases: PretrainedModel

config_class#

alias of MambaConfig

forward(
input_ids,
conv_states,
ssm_states,
host_request_types,
last_token_ids,
last_token_ids_for_logits,
host_context_lengths,
slot_mapping: Tensor | None = None,
)[source]#
classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

prepare_inputs(
max_batch_size,
max_input_len,
max_seq_len,
max_num_tokens,
use_cache,
max_beam_width: int = 1,
opt_num_tokens: int = None,
opt_batch_size: int = 0,
prompt_embedding_table_size: int = 0,
max_draft_len: int = 0,
gather_context_logits: bool = False,
lora_target_modules: List[str] = None,
speculative_decoding_draft_tokens_external: bool = False,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

class tensorrt_llm.models.MedusaConfig(
*,
num_medusa_heads: int = 4,
num_medusa_layers: int = 1,
max_draft_len: int = 63,
**kwargs,
)[source]#

Bases: PretrainedConfig

classmethod from_hugging_face(
hf_config_or_dir: str | transformers.PretrainedConfig,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#
to_dict()[source]#
class tensorrt_llm.models.MedusaForCausalLm(*args, **kwargs)[source]#

Bases: PretrainedModel

config_class#

alias of MedusaConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

class tensorrt_llm.models.OPTForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

check_config(config)[source]#
class tensorrt_llm.models.OPTModel(config: PretrainedConfig)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
**kwargs,
)[source]#
class tensorrt_llm.models.Phi3ForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

config_class#

alias of Phi3Config

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.Phi3Model(
config: PretrainedConfig,
)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
lora_params=None,
)[source]#
class tensorrt_llm.models.PhiForCausalLM(*args, **kwargs)[source]#

Bases: DecoderModelForCausalLM

check_config(config)[source]#
config_class#

alias of PhiConfig

classmethod from_hugging_face(
hf_model_or_dir: str | transformers.PreTrainedModel,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
**kwargs,
)[source]#

Create an LLM object and load weights from Hugging Face.

:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallelism strategy; when None, a single GPU is used

use_lora(
lora_config: LoraConfig,
)[source]#

Load LoRA weights into the module from the given config.

:param lora_config: the LoRA configuration

class tensorrt_llm.models.PhiModel(config: PretrainedConfig)[source]#

Bases: Module

forward(
input_ids: Tensor,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
prompt_embedding_table=None,
prompt_tasks=None,
prompt_vocab_size=None,
lora_params=None,
)[source]#
class tensorrt_llm.models.PretrainedConfig(
*,
architecture: str,
dtype: str,
hidden_size: int,
num_hidden_layers: int,
num_attention_heads: int,
vocab_size: int | None = None,
hidden_act: str = 'gelu',
logits_dtype: str = 'float32',
norm_epsilon: float = 1e-05,
position_embedding_type: PositionEmbeddingType | str = PositionEmbeddingType.learned_absolute,
max_position_embeddings: int | None = None,
rotary_embedding_dim: int | None = None,
num_key_value_heads: int | None = None,
intermediate_size: int | None = None,
mapping: Mapping | dict | None = None,
quantization: QuantConfig | dict | None = None,
use_parallel_embedding: bool = False,
embedding_sharding_dim: int = 0,
head_size: int | None = None,
qk_layernorm: bool = False,
runtime_defaults: RuntimeDefaultsIn = None,
**kwargs,
)[source]#

Bases: object

static create_runtime_defaults(
defaults: RuntimeDefaultsIn = None,
) RuntimeDefaults | None[source]#
for_each_rank() Generator[Self, None, None][source]#
classmethod from_checkpoint(ckpt_dir: str)[source]#
classmethod from_dict(config: dict)[source]#
classmethod from_json_file(config_file: str)[source]#
get_config_group(group_cls: Type[CG]) CG[source]#
has_config_group(group_cls: Type[CG]) bool[source]#
property kv_dtype#
property quant_algo#
property quant_mode#
set_if_not_exist(key, value)[source]#
set_rank(rank: int)[source]#
to_dict()[source]#
to_json_file(config_file: str)[source]#
to_layer_quant_config(config_file: str)[source]#
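
For example, a configuration can be round-tripped through JSON with the methods listed above (paths are placeholders, and the printed attributes assume the constructor fields are stored on the object):

    from tensorrt_llm.models import PretrainedConfig

    # Read a checkpoint config, inspect a couple of fields, and write a copy.
    config = PretrainedConfig.from_json_file("path/to/trtllm_checkpoint/config.json")
    print(config.architecture, config.dtype)
    config.to_json_file("path/to/config_copy.json")
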
class tensorrt_llm.models.PretrainedModel(*args, **kwargs)[source]#

Bases: Module, GenerationMixin, TopModelMixin

check_config(config)[source]#
classmethod from_checkpoint(
ckpt_dir: str,
rank: int | None = None,
config: PretrainedConfig | None = None,
*,
preprocess_weights_hook: Callable[[Dict[str, Tensor]], Dict[str, Tensor]] | None = None,
)[source]#
classmethod from_config(
config: PretrainedConfig,
)[source]#
load(weights, from_pruned=False)[source]#
prepare_inputs(
max_batch_size,
max_input_len,
max_seq_len,
max_num_tokens,
use_cache,
max_beam_width: int = 1,
opt_num_tokens: int = None,
prompt_embedding_table_size: int = 0,
position_encoding_2d: bool = False,
max_draft_len: int = 0,
speculative_decoding_draft_tokens_external: bool = False,
spec_decoding_is_generation_length_variable: bool = False,
gather_context_logits: bool = False,
lora_target_modules: List[str] = None,
opt_batch_size: int = 0,
num_hidden_layers: int = None,
mrope_rotary_cos_sin_size: int = None,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

classmethod quantize(
hf_model_dir: str,
output_dir: str,
dtype: str = 'auto',
mapping: Mapping | None = None,
quant_config: QuantConfig | None = None,
*,
device: str = 'cuda',
calib_dataset: str = 'cnn_dailymail',
calib_batches: int = 512,
calib_batch_size: int = 1,
calib_max_seq_length: int = 512,
random_seed: int = 1234,
tokenizer_max_seq_length: int = 2048,
**kwargs,
)[source]#
release()[source]#
save_checkpoint(output_dir, save_config=True)[source]#
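
A sketch of reloading a converted checkpoint for a single rank; in practice a concrete subclass such as LLaMAForCausalLM is used, and the directory is a placeholder:

    from tensorrt_llm.models import LLaMAForCausalLM

    # Load rank 0 of a previously saved TensorRT-LLM checkpoint.
    model = LLaMAForCausalLM.from_checkpoint("path/to/trtllm_checkpoint", rank=0)
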
class tensorrt_llm.models.ReDrafterForCausalLM(*args, **kwargs)[source]#

Bases: LLaMAForCausalLM

forward(*args, **kwargs)[source]#
  1. Run the base model and get logits and hidden_states

prepare_inputs(*args, **kwargs)[source]#
Inputs needed:

Assuming max_gen_tokens = 1 + nb*(bl - 1), counting the actual tokens:

device_request_types: [bs]
draft_tokens: [bs, nb, bl]
draft_indices: [bs, nb, bl]
draft_probs: [bs, nb, bl-1, V]
spec_decoding_generation_lengths: [bs]
spec_decoding_position_offsets: [bs, max_gen_tokens]
spec_decoding_packed_mask: [bs, max_gen_tokens, packed_length] **
redrafter_inverted_temperature: [bs]
rand_data_sample: [bs]
rand_data_validation: [bs, nb, bl-1]

** This mask is tricky because the boolean mask needs to be
packed at runtime. Hence the last dimension will be

packed_length = ceil(max_gen_tokens/32)

class tensorrt_llm.models.RecurrentGemmaForCausalLM(*args, **kwargs)[source]#

Bases: PretrainedModel

forward(
input_ids,
position_ids=None,
use_cache=False,
attention_mask=None,
kv_cache_params=None,
attention_params=None,
conv_states=None,
rnn_states=None,
host_request_types=None,
last_token_ids=None,
last_token_ids_for_logits=None,
host_context_lengths=None,
slot_mapping=None,
)[source]#
prepare_inputs(
max_batch_size,
max_input_len,
max_seq_len,
max_num_tokens,
use_cache,
max_beam_width: int = 1,
opt_num_tokens: int = None,
opt_batch_size: int = 0,
prompt_embedding_table_size: int = 0,
max_draft_len: int = 0,
gather_context_logits: bool = False,
lora_target_modules: List[str] = None,
speculative_decoding_draft_tokens_external: bool = False,
)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

prepare_recurrent_inputs(
max_batch_size,
num_profiles,
mapping,
)[source]#
tensorrt_llm.models.RobertaForQuestionAnswering#

alias of BertForQuestionAnswering

tensorrt_llm.models.RobertaForSequenceClassification#

alias of BertForSequenceClassification

tensorrt_llm.models.RobertaModel#

alias of BertModel

class tensorrt_llm.models.SD3Transformer2DModel(*args, **kwargs)[source]#

Bases: PretrainedModel

property attn_processors#
config_class#

alias of SD3Transformer2DModelConfig

disable_forward_chunking()[source]#
enable_forward_chunking(
chunk_size: int | None = None,
dim: int = 0,
)[source]#
forward(
hidden_states: Tensor,
encoder_hidden_states: Tensor | None = None,
pooled_projections: Tensor | None = None,
timestep: Tensor | None = None,
block_controlnet_hidden_states: List[Tensor] = None,
joint_attention_kwargs: Dict[str, Any] | None = None,
)[source]#
classmethod from_pretrained(
pretrained_model_name_or_path: str,
dtype='float16',
mapping=<tensorrt_llm.mapping.Mapping object>,
**kwargs,
)[source]#
fuse_qkv_projections()[source]#
load(weights, from_pruned=False)[source]#
prepare_inputs(max_batch_size, **kwargs)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()

set_attn_processor(processor)[source]#
unfuse_qkv_projections()[source]#
class tensorrt_llm.models.SpeculativeDecodingMode(
value,
names=<not given>,
*values,
module=None,
qualname=None,
type=None,
start=1,
boundary=None,
)[source]#

Bases: IntFlag

DRAFT_TOKENS_EXTERNAL = 2#
EAGLE = 32#
EXPLICIT_DRAFT_TOKENS = 16#
LOOKAHEAD_DECODING = 8#
MEDUSA = 4#
NONE = 1#
static from_arguments(args: Namespace)[source]#
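
Since this is an IntFlag, individual modes can be combined and tested with bitwise operations, for example:

    from tensorrt_llm.models import SpeculativeDecodingMode

    mode = SpeculativeDecodingMode.MEDUSA
    # Bitwise tests against the members listed above.
    assert mode & SpeculativeDecodingMode.MEDUSA
    assert not (mode & SpeculativeDecodingMode.EAGLE)
    assert int(SpeculativeDecodingMode.NONE) == 1
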
class tensorrt_llm.models.WhisperEncoder(*args, **kwargs)[source]#

Bases: PretrainedModel

forward(
input_features: Tensor,
input_lengths=None,
position_ids=None,
)[source]#
precompute_relative_attention_bias(build_config)[source]#
prepare_inputs(max_batch_size=16)[source]#

@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.

@return: a list containing values that can be fed into self.forward()