Models#
- class tensorrt_llm.models.BaichuanForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
BaichuanConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a BaichuanForCausalLM object from the given arguments
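A minimal usage sketch (the model id, dtype, and parallel mapping below are illustrative assumptions; the later from_hugging_face entries on this page follow the same pattern):

```python
# Hypothetical sketch: build a BaichuanForCausalLM from a Hugging Face checkpoint.
from tensorrt_llm import Mapping
from tensorrt_llm.models import BaichuanForCausalLM

model = BaichuanForCausalLM.from_hugging_face(
    "baichuan-inc/Baichuan2-7B-Chat",          # assumed HF model id or local directory
    dtype="float16",                           # override the 'auto' default
    mapping=Mapping(world_size=2, tp_size=2),  # assumed 2-way tensor parallelism
)
```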
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- device: str = 'cuda',
- calib_dataset: str = 'cnn_dailymail',
- calib_batches: int = 512,
- calib_batch_size: int = 1,
- calib_max_seq_length: int = 512,
- random_seed: int = 1234,
- tokenizer_max_seq_length: int = 2048,
- **kwargs,
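A hedged sketch of the calibration-based quantization flow this classmethod runs (the paths, chosen algorithm, and QuantConfig import location are assumptions):

```python
# Hypothetical sketch: write a quantized TensorRT-LLM checkpoint from an HF model.
from tensorrt_llm.models import BaichuanForCausalLM
from tensorrt_llm.models.modeling_utils import QuantConfig  # assumed import path
from tensorrt_llm.quantization import QuantAlgo

BaichuanForCausalLM.quantize(
    "baichuan-inc/Baichuan2-7B-Chat",  # assumed HF model directory or id
    "./baichuan2-7b-int4-awq",         # output directory for the quantized checkpoint
    dtype="float16",
    quant_config=QuantConfig(quant_algo=QuantAlgo.W4A16_AWQ),
    calib_batches=64,                  # fewer calibration batches than the default 512
)
```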
- class tensorrt_llm.models.BloomModel(
- config: PretrainedConfig,
Bases:
Module
- class tensorrt_llm.models.CLIPVisionTransformer(
- image_size,
- num_channels,
- patch_size,
- hidden_size,
- num_attention_heads,
- max_position_embeddings,
- norm_epsilon,
- intermediate_size,
- hidden_act,
- num_hidden_layers,
- require_ln_f,
- mapping: Mapping,
- dtype,
Bases:
Module
- class tensorrt_llm.models.ChatGLMConfig(
- *,
- chatglm_version: str = 'chatglm3',
- add_bias_linear: bool = False,
- add_qkv_bias: bool = True,
- apply_query_key_layer_scaling: bool = False,
- apply_residual_connection_post_layernorm: bool = False,
- rmsnorm: bool = True,
- rotary_pct: float = 0.5,
- rotary_base: float = 10000.0,
- rotary_scaling: dict | None = None,
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.ChatGLMForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
ChatGLMConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a ChatGLMForCausalLM object from the given arguments
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- device: str = 'cuda',
- calib_dataset: str = 'cnn_dailymail',
- calib_batches: int = 512,
- calib_batch_size: int = 1,
- calib_max_seq_length: int = 512,
- random_seed: int = 1234,
- tokenizer_max_seq_length: int = 2048,
- **kwargs,
- class tensorrt_llm.models.ChatGLMModel(
- config: ChatGLMConfig,
Bases:
Module
- forward(
- input_ids: Tensor = None,
- position_ids: Tensor = None,
- use_cache: bool = False,
- attention_mask: Tensor = None,
- kv_cache_params: KeyValueCacheParams = None,
- attention_params: AttentionParams = None,
- class tensorrt_llm.models.CogVLMConfig(
- *,
- mlp_bias: bool = False,
- attn_bias: bool = False,
- rotary_base: float = 10000.0,
- rotary_scaling: dict | None = None,
- **kwargs,
Bases:
PretrainedConfig
- class tensorrt_llm.models.CogVLMForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM, TopModelMixin
- config_class#
alias of
CogVLMConfig
- default_plugin_config(**kwargs)[source]#
Return the default plugin config for this model when no plugin_config value is provided in the to_trt() call. If users need a different plugin config, they can start from the returned object and modify it.
- classmethod from_hugging_face(
- hf_model_dir,
- dtype='float16',
- mapping: Mapping | None = None,
- quant_mode: QuantMode | None = None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- classmethod quantize(
- hf_model_dir,
- output_dir,
- quant_config: QuantConfig,
- *,
- dtype='float16',
- mapping: Mapping | None = None,
- calib_batches=512,
- calib_batch_size=1,
- random_seed=1234,
- tokenizer_max_seq_length=2048,
- **kwargs,
- class tensorrt_llm.models.CohereForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
CohereConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a CohereForCausalLM object from the given arguments
- class tensorrt_llm.models.DbrxConfig(
- *,
- bias: bool = False,
- clip_qkv: float | None = None,
- rotary_base: float = 500000.0,
- rotary_scaling: dict | None = None,
- moe: MoeConfig | dict | None = None,
- **kwargs,
Bases:
PretrainedConfig
- class tensorrt_llm.models.DbrxForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
DbrxConfig
- class tensorrt_llm.models.DecoderModel(*args, **kwargs)[source]#
-
- check_config(
- config: PretrainedConfig,
- forward(
- decoder_input_ids: Tensor,
- encoder_output: Tensor,
- position_ids=None,
- token_type_ids=None,
- use_cache=False,
- attention_mask_params=None,
- last_token_ids=None,
- kv_cache_params=None,
- attention_params=None,
- hidden_states=None,
- lora_params: LoraParams = None,
- cross_kv_cache_gen: Tensor | None = None,
- cross_kv_reuse: Tensor | None = None,
- language_adapter_routings: Tensor | None = None,
- prepare_inputs(
- max_batch_size,
- max_beam_width,
- max_decoder_input_len,
- max_seq_len,
- max_encoder_input_len,
- gather_context_logits: bool = False,
- lora_target_modules: List[str] = None,
- use_cache=True,
- *args,
- **kwargs,
@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.
@return: a list containing values that can be fed into self.forward()
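As a hedged illustration of the build-time pattern (assuming model is an already-constructed DecoderModel and this runs inside a network-definition scope; all sizes are placeholders):

```python
# Hypothetical sketch: prepare_inputs() declares input tensors whose dynamic-shape
# ranges are derived from the sizes below; per the docstring, the returned values
# map onto self.forward()'s arguments.
inputs = model.prepare_inputs(
    max_batch_size=8,
    max_beam_width=1,
    max_decoder_input_len=64,
    max_seq_len=1024,
    max_encoder_input_len=2048,
)
model(*inputs)  # traces the decoder graph with the prepared symbolic tensors
```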
- class tensorrt_llm.models.DeepseekForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
DeepSeekV1Config
- classmethod from_hugging_face(
- model_dir,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- override_fields={},
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.DeepseekV2ForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
DeepSeekV2Config
- classmethod from_hugging_face(
- model_dir,
- dtype: str = 'auto',
- hf_model: PreTrainedModel | None = None,
- use_preloading: bool = False,
- use_safetensors_loading: bool = False,
- mapping: Mapping | None = None,
- override_fields={},
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.DiT(*args, **kwargs)[source]#
-
- check_config(
- config: PretrainedConfig,
- forward(latent, timestep, label)[source]#
Forward pass of DiT.
latent: (N, C, H, W)
timestep: (N,)
label: (N,)
- class tensorrt_llm.models.EagleForCausalLM(*args, **kwargs)[source]#
Bases:
LLaMAForCausalLM
- config_class#
alias of
EagleConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create an EagleForCausalLM object from the given arguments
- prepare_inputs(*args, **kwargs)[source]#
- Inputs needed:
device_request_types: [bs]
draft_tokens: [bs, max_draft_len]
draft_lens: [bs]
spec_decoding_generation_lengths: [bs]
spec_decoding_position_offsets: [bs, max_gen_tokens]
spec_decoding_packed_mask: [bs, max_draft_len, packed_length] **
eagle_temperature: [bs]
rand_data_validation: [bs, max_draft_tokens]
- ** The mask is tricky since the boolean mask needs to be packed at runtime. Hence, the last dimension will be
packed_length = ceil((max_draft_tokens+1)/32)
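For intuition, a small self-contained sketch of the bit-packing described above (names and layout are illustrative, not the library's internals):

```python
# Hypothetical sketch: pack a boolean mask along its last axis into 32-bit words,
# so a length-L bool dimension becomes ceil(L / 32) packed integers.
import math
import numpy as np

def pack_bool_mask(mask: np.ndarray) -> np.ndarray:
    *lead, length = mask.shape
    packed = np.zeros((*lead, math.ceil(length / 32)), dtype=np.uint32)
    for bit in range(length):
        word, offset = divmod(bit, 32)
        packed[..., word] |= mask[..., bit].astype(np.uint32) << np.uint32(offset)
    return packed

mask = np.random.rand(2, 5, 70) > 0.5  # e.g. [bs, max_draft_len, max_draft_tokens + 1]
print(pack_bool_mask(mask).shape)      # (2, 5, 3), since ceil(70 / 32) == 3
```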
- class tensorrt_llm.models.EncoderModel(*args, **kwargs)[source]#
-
- check_config(
- config: PretrainedConfig,
- forward(
- input_ids: Tensor,
- input_lengths=None,
- position_ids=None,
- token_type_ids=None,
- hidden_states=None,
- max_input_length=None,
- prompt_embedding_table=None,
- prompt_tasks=None,
- prompt_vocab_size=None,
- attention_mask=None,
- lora_params: LoraParams = None,
- language_adapter_routings: Tensor | None = None,
- class tensorrt_llm.models.FalconConfig(
- *,
- bias: bool = False,
- parallel_attention: bool = False,
- num_ln_in_parallel_attn: int | None = None,
- new_decoder_architecture: bool = False,
- rotary_base: float = 10000.0,
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.FalconForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
FalconConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a FalconForCausalLM object from the given arguments
- class tensorrt_llm.models.FalconModel(config: FalconConfig)[source]#
Bases:
Module
- class tensorrt_llm.models.GPTConfig(
- *,
- gpt_variant: str = 'gpt2',
- bias: bool = True,
- q_scaling: float = 1.0,
- embedding_scale: float | None = None,
- apply_query_key_layer_scaling: bool = False,
- rotary_pct: float = 1.0,
- rotary_base: float = 10000.0,
- rotary_scaling: dict | None = None,
- inner_layernorm: bool = False,
- norm_before_bmm1: bool = False,
- moe: MoeConfig | dict | None = None,
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- classmethod from_nemo(
- nemo_ckpt_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.GPTForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a GPTForCausalLM object from the given arguments
- classmethod from_nemo(
- nemo_ckpt_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- device: str = 'cuda',
- calib_dataset: str = 'cnn_dailymail',
- calib_batches: int = 512,
- calib_batch_size: int = 1,
- calib_max_seq_length: int = 512,
- random_seed: int = 1234,
- tokenizer_max_seq_length: int = 2048,
- **kwargs,
- class tensorrt_llm.models.GPTJConfig(*, rotary_dim: int = 64, **kwargs)[source]#
Bases:
PretrainedConfig
This is the configuration class to store the configuration of a GPTJ model.
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.GPTJForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
GPTJConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config=None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.GPTJModel(config: GPTJConfig)[source]#
Bases:
Module
- class tensorrt_llm.models.GPTNeoXModel(
- config: PretrainedConfig,
Bases:
Module
- class tensorrt_llm.models.GemmaConfig(
- *,
- architecture: str,
- rotary_base: float = 10000.0,
- rotary_scaling: dict | None = None,
- attn_bias: bool = False,
- mlp_bias: bool = False,
- position_embedding_type: PositionEmbeddingType = PositionEmbeddingType.rope_gpt_neox,
- query_pre_attn_scalar: int | None = None,
- final_logit_softcapping: float | None = None,
- attn_logit_softcapping: float | None = None,
- mapping: Mapping | dict | None = None,
- sliding_window_pattern: int = None,
- rope_local_base_freq: int = None,
- sliding_window: int = None,
- **kwargs,
Bases:
PretrainedConfig
- GEMMA2_ADDED_FIELDS = {'attn_logit_softcapping', 'final_logit_softcapping', 'query_pre_attn_scalar'}#
- GEMMA3_ADDED_FIELDS = {'final_logit_softcapping', 'query_pre_attn_scalar', 'rope_local_base_freq', 'sliding_window', 'sliding_window_pattern'}#
- GEMMA_ADDED_FIELDS = {'attn_bias', 'inter_layernorms', 'mlp_bias', 'rotary_base', 'rotary_scaling'}#
- VERBATIM = {'attn_logit_softcapping', 'final_logit_softcapping', 'hidden_act', 'hidden_size', 'intermediate_size', 'max_position_embeddings', 'num_attention_heads', 'num_hidden_layers', 'query_pre_attn_scalar', 'rope_local_base_freq', 'sliding_window', 'sliding_window_pattern', 'use_parallel_embedding', 'vocab_size'}#
- classmethod from_hugging_face(
- hf_config_or_dir: HfConfigOrDir,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- property is_gemma_2: bool#
- property is_gemma_3: bool#
- class tensorrt_llm.models.GemmaForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- NATIVE_QUANT_FLOW = {QuantAlgo.W4A16, QuantAlgo.W8A16, QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TENSOR_PLUGIN, QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TOKEN_PLUGIN, QuantAlgo.W8A8_SQ_PER_TENSOR_PER_TOKEN_PLUGIN, QuantAlgo.W8A8_SQ_PER_TENSOR_PLUGIN}#
- config_class#
alias of
GemmaConfig
- classmethod from_hugging_face(
- hf_model_dir: HfConfigOrDir,
- dtype='float16',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- load_model_on_cpu: bool = True,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'float16',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- gemma_config_kwargs: Dict[str, Any] = None,
- **quantize_kwargs: Dict[str, Any],
- class tensorrt_llm.models.LLaMAConfig(
- *,
- mlp_bias: bool = False,
- attn_bias: bool = False,
- rotary_base: float = 10000.0,
- rotary_scaling: dict | None = None,
- residual_mlp: bool = False,
- disable_weight_only_quant_plugin: bool = False,
- moe: MoeConfig | dict | None = None,
- remove_duplicated_kv_heads: bool = False,
- embedding_multiplier: float = 1.0,
- attention_multiplier: float = 1.0,
- residual_multiplier: float = 1.0,
- output_multiplier_scale: float = 1.0,
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- classmethod from_meta_ckpt(
- meta_ckpt_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.LLaMAForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
LLaMAConfig
- default_plugin_config(**kwargs)[source]#
Return the default plugin config for this model when no plugin_config value is provided in the to_trt() call. If users need a different plugin config, they can start from the returned object and modify it.
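A hedged sketch of the mutate-the-default pattern this docstring describes (assuming model is an already-constructed LLaMAForCausalLM; the field toggled is an assumption about PluginConfig):

```python
# Hypothetical sketch: start from the model's default plugin config, then override
# one field before passing the config on to the build call.
plugin_config = model.default_plugin_config()
plugin_config.gemm_plugin = "float16"  # assumed PluginConfig attribute
```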
- classmethod from_hugging_face(
- hf_model_or_dir: str | PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LLaMAForCausalLM object from the given arguments
- classmethod from_meta_ckpt(
- meta_ckpt_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- device: str = 'cuda',
- calib_dataset: str = 'cnn_dailymail',
- calib_batches: int = 512,
- calib_batch_size: int = 1,
- calib_max_seq_length: int = 512,
- random_seed: int = 1234,
- tokenizer_max_seq_length: int = 2048,
- **kwargs,
- class tensorrt_llm.models.LLaMAModel(config: LLaMAConfig)[source]#
Bases:
Module
- forward(
- input_ids,
- position_ids=None,
- use_cache=False,
- attention_mask=None,
- spec_decoding_params=None,
- kv_cache_params=None,
- attention_params=None,
- hidden_states=None,
- hidden_states_for_embed=None,
- prompt_embedding_table: Tensor | None = None,
- prompt_tasks: Tensor | None = None,
- prompt_vocab_size: Tensor | None = None,
- lora_params=None,
- class tensorrt_llm.models.LlavaNextVisionConfig(
- *,
- image_size: int,
- patch_size: int,
- text_hidden_size: int,
- projector_hidden_act: str = 'gelu',
- num_channels: int = 3,
- vision_model_type: str = 'clip_vision_model',
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.LlavaNextVisionWrapper(*args, **kwargs)[source]#
-
- classmethod from_hugging_face(
- hf_model_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LlavaNextVisionWrapper object from the given arguments
- class tensorrt_llm.models.MLLaMAForCausalLM(*args, **kwargs)[source]#
-
- config_class#
alias of
MLLaMAConfig
- forward(
- decoder_input_ids: Tensor,
- encoder_output: Tensor,
- use_cache=False,
- attention_mask_params=None,
- last_token_ids=None,
- kv_cache_params=None,
- attention_params=None,
- hidden_states=None,
- lora_params: LoraParams = None,
- cross_kv_cache_gen: Tensor | None = None,
- cross_kv_reuse: Tensor | None = None,
- prompt_embedding_table: Tensor | None = None,
- prompt_tasks: Tensor | None = None,
- prompt_vocab_size: Tensor | None = None,
- skip_cross_attn_blocks: Tensor | None = None,
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create an MLLaMAForCausalLM object from the given arguments
- prepare_inputs(
- max_batch_size,
- max_beam_width,
- max_decoder_input_len,
- max_seq_len,
- max_encoder_input_len,
- gather_context_logits: bool = False,
- gather_generation_logits: bool = False,
- lora_target_modules: List[str] = None,
- prompt_embedding_table_size: int = 0,
- use_cache=True,
- *args,
- **kwargs,
@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.
@return: a list containing values that can be fed into self.forward()
- class tensorrt_llm.models.MPTModel(config: PretrainedConfig)[source]#
Bases:
Module
- class tensorrt_llm.models.MambaForCausalLM(*args, **kwargs)[source]#
-
- config_class#
alias of
MambaConfig
- forward(
- input_ids,
- conv_states,
- ssm_states,
- host_request_types,
- last_token_ids,
- last_token_ids_for_logits,
- host_context_lengths,
- slot_mapping: Tensor | None = None,
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- prepare_inputs(
- max_batch_size,
- max_input_len,
- max_seq_len,
- max_num_tokens,
- use_cache,
- max_beam_width: int = 1,
- opt_num_tokens: int = None,
- opt_batch_size: int = 0,
- prompt_embedding_table_size: int = 0,
- max_draft_len: int = 0,
- gather_context_logits: bool = False,
- lora_target_modules: List[str] = None,
- speculative_decoding_draft_tokens_external: bool = False,
@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.
@return: a list containing values that can be fed into self.forward()
- class tensorrt_llm.models.MedusaConfig(
- *,
- num_medusa_heads: int = 4,
- num_medusa_layers: int = 1,
- max_draft_len: int = 63,
- **kwargs,
Bases:
PretrainedConfig
- classmethod from_hugging_face(
- hf_config_or_dir: str | transformers.PretrainedConfig,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
- class tensorrt_llm.models.MedusaForCausalLm(*args, **kwargs)[source]#
-
- config_class#
alias of
MedusaConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.OPTModel(config: PretrainedConfig)[source]#
Bases:
Module
- class tensorrt_llm.models.Phi3ForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
Phi3Config
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.Phi3Model(
- config: PretrainedConfig,
Bases:
Module
- class tensorrt_llm.models.PhiForCausalLM(*args, **kwargs)[source]#
Bases:
DecoderModelForCausalLM
- config_class#
alias of
PhiConfig
- classmethod from_hugging_face(
- hf_model_or_dir: str | transformers.PreTrainedModel,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- **kwargs,
Create a LLM object and load weights from Hugging Face.
:param hf_model_dir: the Hugging Face model directory
:param dtype: str, the default weight data type when loading from the Hugging Face model
:param mapping: Mapping, specifies the multi-GPU parallel strategy; when None, a single GPU is used
- class tensorrt_llm.models.PhiModel(config: PretrainedConfig)[source]#
Bases:
Module
- class tensorrt_llm.models.PretrainedConfig(
- *,
- architecture: str,
- dtype: str,
- hidden_size: int,
- num_hidden_layers: int,
- num_attention_heads: int,
- vocab_size: int | None = None,
- hidden_act: str = 'gelu',
- logits_dtype: str = 'float32',
- norm_epsilon: float = 1e-05,
- position_embedding_type: PositionEmbeddingType | str = PositionEmbeddingType.learned_absolute,
- max_position_embeddings: int | None = None,
- rotary_embedding_dim: int | None = None,
- num_key_value_heads: int | None = None,
- intermediate_size: int | None = None,
- mapping: Mapping | dict | None = None,
- quantization: QuantConfig | dict | None = None,
- use_parallel_embedding: bool = False,
- embedding_sharding_dim: int = 0,
- head_size: int | None = None,
- qk_layernorm: bool = False,
- runtime_defaults: RuntimeDefaultsIn = None,
- **kwargs,
Bases:
object
- static create_runtime_defaults(
- defaults: RuntimeDefaultsIn = None,
- property kv_dtype#
- property quant_algo#
- property quant_mode#
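A hedged construction sketch showing the required keyword-only fields (all values are placeholders):

```python
# Hypothetical sketch: the minimal required fields of PretrainedConfig.
from tensorrt_llm.models import PretrainedConfig

config = PretrainedConfig(
    architecture="LlamaForCausalLM",  # assumed architecture string
    dtype="float16",
    hidden_size=4096,
    num_hidden_layers=32,
    num_attention_heads=32,
    vocab_size=32000,
)
print(config.quant_mode)  # derived from the (default-empty) quantization config
```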
- class tensorrt_llm.models.PretrainedModel(*args, **kwargs)[source]#
Bases:
Module, GenerationMixin, TopModelMixin
- classmethod from_checkpoint(
- ckpt_dir: str,
- rank: int | None = None,
- config: PretrainedConfig | None = None,
- *,
- preprocess_weights_hook: Callable[[Dict[str, Tensor]], Dict[str, Tensor]] | None = None,
- classmethod from_config(
- config: PretrainedConfig,
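A hedged sketch of reloading a converted checkpoint through from_checkpoint on a concrete subclass (the directory layout and rank are assumptions):

```python
# Hypothetical sketch: reload one rank's shard of a TensorRT-LLM checkpoint.
from tensorrt_llm.models import LLaMAForCausalLM

model = LLaMAForCausalLM.from_checkpoint(
    "./llama-7b-ckpt",  # assumed directory with config.json plus rank*.safetensors
    rank=0,             # which rank's weights to load; defaults to None
)
```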
- prepare_inputs(
- max_batch_size,
- max_input_len,
- max_seq_len,
- max_num_tokens,
- use_cache,
- max_beam_width: int = 1,
- opt_num_tokens: int = None,
- prompt_embedding_table_size: int = 0,
- position_encoding_2d: bool = False,
- max_draft_len: int = 0,
- speculative_decoding_draft_tokens_external: bool = False,
- spec_decoding_is_generation_length_variable: bool = False,
- gather_context_logits: bool = False,
- lora_target_modules: List[str] = None,
- opt_batch_size: int = 0,
- num_hidden_layers: int = None,
- mrope_rotary_cos_sin_size: int = None,
@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.
@return: a list containing values that can be fed into self.forward()
- classmethod quantize(
- hf_model_dir: str,
- output_dir: str,
- dtype: str = 'auto',
- mapping: Mapping | None = None,
- quant_config: QuantConfig | None = None,
- *,
- device: str = 'cuda',
- calib_dataset: str = 'cnn_dailymail',
- calib_batches: int = 512,
- calib_batch_size: int = 1,
- calib_max_seq_length: int = 512,
- random_seed: int = 1234,
- tokenizer_max_seq_length: int = 2048,
- **kwargs,
- class tensorrt_llm.models.ReDrafterForCausalLM(*args, **kwargs)[source]#
Bases:
LLaMAForCausalLM
- prepare_inputs(*args, **kwargs)[source]#
- Inputs needed:
Assuming max_gen_tokens = 1 + nb*(bl - 1), counting the actual tokens:
device_request_types: [bs]
draft_tokens: [bs, nb, bl]
draft_indices: [bs, nb, bl]
draft_probs: [bs, nb, bl-1, V]
spec_decoding_generation_lengths: [bs]
spec_decoding_position_offsets: [bs, max_gen_tokens]
spec_decoding_packed_mask: [bs, max_gen_tokens, packed_length] **
redrafter_inverted_temperature: [bs]
rand_data_sample: [bs]
rand_data_validation: [bs, nb, bl-1]
- ** The mask is tricky since the boolean mask needs to be packed at runtime. Hence, the last dimension will be
packed_length = ceil(max_gen_tokens/32)
- class tensorrt_llm.models.RecurrentGemmaForCausalLM(*args, **kwargs)[source]#
-
- forward(
- input_ids,
- position_ids=None,
- use_cache=False,
- attention_mask=None,
- kv_cache_params=None,
- attention_params=None,
- conv_states=None,
- rnn_states=None,
- host_request_types=None,
- last_token_ids=None,
- last_token_ids_for_logits=None,
- host_context_lengths=None,
- slot_mapping=None,
- prepare_inputs(
- max_batch_size,
- max_input_len,
- max_seq_len,
- max_num_tokens,
- use_cache,
- max_beam_width: int = 1,
- opt_num_tokens: int = None,
- opt_batch_size: int = 0,
- prompt_embedding_table_size: int = 0,
- max_draft_len: int = 0,
- gather_context_logits: bool = False,
- lora_target_modules: List[str] = None,
- speculative_decoding_draft_tokens_external: bool = False,
@brief: Prepare input tensors for the model. The given sizes are used to determine the ranges of the dimensions when using TRT dynamic shapes.
@return: a list containing values that can be fed into self.forward()
- tensorrt_llm.models.RobertaForQuestionAnswering#
- tensorrt_llm.models.RobertaForSequenceClassification#
- class tensorrt_llm.models.SD3Transformer2DModel(*args, **kwargs)[source]#
-
- property attn_processors#
- config_class#
alias of
SD3Transformer2DModelConfig
- forward(
- hidden_states: Tensor,
- encoder_hidden_states: Tensor | None = None,
- pooled_projections: Tensor | None = None,
- timestep: Tensor | None = None,
- block_controlnet_hidden_states: List[Tensor] = None,
- joint_attention_kwargs: Dict[str, Any] | None = None,
- classmethod from_pretrained(
- pretrained_model_name_or_path: str,
- dtype='float16',
- mapping=<tensorrt_llm.mapping.Mapping object>,
- **kwargs,
- class tensorrt_llm.models.SpeculativeDecodingMode(
- value,
- names=<not given>,
- *values,
- module=None,
- qualname=None,
- type=None,
- start=1,
- boundary=None,
Bases:
IntFlag
- DRAFT_TOKENS_EXTERNAL = 2#
- EAGLE = 32#
- EXPLICIT_DRAFT_TOKENS = 16#
- LOOKAHEAD_DECODING = 8#
- MEDUSA = 4#
- NONE = 1#
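Since SpeculativeDecodingMode is an IntFlag, values compose and test with bitwise operators; a minimal sketch using standard enum.IntFlag semantics:

```python
# Hypothetical sketch: composing and testing IntFlag members.
from tensorrt_llm.models import SpeculativeDecodingMode

mode = SpeculativeDecodingMode.MEDUSA
print(bool(mode & SpeculativeDecodingMode.MEDUSA))  # True
print(bool(mode & SpeculativeDecodingMode.EAGLE))   # False
```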