from __future__ import annotations
import torch
class GraphModule(torch.nn.Module):
    def forward(self, s72: "Sym(s72)", L_input_ids_: "i32[s72]", L_self_modules_embed_tokens_parameters_weight_: "bf16[68480, 1024]", L_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]", s80: "Sym(s72)", L_positions_: "i64[s72]", L_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", L_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", L_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", L_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_: "bf16[1024]", L_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", L_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", L_self_modules_embedding_norm_parameters_weight_: "bf16[1024]"):
        l_input_ids_ = L_input_ids_
        l_self_modules_embed_tokens_parameters_weight_ = L_self_modules_embed_tokens_parameters_weight_
        l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = L_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_
        l_positions_ = L_positions_
        l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_ = L_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_
        l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_ = L_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_
        l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_ = L_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_
        l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_ = L_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_
        l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_ = L_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_
        l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_ = L_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_
        l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_ = L_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_
        l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_ = L_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_
        l_self_modules_embedding_norm_parameters_weight_ = L_self_modules_embedding_norm_parameters_weight_
        
        # No stacktrace found for following nodes
        submod_0 = self.submod_0(l_input_ids_, s72, l_self_modules_embed_tokens_parameters_weight_, l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_);  l_input_ids_ = l_self_modules_embed_tokens_parameters_weight_ = l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_ = None
        getitem = submod_0[0]
        getitem_1 = submod_0[1]
        getitem_2 = submod_0[2];  submod_0 = None
        submod_1 = self.submod_1(getitem, s72, getitem_1);  getitem = submod_1 = None
        submod_2 = self.submod_2(l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_, getitem_1, s72, getitem_2, l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_ = getitem_1 = getitem_2 = l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_ = None
        getitem_3 = submod_2[0]
        getitem_4 = submod_2[1]
        getitem_5 = submod_2[2];  submod_2 = None
        submod_3 = self.submod_3(getitem_3, s72, getitem_4);  getitem_3 = submod_3 = None
        submod_4 = self.submod_4(l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_, getitem_4, s72, getitem_5, l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_ = getitem_4 = getitem_5 = l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
        getitem_6 = submod_4[0]
        getitem_7 = submod_4[1]
        getitem_8 = submod_4[2]
        getitem_9 = submod_4[3]
        getitem_10 = submod_4[4];  submod_4 = None
        submod_5 = self.submod_5(getitem_6, s72, getitem_7, getitem_8, getitem_9);  getitem_6 = getitem_7 = getitem_8 = submod_5 = None
        submod_6 = self.submod_6(getitem_9, s72, l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_, getitem_10, l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_);  getitem_9 = l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_ = getitem_10 = l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_ = None
        getitem_11 = submod_6[0]
        getitem_12 = submod_6[1]
        getitem_13 = submod_6[2];  submod_6 = None
        submod_7 = self.submod_7(getitem_11, s72, getitem_12);  getitem_11 = submod_7 = None
        submod_8 = self.submod_8(l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_, getitem_12, s72, getitem_13, l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_ = getitem_12 = getitem_13 = l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_ = None
        getitem_14 = submod_8[0]
        getitem_15 = submod_8[1]
        getitem_16 = submod_8[2];  submod_8 = None
        submod_9 = self.submod_9(getitem_14, s72, getitem_15);  getitem_14 = submod_9 = None
        submod_10 = self.submod_10(l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_, getitem_15, s72, getitem_16, l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_ = getitem_15 = getitem_16 = l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
        getitem_17 = submod_10[0]
        getitem_18 = submod_10[1]
        getitem_19 = submod_10[2]
        getitem_20 = submod_10[3]
        getitem_21 = submod_10[4];  submod_10 = None
        submod_11 = self.submod_11(getitem_17, s72, getitem_18, getitem_19, getitem_20);  getitem_17 = getitem_18 = getitem_19 = submod_11 = None
        submod_12 = self.submod_12(getitem_20, s72, l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_, getitem_21, l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_);  getitem_20 = l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_ = getitem_21 = l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_ = None
        getitem_22 = submod_12[0]
        getitem_23 = submod_12[1]
        getitem_24 = submod_12[2];  submod_12 = None
        submod_13 = self.submod_13(getitem_22, s72, getitem_23);  getitem_22 = submod_13 = None
        submod_14 = self.submod_14(l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_, getitem_23, s72, getitem_24, l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_ = getitem_23 = getitem_24 = l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_ = None
        getitem_25 = submod_14[0]
        getitem_26 = submod_14[1]
        getitem_27 = submod_14[2];  submod_14 = None
        submod_15 = self.submod_15(getitem_25, s72, getitem_26);  getitem_25 = submod_15 = None
        submod_16 = self.submod_16(l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_, getitem_26, s72, getitem_27, l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_ = getitem_26 = getitem_27 = l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
        getitem_28 = submod_16[0]
        getitem_29 = submod_16[1]
        getitem_30 = submod_16[2]
        getitem_31 = submod_16[3]
        getitem_32 = submod_16[4];  submod_16 = None
        submod_17 = self.submod_17(getitem_28, s72, getitem_29, getitem_30, getitem_31);  getitem_28 = getitem_29 = getitem_30 = submod_17 = None
        submod_18 = self.submod_18(getitem_31, s72, l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_, getitem_32, l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_);  getitem_31 = l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_ = getitem_32 = l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_ = None
        getitem_33 = submod_18[0]
        getitem_34 = submod_18[1]
        getitem_35 = submod_18[2];  submod_18 = None
        submod_19 = self.submod_19(getitem_33, s72, getitem_34);  getitem_33 = submod_19 = None
        submod_20 = self.submod_20(l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_, getitem_34, s72, getitem_35, l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_ = getitem_34 = getitem_35 = l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
        getitem_36 = submod_20[0]
        getitem_37 = submod_20[1]
        getitem_38 = submod_20[2]
        getitem_39 = submod_20[3]
        getitem_40 = submod_20[4];  submod_20 = None
        submod_21 = self.submod_21(getitem_36, s72, getitem_37, getitem_38, getitem_39);  getitem_36 = getitem_37 = getitem_38 = submod_21 = None
        submod_22 = self.submod_22(getitem_39, s72, l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_, getitem_40, l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_);  getitem_39 = l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_ = getitem_40 = l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_ = None
        getitem_41 = submod_22[0]
        getitem_42 = submod_22[1]
        getitem_43 = submod_22[2];  submod_22 = None
        submod_23 = self.submod_23(getitem_41, s72, getitem_42);  getitem_41 = submod_23 = None
        submod_24 = self.submod_24(l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_, getitem_42, s72, getitem_43, l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_ = getitem_42 = getitem_43 = l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
        getitem_44 = submod_24[0]
        getitem_45 = submod_24[1]
        getitem_46 = submod_24[2]
        getitem_47 = submod_24[3]
        getitem_48 = submod_24[4];  submod_24 = None
        submod_25 = self.submod_25(getitem_44, s72, getitem_45, getitem_46, getitem_47);  getitem_44 = getitem_45 = getitem_46 = submod_25 = None
        submod_26 = self.submod_26(getitem_47, s72, l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_, getitem_48, l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_);  getitem_47 = l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_ = getitem_48 = l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_ = None
        getitem_49 = submod_26[0]
        getitem_50 = submod_26[1]
        getitem_51 = submod_26[2];  submod_26 = None
        submod_27 = self.submod_27(getitem_49, s72, getitem_50);  getitem_49 = submod_27 = None
        submod_28 = self.submod_28(l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_, getitem_50, s72, getitem_51, l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_, l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_, l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_, l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_, l_positions_, l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_);  l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_ = getitem_50 = getitem_51 = l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_ = l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_ = l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_ = l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_ = l_positions_ = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = None
        getitem_52 = submod_28[0]
        getitem_53 = submod_28[1]
        getitem_54 = submod_28[2]
        getitem_55 = submod_28[3]
        getitem_56 = submod_28[4];  submod_28 = None
        submod_29 = self.submod_29(getitem_52, s72, getitem_53, getitem_54, getitem_55);  getitem_52 = getitem_53 = getitem_54 = submod_29 = None
        submod_30 = self.submod_30(getitem_55, s72, l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_, l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_, getitem_56, l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_);  getitem_55 = l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_ = l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_ = getitem_56 = l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_ = None
        getitem_57 = submod_30[0]
        getitem_58 = submod_30[1]
        getitem_59 = submod_30[2];  submod_30 = None
        submod_31 = self.submod_31(getitem_57, s72, getitem_58);  getitem_57 = submod_31 = None
        submod_32 = self.submod_32(l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_, getitem_58, s72, getitem_59, l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_, l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_, l_self_modules_embedding_norm_parameters_weight_);  l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_ = getitem_58 = s72 = getitem_59 = l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_ = l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_ = l_self_modules_embedding_norm_parameters_weight_ = None
        return (submod_32,)
        
    class submod_0(torch.nn.Module):
        def forward(self, l_input_ids_: "i32[s72]", s72: "Sym(s72)", l_self_modules_embed_tokens_parameters_weight_: "bf16[68480, 1024]", l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/vocab_parallel_embedding.py:478 in forward_native, code: output_parallel = self.quant_method.embedding(self, masked_input.long())
            long: "i64[s72]" = l_input_ids_.long();  l_input_ids_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/vocab_parallel_embedding.py:72 in embedding, code: return F.embedding(input_, layer.weight)
            embedding: "bf16[s72, 1024]" = torch.nn.functional.embedding(long, l_self_modules_embed_tokens_parameters_weight_);  long = l_self_modules_embed_tokens_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_0_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = embedding.to(torch.float32)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = to.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add);  add = None
            mul: "f32[s72, 1024]" = to * rsqrt;  to = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_1 * _get_data_attr;  to_1 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_1)
            return (mul_1, empty_like, embedding)
            
    class submod_1(torch.nn.Module):
        def forward(self, x_3: "bf16[s72, 1024]", s72: "Sym(s72)", output: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_3, output, 'model.layers.0.conv');  x_3 = output = short_conv = None
            return ()
            
    class submod_2(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_: "bf16[1024]", output: "bf16[s72, 1024]", s72: "Sym(s72)", output_parallel: "bf16[s72, 1024]", l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_0_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output.to(torch.float32);  output = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + output_parallel;  to = output_parallel = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_0_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_0_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_1_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_3(torch.nn.Module):
        def forward(self, x_14: "bf16[s72, 1024]", s72: "Sym(s72)", output_1: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_14, output_1, 'model.layers.1.conv');  x_14 = output_1 = short_conv = None
            return ()
            
    class submod_4(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_1: "bf16[s72, 1024]", s72: "Sym(s72)", residual_1: "bf16[s72, 1024]", l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_1_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_1.to(torch.float32);  output_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_1;  to = residual_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_1_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_1_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_2_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_2_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_2_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_2_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_5(torch.nn.Module):
        def forward(self, key_2: "bf16[s72, 8, 64]", s72: "Sym(s72)", value: "bf16[s72, 8, 64]", query_2: "bf16[s72, 16, 64]", output_5: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_2, value, 'model.layers.2.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_2, key_2, value, output_5, 'model.layers.2.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_2 = key_2 = value = output_5 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_6(torch.nn.Module):
        def forward(self, output_5: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_3: "bf16[s72, 1024]", l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_5.view(-1, 1024);  output_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_2_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_2_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_3;  to = residual_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_2_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_2_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_3_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_7(torch.nn.Module):
        def forward(self, x_44: "bf16[s72, 1024]", s72: "Sym(s72)", output_6: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_44, output_6, 'model.layers.3.conv');  x_44 = output_6 = short_conv = None
            return ()
            
    class submod_8(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_6: "bf16[s72, 1024]", s72: "Sym(s72)", residual_5: "bf16[s72, 1024]", l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_3_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_6.to(torch.float32);  output_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_5;  to = residual_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_3_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_3_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_4_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_9(torch.nn.Module):
        def forward(self, x_55: "bf16[s72, 1024]", s72: "Sym(s72)", output_7: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_55, output_7, 'model.layers.4.conv');  x_55 = output_7 = short_conv = None
            return ()
            
    class submod_10(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_7: "bf16[s72, 1024]", s72: "Sym(s72)", residual_7: "bf16[s72, 1024]", l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_4_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_7.to(torch.float32);  output_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_7;  to = residual_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_4_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_4_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_5_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_5_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_5_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_5_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_11(torch.nn.Module):
        def forward(self, key_5: "bf16[s72, 8, 64]", s72: "Sym(s72)", value_1: "bf16[s72, 8, 64]", query_5: "bf16[s72, 16, 64]", output_11: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_5, value_1, 'model.layers.5.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_5, key_5, value_1, output_11, 'model.layers.5.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_5 = key_5 = value_1 = output_11 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_12(torch.nn.Module):
        def forward(self, output_11: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_9: "bf16[s72, 1024]", l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_11.view(-1, 1024);  output_11 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_5_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_5_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_9;  to = residual_9 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_5_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_5_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_6_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_13(torch.nn.Module):
        def forward(self, x_85: "bf16[s72, 1024]", s72: "Sym(s72)", output_12: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_85, output_12, 'model.layers.6.conv');  x_85 = output_12 = short_conv = None
            return ()
            
    class submod_14(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_12: "bf16[s72, 1024]", s72: "Sym(s72)", residual_11: "bf16[s72, 1024]", l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_6_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_12.to(torch.float32);  output_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_11;  to = residual_11 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_6_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_6_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_7_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_15(torch.nn.Module):
        def forward(self, x_96: "bf16[s72, 1024]", s72: "Sym(s72)", output_13: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_96, output_13, 'model.layers.7.conv');  x_96 = output_13 = short_conv = None
            return ()
            
    class submod_16(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_13: "bf16[s72, 1024]", s72: "Sym(s72)", residual_13: "bf16[s72, 1024]", l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_7_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_13.to(torch.float32);  output_13 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_13;  to = residual_13 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_7_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_7_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_8_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_8_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_8_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_8_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_17(torch.nn.Module):
        def forward(self, key_8: "bf16[s72, 8, 64]", s72: "Sym(s72)", value_2: "bf16[s72, 8, 64]", query_8: "bf16[s72, 16, 64]", output_17: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_8, value_2, 'model.layers.8.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_8, key_8, value_2, output_17, 'model.layers.8.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_8 = key_8 = value_2 = output_17 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_18(torch.nn.Module):
        def forward(self, output_17: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_15: "bf16[s72, 1024]", l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_17.view(-1, 1024);  output_17 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_8_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_8_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_15;  to = residual_15 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_8_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_8_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_9_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_19(torch.nn.Module):
        def forward(self, x_126: "bf16[s72, 1024]", s72: "Sym(s72)", output_18: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_126, output_18, 'model.layers.9.conv');  x_126 = output_18 = short_conv = None
            return ()
            
    class submod_20(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_18: "bf16[s72, 1024]", s72: "Sym(s72)", residual_17: "bf16[s72, 1024]", l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_9_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_18.to(torch.float32);  output_18 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_17;  to = residual_17 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_9_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_9_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_10_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_10_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_10_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_10_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_21(torch.nn.Module):
        def forward(self, key_11: "bf16[s72, 8, 64]", s72: "Sym(s72)", value_3: "bf16[s72, 8, 64]", query_11: "bf16[s72, 16, 64]", output_22: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_11, value_3, 'model.layers.10.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_11, key_11, value_3, output_22, 'model.layers.10.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_11 = key_11 = value_3 = output_22 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_22(torch.nn.Module):
        def forward(self, output_22: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_19: "bf16[s72, 1024]", l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_22.view(-1, 1024);  output_22 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_10_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_10_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_19;  to = residual_19 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_10_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_10_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_11_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_23(torch.nn.Module):
        def forward(self, x_156: "bf16[s72, 1024]", s72: "Sym(s72)", output_23: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_156, output_23, 'model.layers.11.conv');  x_156 = output_23 = short_conv = None
            return ()
            
    class submod_24(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_23: "bf16[s72, 1024]", s72: "Sym(s72)", residual_21: "bf16[s72, 1024]", l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_11_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_23.to(torch.float32);  output_23 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_21;  to = residual_21 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_11_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_11_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_12_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_12_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_12_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_12_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_25(torch.nn.Module):
        def forward(self, key_14: "bf16[s72, 8, 64]", s72: "Sym(s72)", value_4: "bf16[s72, 8, 64]", query_14: "bf16[s72, 16, 64]", output_27: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_14, value_4, 'model.layers.12.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_14, key_14, value_4, output_27, 'model.layers.12.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_14 = key_14 = value_4 = output_27 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_26(torch.nn.Module):
        def forward(self, output_27: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_23: "bf16[s72, 1024]", l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_27.view(-1, 1024);  output_27 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_12_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_12_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_23;  to = residual_23 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_12_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_12_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_13_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_27(torch.nn.Module):
        def forward(self, x_186: "bf16[s72, 1024]", s72: "Sym(s72)", output_28: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_186, output_28, 'model.layers.13.conv');  x_186 = output_28 = short_conv = None
            return ()
            
    class submod_28(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_28: "bf16[s72, 1024]", s72: "Sym(s72)", residual_25: "bf16[s72, 1024]", l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_: "bf16[1024]", l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_: "bf16[2048, 1024]", l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_: "bf16[64]", l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_: "bf16[64]", l_positions_: "i64[s72]", l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_: "bf16[128000, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_13_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_28.to(torch.float32);  output_28 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_25;  to = residual_25 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_13_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_13_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_14_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 2048]" = torch._C._nn.linear(mul_4, l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_, None);  mul_4 = l_self_modules_layers_modules_14_modules_self_attn_modules_qkv_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:181 in forward, code: q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
            split = linear_2.split([1024, 512, 512], dim = -1);  linear_2 = None
            getitem_2: "bf16[s72, 1024]" = split[0]
            getitem_3: "bf16[s72, 512]" = split[1]
            getitem_4: "bf16[s72, 512]" = split[2];  split = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:182 in forward, code: q = q.view(n_tokens, self.num_heads, self.head_dim).contiguous()
            view: "bf16[s72, 16, 64]" = getitem_2.view(s72, 16, 64);  getitem_2 = None
            contiguous: "bf16[s72, 16, 64]" = view.contiguous();  view = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:183 in forward, code: k = k.view(n_tokens, self.num_kv_heads, self.head_dim).contiguous()
            view_1: "bf16[s72, 8, 64]" = getitem_3.view(s72, 8, 64);  getitem_3 = None
            contiguous_1: "bf16[s72, 8, 64]" = view_1.contiguous();  view_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_2: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_);  l_self_modules_layers_modules_14_modules_self_attn_modules_q_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_6: "f32[s72, 16, 64]" = contiguous.to(torch.float32);  contiguous = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_3: "f32[s72, 16, 64]" = to_6.pow(2)
            mean_2: "f32[s72, 16, 1]" = pow_3.mean(dim = -1, keepdim = True);  pow_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_4: "f32[s72, 16, 1]" = mean_2 + 1e-05;  mean_2 = None
            rsqrt_2: "f32[s72, 16, 1]" = torch.rsqrt(add_4);  add_4 = None
            mul_5: "f32[s72, 16, 64]" = to_6 * rsqrt_2;  to_6 = rsqrt_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_7: "bf16[s72, 16, 64]" = mul_5.to(torch.bfloat16);  mul_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_6: "bf16[s72, 16, 64]" = to_7 * _get_data_attr_2;  to_7 = _get_data_attr_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_3: "bf16[64]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_);  l_self_modules_layers_modules_14_modules_self_attn_modules_k_layernorm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_8: "f32[s72, 8, 64]" = contiguous_1.to(torch.float32);  contiguous_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_4: "f32[s72, 8, 64]" = to_8.pow(2)
            mean_3: "f32[s72, 8, 1]" = pow_4.mean(dim = -1, keepdim = True);  pow_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_5: "f32[s72, 8, 1]" = mean_3 + 1e-05;  mean_3 = None
            rsqrt_3: "f32[s72, 8, 1]" = torch.rsqrt(add_5);  add_5 = None
            mul_7: "f32[s72, 8, 64]" = to_8 * rsqrt_3;  to_8 = rsqrt_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_9: "bf16[s72, 8, 64]" = mul_7.to(torch.bfloat16);  mul_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_8: "bf16[s72, 8, 64]" = to_9 * _get_data_attr_3;  to_9 = _get_data_attr_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:127 in forward_static, code: positions = positions.flatten()
            flatten: "i64[s72]" = l_positions_.flatten();  l_positions_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:129 in forward_static, code: cos_sin = cos_sin_cache.index_select(0, positions)
            index_select: "bf16[s72, 64]" = l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_.index_select(0, flatten);  l_self_modules_layers_modules_2_modules_self_attn_modules_rotary_emb_buffers_cos_sin_cache_ = flatten = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:130 in forward_static, code: cos, sin = cos_sin.chunk(2, dim=-1)
            chunk = index_select.chunk(2, dim = -1);  index_select = None
            getitem_5: "bf16[s72, 32]" = chunk[0]
            getitem_6: "bf16[s72, 32]" = chunk[1];  chunk = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:132 in forward_static, code: query_shape = query.shape
            size = mul_6.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:133 in forward_static, code: query = query.view(num_tokens, -1, head_size)
            view_2: "bf16[s72, 16, 64]" = mul_6.view(s72, -1, 64);  mul_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:134 in forward_static, code: query_rot = query[..., :rotary_dim]
            getitem_7: "bf16[s72, 16, 64]" = view_2[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:135 in forward_static, code: query_pass = query[..., rotary_dim:]
            getitem_8: "bf16[s72, 16, 0]" = view_2[(Ellipsis, slice(64, None, None))];  view_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2)
            to_10: "bf16[s72, 1, 32]" = unsqueeze.to(torch.bfloat16);  unsqueeze = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_1: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2)
            to_11: "bf16[s72, 1, 32]" = unsqueeze_1.to(torch.bfloat16);  unsqueeze_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_1 = torch.chunk(getitem_7, 2, dim = -1);  getitem_7 = None
            getitem_9: "bf16[s72, 16, 32]" = chunk_1[0]
            getitem_10: "bf16[s72, 16, 32]" = chunk_1[1];  chunk_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_9: "bf16[s72, 16, 32]" = getitem_9 * to_10
            mul_10: "bf16[s72, 16, 32]" = getitem_10 * to_11
            sub: "bf16[s72, 16, 32]" = mul_9 - mul_10;  mul_9 = mul_10 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_11: "bf16[s72, 16, 32]" = getitem_10 * to_10;  getitem_10 = to_10 = None
            mul_12: "bf16[s72, 16, 32]" = getitem_9 * to_11;  getitem_9 = to_11 = None
            add_6: "bf16[s72, 16, 32]" = mul_11 + mul_12;  mul_11 = mul_12 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat: "bf16[s72, 16, 64]" = torch.cat((sub, add_6), dim = -1);  sub = add_6 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:142 in forward_static, code: query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
            cat_1: "bf16[s72, 16, 64]" = torch.cat((cat, getitem_8), dim = -1);  cat = getitem_8 = None
            reshape: "bf16[s72, 16, 64]" = cat_1.reshape(size);  cat_1 = size = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:146 in forward_static, code: key_shape = key.shape
            size_1 = mul_8.size()
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:147 in forward_static, code: key = key.view(num_tokens, -1, head_size)
            view_3: "bf16[s72, 8, 64]" = mul_8.view(s72, -1, 64);  mul_8 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:148 in forward_static, code: key_rot = key[..., :rotary_dim]
            getitem_11: "bf16[s72, 8, 64]" = view_3[(Ellipsis, slice(None, 64, None))]
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:149 in forward_static, code: key_pass = key[..., rotary_dim:]
            getitem_12: "bf16[s72, 8, 0]" = view_3[(Ellipsis, slice(64, None, None))];  view_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:163 in forward_static, code: cos = cos.unsqueeze(-2).to(x.dtype)
            unsqueeze_2: "bf16[s72, 1, 32]" = getitem_5.unsqueeze(-2);  getitem_5 = None
            to_12: "bf16[s72, 1, 32]" = unsqueeze_2.to(torch.bfloat16);  unsqueeze_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:164 in forward_static, code: sin = sin.unsqueeze(-2).to(x.dtype)
            unsqueeze_3: "bf16[s72, 1, 32]" = getitem_6.unsqueeze(-2);  getitem_6 = None
            to_13: "bf16[s72, 1, 32]" = unsqueeze_3.to(torch.bfloat16);  unsqueeze_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:167 in forward_static, code: x1, x2 = torch.chunk(x, 2, dim=-1)
            chunk_2 = torch.chunk(getitem_11, 2, dim = -1);  getitem_11 = None
            getitem_13: "bf16[s72, 8, 32]" = chunk_2[0]
            getitem_14: "bf16[s72, 8, 32]" = chunk_2[1];  chunk_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:172 in forward_static, code: o1 = x1 * cos - x2 * sin
            mul_13: "bf16[s72, 8, 32]" = getitem_13 * to_12
            mul_14: "bf16[s72, 8, 32]" = getitem_14 * to_13
            sub_1: "bf16[s72, 8, 32]" = mul_13 - mul_14;  mul_13 = mul_14 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:173 in forward_static, code: o2 = x2 * cos + x1 * sin
            mul_15: "bf16[s72, 8, 32]" = getitem_14 * to_12;  getitem_14 = to_12 = None
            mul_16: "bf16[s72, 8, 32]" = getitem_13 * to_13;  getitem_13 = to_13 = None
            add_7: "bf16[s72, 8, 32]" = mul_15 + mul_16;  mul_15 = mul_16 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/common.py:176 in forward_static, code: output = torch.cat((o1, o2), dim=-1)
            cat_2: "bf16[s72, 8, 64]" = torch.cat((sub_1, add_7), dim = -1);  sub_1 = add_7 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/rotary_embedding/base.py:156 in forward_static, code: key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
            cat_3: "bf16[s72, 8, 64]" = torch.cat((cat_2, getitem_12), dim = -1);  cat_2 = getitem_12 = None
            reshape_1: "bf16[s72, 8, 64]" = cat_3.reshape(size_1);  cat_3 = size_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:187 in forward, code: q = q.view(n_tokens, self.num_heads * self.head_dim)
            view_4: "bf16[s72, 1024]" = reshape.view(s72, 1024);  reshape = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:188 in forward, code: k = k.view(n_tokens, self.num_kv_heads * self.head_dim)
            view_5: "bf16[s72, 512]" = reshape_1.view(s72, 512);  reshape_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:381 in forward, code: output = torch.empty(output_shape, dtype=output_dtype, device=query.device)
            size_2 = torch.Size([s72, 1024]);  s72 = None
            empty: "bf16[s72, 1024]" = torch.empty(size_2, dtype = torch.bfloat16, device = device(type='cuda', index=0));  size_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:386 in forward, code: query = query.view(-1, self.num_heads, self.head_size)
            view_6: "bf16[s72, 16, 64]" = view_4.view(-1, 16, 64);  view_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:387 in forward, code: output = output.view(-1, self.num_heads, self.head_size_v)
            view_7: "bf16[s72, 16, 64]" = empty.view(-1, 16, 64);  empty = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:389 in forward, code: key = key.view(-1, self.num_kv_heads, self.head_size)
            view_8: "bf16[s72, 8, 64]" = view_5.view(-1, 8, 64);  view_5 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:391 in forward, code: value = value.view(-1, self.num_kv_heads, self.head_size_v)
            view_9: "bf16[s72, 8, 64]" = getitem_4.view(-1, 8, 64);  getitem_4 = None
            return (view_8, view_9, view_6, view_7, to_4)
            
    class submod_29(torch.nn.Module):
        def forward(self, key_17: "bf16[s72, 8, 64]", s72: "Sym(s72)", value_5: "bf16[s72, 8, 64]", query_17: "bf16[s72, 16, 64]", output_32: "bf16[s72, 16, 64]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:412 in forward, code: kv_cache_dummy_dep = torch.ops.vllm.unified_kv_cache_update(
            unified_kv_cache_update: "bf16[0]" = torch.ops.vllm.unified_kv_cache_update(key_17, value_5, 'model.layers.14.self_attn.attn')
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:415 in forward, code: torch.ops.vllm.unified_attention_with_output(
            unified_attention_with_output = torch.ops.vllm.unified_attention_with_output(query_17, key_17, value_5, output_32, 'model.layers.14.self_attn.attn', kv_cache_dummy_dep = unified_kv_cache_update);  query_17 = key_17 = value_5 = output_32 = unified_kv_cache_update = unified_attention_with_output = None
            return ()
            
    class submod_30(torch.nn.Module):
        def forward(self, output_32: "bf16[s72, 16, 64]", s72: "Sym(s72)", l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_: "bf16[1024, 1024]", l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_: "bf16[1024]", residual_27: "bf16[s72, 1024]", l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/attention/layer.py:423 in forward, code: return output.view(-1, hidden_size)
            view: "bf16[s72, 1024]" = output_32.view(-1, 1024);  output_32 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 1024]" = torch._C._nn.linear(view, l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_, None);  view = l_self_modules_layers_modules_14_modules_self_attn_modules_out_proj_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_14_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = linear.to(torch.float32);  linear = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_27;  to = residual_27 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_14_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear_1[(Ellipsis, slice(4608, None, None))];  linear_1 = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_2: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_14_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_);  l_self_modules_layers_modules_15_modules_operator_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_2.to(torch.float32);  linear_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/models/lfm2.py:296 in forward, code: output = torch.empty_like(hidden_states)
            empty_like: "bf16[s72, 1024]" = torch.empty_like(mul_4)
            return (mul_4, empty_like, to_4)
            
    class submod_31(torch.nn.Module):
        def forward(self, x_216: "bf16[s72, 1024]", s72: "Sym(s72)", output_33: "bf16[s72, 1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/mamba/short_conv.py:98 in forward, code: torch.ops.vllm.short_conv(
            short_conv = torch.ops.vllm.short_conv(x_216, output_33, 'model.layers.15.conv');  x_216 = output_33 = short_conv = None
            return ()
            
    class submod_32(torch.nn.Module):
        def forward(self, l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_: "bf16[1024]", output_33: "bf16[s72, 1024]", s72: "Sym(s72)", residual_29: "bf16[s72, 1024]", l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_: "bf16[9216, 1024]", l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_: "bf16[1024, 4608]", l_self_modules_embedding_norm_parameters_weight_: "bf16[1024]"):
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_);  l_self_modules_layers_modules_15_modules_ffn_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to: "f32[s72, 1024]" = output_33.to(torch.float32);  output_33 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add: "f32[s72, 1024]" = to + residual_29;  to = residual_29 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_1: "bf16[s72, 1024]" = add.to(torch.bfloat16)
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_1: "f32[s72, 1024]" = add.pow(2)
            mean: "f32[s72, 1]" = pow_1.mean(dim = -1, keepdim = True);  pow_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_1: "f32[s72, 1]" = mean + 1e-05;  mean = None
            rsqrt: "f32[s72, 1]" = torch.rsqrt(add_1);  add_1 = None
            mul: "f32[s72, 1024]" = add * rsqrt;  add = rsqrt = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_2: "bf16[s72, 1024]" = mul.to(torch.bfloat16);  mul = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_1: "bf16[s72, 1024]" = to_2 * _get_data_attr;  to_2 = _get_data_attr = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear: "bf16[s72, 9216]" = torch._C._nn.linear(mul_1, l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_, None);  mul_1 = l_self_modules_layers_modules_15_modules_feed_forward_modules_w1_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/activation.py:145 in forward_native, code: return F.silu(x[..., :d]) * x[..., d:]
            getitem: "bf16[s72, 4608]" = linear[(Ellipsis, slice(None, 4608, None))]
            silu: "bf16[s72, 4608]" = torch.nn.functional.silu(getitem);  getitem = None
            getitem_1: "bf16[s72, 4608]" = linear[(Ellipsis, slice(4608, None, None))];  linear = None
            mul_2: "bf16[s72, 4608]" = silu * getitem_1;  silu = getitem_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/utils.py:105 in default_unquantized_gemm, code: return torch.nn.functional.linear(x, weight, bias)
            linear_1: "bf16[s72, 1024]" = torch._C._nn.linear(mul_2, l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_, None);  mul_2 = l_self_modules_layers_modules_15_modules_feed_forward_modules_w2_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:192 in forward_native, code: self.weight.data if self.has_weight else None,
            _get_data_attr_1: "bf16[1024]" = torch._C._autograd._get_data_attr(l_self_modules_embedding_norm_parameters_weight_);  l_self_modules_embedding_norm_parameters_weight_ = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:145 in forward_static, code: x = x.to(torch.float32)
            to_3: "f32[s72, 1024]" = linear_1.to(torch.float32);  linear_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:150 in forward_static, code: x = x + residual
            add_2: "f32[s72, 1024]" = to_3 + to_1;  to_3 = to_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:151 in forward_static, code: residual = x.to(orig_dtype)
            to_4: "bf16[s72, 1024]" = add_2.to(torch.bfloat16);  to_4 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:169 in forward_static, code: variance = x_var.pow(2).mean(dim=-1, keepdim=True)
            pow_2: "f32[s72, 1024]" = add_2.pow(2)
            mean_1: "f32[s72, 1]" = pow_2.mean(dim = -1, keepdim = True);  pow_2 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:171 in forward_static, code: x = x * torch.rsqrt(variance + variance_epsilon)
            add_3: "f32[s72, 1]" = mean_1 + 1e-05;  mean_1 = None
            rsqrt_1: "f32[s72, 1]" = torch.rsqrt(add_3);  add_3 = None
            mul_3: "f32[s72, 1024]" = add_2 * rsqrt_1;  add_2 = rsqrt_1 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:172 in forward_static, code: x = x.to(orig_dtype)
            to_5: "bf16[s72, 1024]" = mul_3.to(torch.bfloat16);  mul_3 = None
            
             # File: /home/ubuntu/.local/lib/python3.10/site-packages/vllm/model_executor/layers/layernorm.py:174 in forward_static, code: x = x * weight
            mul_4: "bf16[s72, 1024]" = to_5 * _get_data_attr_1;  to_5 = _get_data_attr_1 = None
            return mul_4
            