# Load a causal-LM directly from the Hugging Face Hub and report its size.
from transformers import AutoTokenizer, AutoModelForCausalLM
import streamlit as st


def count_parameters(model):
    """Return the number of trainable parameters in *model*.

    Only parameters with ``requires_grad=True`` are counted, so frozen
    weights (e.g. from partial fine-tuning setups) are excluded.
    """
    # Fixed: original string had a stray "{" from a broken f-string attempt.
    print(f"Counting parameters of model: {type(model).__name__}")
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


# Fixed: the Streamlit API is st.text_input, not st.input_text.
m_id = st.text_input(
    "model id",
    placeholder="K00B404/Merged_Beowolx-CodePro_Medusa2-14X-7B-Mistral-I-v0-2",
)

# Fixed: `with m_id:` fails (str is not a context manager). The intent is to
# download and inspect the model only once the user has entered a model id.
if m_id:
    tokenizer = AutoTokenizer.from_pretrained(m_id)
    model = AutoModelForCausalLM.from_pretrained(m_id)
    st.info(f"{count_parameters(model)} parameters")