# SHMT / config.yml
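# Model card metadata and default inference settings for the Evolutionary
# Multi-Modal Model. Note: the speech_recognition / text_generation /
# code_generation and tests sections below are not standard Hugging Face
# metadata fields; they appear to be read by this repository's own
# inference and evaluation scripts (assumption).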
model_name: Evolutionary Multi-Modal Model
model_type: transformer
license: mit
language:
- en
- zh
datasets:
- "Custom"
tags:
- text-generation
- code-generation
- speech-recognition
- multi-modal
- evolutionary
base_model: facebook/bart-base
finetuned_from:
- gpt2
- bert-base-uncased
- facebook/wav2vec2-base-960h
- openai/clip-vit-base-patch32
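# The source checkpoints above presumably act as per-modality backbones:
# gpt2 and bert-base-uncased for text, facebook/wav2vec2-base-960h for speech,
# and openai/clip-vit-base-patch32 for vision (an assumption based on each model's modality).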
dataset: Custom Multi-Modal Dataset
metrics:
- perplexity
- bleu
- wer
- cer
library_name: transformers
pipeline_tag: text-generation
inference:
  parameters:
    max_length: 50
    top_k: 50
    top_p: 0.95
    temperature: 1.2
    do_sample: true
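# The parameters above are standard transformers sampling arguments. Assuming the
# loading code forwards them to generate(), a minimal illustrative call would be:
#   model.generate(**inputs, max_length=50, top_k=50, top_p=0.95,
#                  temperature=1.2, do_sample=True)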
speech_recognition:
  waveform_path: "C:/Users/baby7/Desktop/权重参数/sample-15s.wav"
  task: "speech_recognition"
  output_audio_key: "Transcription"
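# With the wav2vec2 backbone listed in finetuned_from, this section would
# typically be served by an ASR pipeline, e.g. (illustrative sketch only):
#   pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")(waveform_path)
# output_audio_key presumably names the transcription field in the script's result dict.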
text_generation:
  input_text: "What is the future of AI?"
  task: "text_generation"
  output_text_key: "Generated Text"
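# A minimal equivalent with the gpt2 backbone (illustrative sketch only):
#   pipeline("text-generation", model="gpt2")("What is the future of AI?", max_length=50)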
code_generation:
  input_code: "def add(a, b): return"
  task: "code_generation"
  output_code_key: "Generated Code"
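# Code completion reuses the text-generation path with a code prompt; the
# expected completion for this prompt is listed in the tests section below.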
tests:
- name: speech_recognition_test
  waveform_path: "C:/Users/baby7/Desktop/权重参数/sample-15s.wav"
  expected_output: "Expected transcription"
- name: text_generation_test
  input_text: "What is the future of AI?"
  expected_output: "Predicted text about AI"
- name: code_generation_test
  input_code: "def add(a, b): return"
  expected_output: "def add(a, b): return a + b"
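# Each test pairs a single input with an expected_output. The transcription and
# text-generation expectations above are placeholders, so the comparison logic
# presumably lives in this repository's own test harness (assumption).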
extra_info:
  author: zero
  version: 1.0
  description: |
    This Evolutionary Multi-Modal Model is designed for tasks such as text generation, code generation,
    speech recognition, and vision understanding. It leverages the capabilities of multiple pre-trained
    models and applies evolutionary techniques to optimize performance across these tasks.
  citation:
  - |
    @article{your_reference_2025,
      title={Evolutionary Multi-Modal Model for Enhanced Performance},
      author={Your Name},
      journal={Journal of AI Research},
      year={2025}
    }