import streamlit as st
# pip install streamlit transformers torch
import torch
from transformers import pipeline
from transformers import BertTokenizer, BertModel
with st.sidebar:
    st.title("Hugging Face Hands on")
    st.image("logo.jpg")
    choice = st.radio("Select the HF Concept", ["Pipeline", "Tokenization", "Generation"])
if choice == "Pipeline":
task_name = st.selectbox("Choose a task", ["sentiment-analysis", "text-classification", "question-answering", "translation", "fill-mask"])
model_name = st.selectbox("Choose a model", ["distilbert-base-uncased", "bert-base-uncased", "roberta-base", "gpt2", "ctrl"])
model = pipeline(task_name, model_name)
input_text = st.text_area("Enter your text here")
result = model(input_text)[0]
st.success(f"Sentiment: {result['label']}, Score: {result['score']:.2f}")
if choice == "Tokenization":
# Tokenization with BERT
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
input_text = st.text_area("Enter text here to encode")
inputs = tokenizer(input_text, return_tensors="pt")
outputs = model(**inputs)
st.success(outputs.last_hidden_state.shape)
# Output: torch.Size([1, 1, 768])
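    # The hidden-state shape alone does not show what the tokenizer actually
    # produced. As an illustrative addition (not in the original app), display
    # the subword tokens and their vocabulary ids as well.
    if input_text:
        tokens = tokenizer.tokenize(input_text)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        st.write("Tokens:", tokens)
        st.write("Token ids:", token_ids)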
if choice == "Generation":
model_name = st.selectbox("Choose a model", ["gpt2", "ctrl"])
model = pipeline("text-generation", model=model_name)
input_text= "hugging Face is"
input_text = st.text_area("Enter your text here")
generator = pipeline("text-generation", model=model_name)
num_return_sequences = st.slider('Sequence No', min_value=10, max_value=100, value=5, step=1)
num_tokens_to_generate = st.slider('No of Tokens', min_value=10, max_value=100, value=5, step=1)
result = model(input_text, max_length=int(num_tokens_to_generate),
num_return_sequences=int(num_return_sequences))[0]
st.success(generator(input_text, max_length=30,
num_return_sequences=1))
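    # The call above uses the model's default decoding settings; the pipeline
    # forwards the standard generate() sampling arguments (do_sample, temperature,
    # top_k, top_p). An illustrative sampled variant, with arbitrary parameter
    # values not taken from the original app:
    if input_text:
        sampled = generator(input_text, max_length=int(num_tokens_to_generate),
                            do_sample=True, temperature=0.8, top_k=50, top_p=0.95)
        st.write(sampled[0]["generated_text"])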