import gradio
from transformers import pipeline

# Zero-shot classifier: scores arbitrary candidate labels against an input text.
classifier = pipeline("zero-shot-classification",
                      model="facebook/bart-large-mnli")

# Earlier experiments, kept for reference:
# sequence_to_classify = "one day I will see the world"
# candidate_labels = ['travel', 'cooking', 'dancing']
# CATEGORIES = ['doc_type.jur', 'doc_type.Spec', 'doc_type.ZDF', 'doc_type.Publ',
#               'doc_type.Scheme', 'content_type.Alt', 'content_type.Krypto',
#               'content_type.Karte', 'content_type.Banking', 'content_type.Reg',
#               'content_type.Konto']

categories = [
    "Legal", "Specification", "Facts and Figures",
    "Publication", "Payment Scheme",
    "Alternative Payment Systems", "Crypto Payments",
    "Card Payments", "Banking", "Regulations", "Account Payments"
]
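
# Note: clf_text below calls the pipeline with multi_label=True, so each category is
# scored independently and the scores need not sum to 1; this suits documents that
# match several categories at once.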


def transform_output(res: dict) -> list:
    # Pair each label with its score and sort by score, highest first.
    return sorted(
        zip(res["labels"], res["scores"]),
        key=lambda tpl: tpl[1],
        reverse=True,
    )


def clf_text(txt: str | list[str]):
    # The pipeline returns a dict for a single text and a list of dicts for a batch.
    res = classifier(txt, categories, multi_label=True)
    if isinstance(res, list):
        return [transform_output(dct) for dct in res]
    return transform_output(res)
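
# Usage sketch (illustrative, not part of the original app), reusing the example
# sequence from the comments above:
#   clf_text("one day I will see the world")   # -> [(label, score), ...] sorted by score
#   clf_text(["first text", "second text"])    # -> one sorted list per input text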

# Earlier output-formatting attempts, kept for reference:
# items = sorted(zip(res["labels"], res["scores"]), key=lambda tpl: tpl[1], reverse=True)
# d = dict(zip(res["labels"], res["scores"]))
# output = [f"{lbl}:\t{score}" for lbl, score in items]
# return "\n".join(output)
# return list(items)

# Example pipeline output for the toy labels above:
# classifier(sequence_to_classify, candidate_labels)
# {'labels': ['travel', 'dancing', 'cooking'],
#  'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],
#  'sequence': 'one day I will see the world'}

def my_inference_function(name):
    # Simple greeting function from initial testing; not wired into the interface below.
    return "Hello " + name + "!"


gradio_interface = gradio.Interface(
    # fn = my_inference_function,
    fn=clf_text,
    inputs="text",
    outputs=gradio.JSON(),
)
gradio_interface.launch()
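
# Assumption, not in the original Space: when running this script outside Hugging Face
# Spaces, a temporary public URL can be requested with gradio_interface.launch(share=True).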