# NOTE(review): the assignment target on the first line was truncated in the
# original source; `tokenizer` is reconstructed from context — confirm against
# the rest of the file. The original also contained this whole unit twice,
# verbatim; the duplicate has been removed.
tokenizer = spacy.tokenizer.Tokenizer(textcat_spacy.vocab)

# Fixed label order for the classifier's output vector, taken from the
# pipeline's "textcat" component.
classes = list(textcat_spacy.get_pipe("textcat").labels)


def predict(texts):
    """Score *texts* with the spaCy text classifier.

    Parameters
    ----------
    texts : iterable
        Items are coerced to ``str`` before being piped through the model.

    Returns
    -------
    list[list[float]]
        One inner list per input text, holding the category scores in the
        order given by the module-level ``classes`` list.
    """
    # Coerce every item to str so non-string inputs (e.g. numpy scalars
    # from an explainer) don't break nlp.pipe.
    texts = [str(text) for text in texts]
    # nlp.pipe processes the batch efficiently; collect scores per doc in
    # the fixed `classes` order so rows are comparable across calls.
    return [
        [doc.cats[cat] for cat in classes]
        for doc in textcat_spacy.pipe(texts)
    ]