Loading a saved classifier: "Vocabulary wasn't fitted" error

I read all the questions related to this, but did not find a working solution:

Creating my classifier:

    import csv

    import Stemmer
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.svm import LinearSVC
    from sklearn.externals import joblib

    english_stemmer = Stemmer.Stemmer('en')

    class StemmedTfidfVectorizer(TfidfVectorizer):
        def build_analyzer(self):
            analyzer = super(TfidfVectorizer, self).build_analyzer()
            return lambda doc: english_stemmer.stemWords(analyzer(doc))

    tf = StemmedTfidfVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0,
                                max_features=200000, stop_words='english')

    def create_tfidf(f):
        docs = []
        targets = []
        with open(f, "r") as sentences_file:
            reader = csv.reader(sentences_file, delimiter=';')
            reader.next()  # skip the header row
            for row in reader:
                docs.append(row[1])
                targets.append(row[0])
        tfidf_matrix = tf.fit_transform(docs)
        print tfidf_matrix.shape
        # print tf.get_feature_names()
        return tfidf_matrix, targets

    X, y = create_tfidf("l0.csv")
    clf = LinearSVC().fit(X, y)
    _ = joblib.dump(clf, 'linearL0_3gram_100K.pkl', compress=9)

This part works and generates the .pkl file, which I then try to load in another script:

    import Stemmer
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.externals import joblib

    english_stemmer = Stemmer.Stemmer('en')

    class StemmedTfidfVectorizer(TfidfVectorizer):
        def build_analyzer(self):
            analyzer = super(TfidfVectorizer, self).build_analyzer()
            return lambda doc: english_stemmer.stemWords(analyzer(doc))

    tf = StemmedTfidfVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0,
                                max_features=200000, stop_words='english')

    clf = joblib.load('linearL0_3gram_100K.pkl')
    print clf

    test = "My super elaborate test string to test predictions"
    print test + clf.predict(tf.transform([test]))[0]  # fails here (see traceback below)

And I get ValueError: Vocabulary wasn't fitted or is empty!

Edit: full traceback, added on request

      File "classifier.py", line 27, in <module>
        print test + clf.predict(tf.transform([test]))[0]
      File "/home/ec2-user/.local/lib/python2.7/site-packages/sklearn/feature_extraction/text.py", line 1313, in transform
        X = super(TfidfVectorizer, self).transform(raw_documents)
      File "/home/ec2-user/.local/lib/python2.7/site-packages/sklearn/feature_extraction/text.py", line 850, in transform
        self._check_vocabulary()
      File "/home/ec2-user/.local/lib/python2.7/site-packages/sklearn/feature_extraction/text.py", line 271, in _check_vocabulary
        check_is_fitted(self, 'vocabulary_', msg=msg)
      File "/home/ec2-user/.local/lib/python2.7/site-packages/sklearn/utils/validation.py", line 627, in check_is_fitted
        raise NotFittedError(msg % {'name': type(estimator).__name__})
    sklearn.utils.validation.NotFittedError: StemmedTfidfVectorizer - Vocabulary wasn't fitted.
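In hindsight, the traceback points at the real problem: the tf in the second script is a brand-new vectorizer that is never fitted there, so its vocabulary_ attribute does not exist when transform() is called. A minimal sketch (toy strings, not the real data) that reproduces the same failure:

    from sklearn.feature_extraction.text import TfidfVectorizer

    vec = TfidfVectorizer()
    try:
        # transform() before fit()/fit_transform() -> vocabulary_ is missing
        vec.transform(["some unseen document"])
    except Exception as e:
        print type(e).__name__ + ": " + str(e)

    # after fitting, the same call works because vocabulary_ now exists
    vec.fit(["a tiny toy corpus", "just enough to build a vocabulary"])
    print vec.transform(["some unseen document"]).shape

The answer below avoids this by pickling the fitted vectorizer together with the classifier.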
1 answer

Ok, I solved the problem by using a pipeline, so that my fitted vectorizer gets saved in the .pkl as well.

Here's how it looks (it's also simpler):

    import csv

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.svm import LinearSVC
    from sklearn.externals import joblib
    from sklearn.pipeline import Pipeline
    import Stemmer

    english_stemmer = Stemmer.Stemmer('en')

    class StemmedTfidfVectorizer(TfidfVectorizer):
        def build_analyzer(self):
            analyzer = super(TfidfVectorizer, self).build_analyzer()
            return lambda doc: english_stemmer.stemWords(analyzer(doc))

    def create_tfidf(f):
        docs = []
        targets = []
        with open(f, "r") as sentences_file:
            reader = csv.reader(sentences_file, delimiter=';')
            reader.next()  # skip the header row
            for row in reader:
                docs.append(row[1])
                targets.append(row[0])
        return docs, targets

    docs, y = create_tfidf("l1.csv")

    tf = StemmedTfidfVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0,
                                max_features=200000, stop_words='english')
    clf = LinearSVC()

    # vectorizer and classifier are fitted and dumped together
    vec_clf = Pipeline([('tfvec', tf), ('svm', clf)])
    vec_clf.fit(docs, y)
    _ = joblib.dump(vec_clf, 'linearL0_3gram_100K.pkl', compress=9)

And on the loading/prediction side:

    from sklearn.svm import LinearSVC
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.externals import joblib
    import Stemmer

    english_stemmer = Stemmer.Stemmer('en')

    # the class definition must be in scope so joblib can unpickle the pipeline
    class StemmedTfidfVectorizer(TfidfVectorizer):
        def build_analyzer(self):
            analyzer = super(TfidfVectorizer, self).build_analyzer()
            return lambda doc: english_stemmer.stemWords(analyzer(doc))

    clf = joblib.load('linearL0_3gram_100K.pkl')

    test = ["My super elaborate test string to test predictions"]
    print test[0] + " => " + clf.predict(test)[0]  # the pipeline vectorizes and predicts

It is important to note:

The vectorizer (tf) is now part of the pipeline, so at prediction time there is no need to instantiate a fresh vectorizer (which was the point of failure before, since a new object lacks the vocabulary fitted on the training data), nor to call .transform() on the test string yourself; the loaded pipeline applies the fitted vectorizer before predicting.
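As a quick sanity check (not part of the original scripts), the fitted vectorizer can be pulled back out of the loaded pipeline via the step names used above; a minimal sketch, assuming the StemmedTfidfVectorizer class and english_stemmer from the loading script are defined in the same module:

    from sklearn.externals import joblib

    vec_clf = joblib.load('linearL0_3gram_100K.pkl')

    # 'tfvec' and 'svm' are the step names given when the Pipeline was built
    tfvec = vec_clf.named_steps['tfvec']
    svm = vec_clf.named_steps['svm']

    # the vocabulary fitted on the training csv travels inside the pickle,
    # which is why no separate fit/transform is needed at prediction time
    print len(tfvec.vocabulary_)
    print svm.predict(tfvec.transform(["another elaborate test sentence"]))[0]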
