
Merge pull request #30 from THIYAGU22/patch-3

text_preprocessing.py
master
Jeffery Russell, 4 years ago
committed by GitHub
Commit bb2fb4975d
No known key found for this signature in database. GPG key ID: 4AEE18F83AFDEB23
2 files changed, 80 insertions(+), 0 deletions(-)
  1. ML Cookbook/text_preprocessing (+41, -0)
  2. text_preprocessing.py (+39, -0)

ML Cookbook/text_preprocessing (+41, -0)

@@ -0,0 +1,41 @@
from shutil import copyfile

# Copy the helper module into the working directory (Kaggle-style paths)
copyfile(src="../input/cleantext/cleantext.py", dst="../working/cleantext.py")

# import all our functions
from cleantext import *
#!pylint cleantext

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

training = [
    "I am master of all",
    "I am an absolute learner"
]
generalization = [
    "I am absolute learner learner"
]

# master_clean_text comes from cleantext via the star import above;
# passing it as preprocessor overrides CountVectorizer's default
# preprocessing (strip_accents and lowercase)
vectorization = CountVectorizer(
    stop_words="english",
    preprocessor=master_clean_text)
vectorization.fit(training)

# Invert the term -> column-index mapping so columns can be labeled in order
build_vocab = {
    index: term
    for term, index in vectorization.vocabulary_.items()
}
vocab = [build_vocab[i] for i in range(len(build_vocab))]

# Bare expression: rendered as the cell output in a notebook
pd.DataFrame(
    data=vectorization.transform(generalization).toarray(),
    index=["generalization"],
    columns=vocab
)

text_preprocessing.py (+39, -0)

@@ -0,0 +1,39 @@
# import all our functions from the local helper module
from clean_text import *
#!pylint clean_text

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

training = [
    "I am master of all",
    "I am an absolute learner"
]
generalization = [
    "I am absolute learner learner"
]

# master_clean_text comes from clean_text via the star import above
vectorization = CountVectorizer(
    stop_words="english",
    preprocessor=master_clean_text)
vectorization.fit(training)

# Invert the term -> column-index mapping so columns can be labeled in order
build_vocab = {
    index: term
    for term, index in vectorization.vocabulary_.items()
}
vocab = [build_vocab[i] for i in range(len(build_vocab))]

extracted = pd.DataFrame(
    data=vectorization.transform(generalization).toarray(),
    index=["generalization"],
    columns=vocab
)
print(extracted)
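
Both files depend on a cleantext / clean_text helper module that is not part of this commit, so the snippets above are not runnable on their own. Below is a minimal sketch of the assumed helper: a hypothetical master_clean_text() that lowercases, strips punctuation, and collapses whitespace. The real helper may do more (stemming, number removal, etc.).

import re
import string

def master_clean_text(text):
    # Lowercase, drop punctuation, and normalize runs of whitespace
    text = text.lower()
    text = text.translate(str.maketrans("", "", string.punctuation))
    return re.sub(r"\s+", " ", text).strip()

With this stand-in and stop_words="english", fitting on the two training sentences leaves the vocabulary [absolute, learner, master], so print(extracted) should show a single "generalization" row with counts 1, 2, and 0. The exact columns depend on what the real master_clean_text removes.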
