Question edit history

Revision 2: Photo added
@@ -32,12 +32,16 @@
+[photo]
 
 When I load this dictionary
 
 ```
 
-
-
 # when using the created dictionary
 
 ---------------------------------------------------------------------------
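As an illustrative aside (not part of the original question): janome's simpledic format expects each line of the CSV to be exactly `surface,part-of-speech,reading`. A minimal round trip, with a made-up entry and file name, might look like this:

```python
# Minimal sketch of janome's "simpledic" user-dictionary format.
# The entry and the file name are made up for illustration.
# Each line of the CSV must be exactly: surface,part-of-speech,reading
from janome.tokenizer import Tokenizer

with open('userdic.csv', 'w', encoding='utf8') as f:
    f.write(u'東京スカイツリー,カスタム名詞,トウキョウスカイツリー\n')

t = Tokenizer('userdic.csv', udic_type='simpledic', udic_enc='utf8')
for token in t.tokenize(u'東京スカイツリーへ行く'):
    print(token)
```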

Revision 1: Error details added
@@ -40,180 +40,234 @@

Added: the error output below, plus the relocated code block that follows it.

```
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-13-13ce50d97c07> in <module>()
     53 #t = Tokenizer()
     54 # when using the created dictionary
---> 55 t = Tokenizer(r'C:\Users\Desktop\100.csv', udic_type='simpledic', udic_enc="utf8")
     56 
     57 ret = []

~\AppData\Local\Continuum\anaconda3\lib\site-packages\janome\tokenizer.py in __init__(self, udic, udic_enc, udic_type, max_unknown_length, wakati, mmap)
    166         if udic.endswith('.csv'):
    167             # build user dictionary from CSV
--> 168             self.user_dic = UserDictionary(udic, udic_enc, udic_type, connections)
    169         elif os.path.isdir(udic):
    170             # load compiled user dictionary

~\AppData\Local\Continuum\anaconda3\lib\site-packages\janome\dic.py in __init__(self, user_dict, enc, type, connections)
    372         """
    373         build_method = getattr(self, 'build' + type)
--> 374         compiledFST, entries = build_method(user_dict, enc)
    375         Dictionary.__init__(self, [compiledFST], entries, connections)
    376 

~\AppData\Local\Continuum\anaconda3\lib\site-packages\janome\dic.py in buildsimpledic(self, user_dict, enc)
    402             for line in f:
    403                 line = line.rstrip()
--> 404                 surface, pos_major, reading = line.split(',')
    405                 part_of_speech = ','.join([pos_major, u'*', u'*', u'*'])
    406                 morph_id = len(surfaces)

ValueError: not enough values to unpack (expected 3, got 1)
```

This error occurs, and I cannot work out what is causing it.
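The unpacking at dic.py line 404 requires every line of the CSV to split into exactly three comma-separated fields; a blank line (for example, a trailing newline at the end of the file) splits into a single empty field, which would produce exactly "expected 3, got 1". A minimal check along those lines, reusing the path from the question, might look like this (an illustrative sketch, not part of the original post):

```python
# Hypothetical diagnostic, not from the original question: report any
# line of the user dictionary that does not split into exactly three
# fields. A blank line yields one empty field, matching "got 1".
with open(r'C:\Users\Desktop\100.csv', encoding='utf8') as f:
    for lineno, line in enumerate(f, start=1):
        fields = line.rstrip().split(',')
        if len(fields) != 3:
            print('line', lineno, 'has', len(fields), 'field(s):', repr(line))
```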

Here is the code:

```python
import pandas as pd
from janome.tokenizer import Tokenizer
from janome.analyzer import Analyzer
from janome.tokenfilter import POSStopFilter
from collections import Counter
from gensim.models import word2vec
import logging
import nltk
import re
from nltk.collocations import *

#from IPython.core.debugger import Pdb; Pdb().set_trace()

# Build the DataFrame -----------------------------------------------------------------------------

# input file
input_file_name = (r'C:\Users\Desktop\Book11.xlsm')

# load the workbook
input_book = pd.ExcelFile(input_file_name)

# get the sheet names (sheet_names returns the names of every sheet in the workbook as a list)
input_sheet_name = input_book.sheet_names

# total number of sheets
num_sheet = len(input_sheet_name)

# show the number of sheets
print('number of sheets:', num_sheet)
# show the sheet names
print('sheet names:', input_sheet_name)
# check the type
print('type :', type(input_book))

# read the second sheet (the standard trouble sheet) as a DataFrame
# skiprows = 5        : skip the first 5 rows
# skip_footer = 1     : skip the last row
# usecols = 'Z,AD:AF' : read columns Z, AD, AE and AF
# names = range(0,4)  : use 0-3 as the column names
input_sheet_df = input_book.parse(input_sheet_name[1],
                                  skiprows = 5,
                                  skip_footer = 1,
                                  usecols = 'Z,AD:AF',
                                  names = range(0,4))

# rename the columns
input_sheet_df = input_sheet_df.rename(columns={0:'異常発生個所', 1:'異常名称', 2:'現象', 3:'原因'})
print('type :', type(input_sheet_df))

# Morphological analysis --------------------------------------------------------------------------

# create the Tokenizer instance
#t = Tokenizer()
# when using the created dictionary
t = Tokenizer(r'C:\Users\Desktop\100.csv', udic_type='simpledic', udic_enc="utf8")

ret = []

docking = input_sheet_df.values.tolist()
#print(type(docking))

for wordlist in docking:
    #print(wordlist)
    tokens = t.tokenize(str(wordlist))
    for token in tokens:
        if token.part_of_speech.split(',')[0] in ['名詞']:
            if not token.part_of_speech.split(',')[1] in ['サ変接続']:
                ret.append(token.base_form)
print(ret)
```

Removed: the earlier copy of the error note and the code block (old lines 53-216); identical content was re-added above, after the new traceback.

Also, using the simple dictionary requires janome version 0.2.7 or later, but I am using 0.3.6, so the version should not be the problem.
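A generic way to confirm the installed version (not in the original post) is to query the installed distribution:

```python
# Generic version check, not part of the original question.
# pkg_resources ships with setuptools, which Anaconda includes.
import pkg_resources
print(pkg_resources.get_distribution('janome').version)
```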