Question edit history

2
Correction
test
CHANGED

@@ -1 +1 @@
-
+Error with evals_result when implementing LightGBM in Python, XGBoost
test
CHANGED
File without changes

1
Correction
test
CHANGED
File without changes
test
CHANGED

@@ -10,6 +10,66 @@
 
 ```python
 
+# Imports
+
+import lightgbm as lgb
+
+from sklearn import datasets
+
+from sklearn.model_selection import train_test_split
+
+import pandas as pd
+
+import numpy as np
+
+from matplotlib import pyplot as plt
+
+from sklearn.metrics import mean_squared_error
+
+from sklearn.model_selection import GridSearchCV
+
+
+
+#############################################################################################
+
+
+
+# Read the file and drop N/A rows
+
+df9 = pd.read_csv(path1, header = 0, index_col = 0, encoding='shift-JIS')
+
+df8 = df9.dropna()
+
+
+
+# Check the classes to classify and their counts
+
+df8['evaluation'].value_counts()
+
+
+
+# Split into training and test sets at 7:3
+
+train_x = df8.drop(['evaluation'], axis=1)
+
+train_y = df8['evaluation']
+
+(train_x, test_x ,train_y, test_y) = train_test_split(train_x, train_y, test_size = 0.3)
+
+
+
+
+
+# Register the datasets with LightGBM
+
+lgb_train = lgb.Dataset(train_x, train_y)
+
+lgb_test = lgb.Dataset(test_x, test_y, reference=lgb_train)
+
+
+
+
+
 # Set the LightGBM hyperparameters
 
 params = {'task': 'train', # set the task to training
@@ -20,7 +80,7 @@
 
 'metric': {'multi_logloss'}, # loss (error) for multiclass classification
 
-'num_class':
+'num_class': 4, # number of classes (since the iris dataset has 3 classes)
 
 'learning_rate': 0.1, # learning rate
 
@@ -31,6 +91,10 @@
 'num_iteration': 100} # number of predictors (decision trees): iterations
 
 
+
+
+
+# Train with LightGBM
 
 lgb_results = {} # container for the training history
 
@@ -44,9 +108,39 @@
 
 num_boost_round=100, # number of iterations
 
-early_stopping_rounds=10
+early_stopping_rounds=10) # early stopping setting
 
-evals_result=lgb_results) # save the history
+evals_result=lgb_results) # save the history
+
+
+
+# Extract the results
+
+loss_train = lgb_results['Train']['multi_logloss'] # training error
+
+loss_test = lgb_results['Test']['multi_logloss'] # generalization error
+
+best_iteration = model.best_iteration # iteration at which the best predictor was obtained
+
+print(best_iteration)
+
+
+
+############################################################################################
+
+
+
+# Plotting
+
+import lightgbm as lgb
+
+from sklearn import datasets
+
+from sklearn.model_selection import train_test_split
+
+import pandas as pd
+
+from matplotlib import pyplot as plt
 
 ```
 
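A note on the `evals_result` error named in the title. The installed LightGBM version is not recorded in this history, so the following is an assumption: in recent LightGBM releases the `evals_result` and `early_stopping_rounds` keyword arguments of `lgb.train()` were removed (deprecated in 3.3, removed in 4.0), and both are passed through callbacks instead. A minimal sketch of the callback-based call, using the iris dataset as a stand-in for the CSV file behind `path1`:

```python
# Sketch only: assumes a callback-capable LightGBM (>= 3.3) and substitutes the
# iris dataset for the CSV data, since path1 is not available here.
import lightgbm as lgb
from sklearn import datasets
from sklearn.model_selection import train_test_split

X, y = datasets.load_iris(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.3)

lgb_train = lgb.Dataset(train_x, train_y)
lgb_test = lgb.Dataset(test_x, test_y, reference=lgb_train)

params = {
    'objective': 'multiclass',  # multiclass classification
    'metric': 'multi_logloss',  # multiclass log loss
    'num_class': 3,             # iris has 3 classes
    'learning_rate': 0.1,
}

lgb_results = {}  # the evaluation history is written into this dict

model = lgb.train(
    params,
    lgb_train,
    valid_sets=[lgb_train, lgb_test],
    valid_names=['Train', 'Test'],
    num_boost_round=100,
    callbacks=[
        lgb.early_stopping(stopping_rounds=10),  # replaces early_stopping_rounds=10
        lgb.record_evaluation(lgb_results),      # replaces evals_result=lgb_results
    ],
)

loss_train = lgb_results['Train']['multi_logloss']  # training error per iteration
loss_test = lgb_results['Test']['multi_logloss']    # validation error per iteration
print(model.best_iteration)                         # best iteration found by early stopping
```

On older releases the keyword-argument form also works, but only if both arguments sit inside the `lgb.train(...)` call; in the edited code the closing parenthesis after `early_stopping_rounds=10)` appears to leave `evals_result=lgb_results)` outside the call, which would raise an error by itself.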
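The "Plotting" section added at the end of the edit only repeats the imports. If the intent is to visualize the recorded history, a sketch of that step, assuming a history dict `lgb_results` filled either by the original `evals_result` argument or by `lgb.record_evaluation()` as above, could look like this:

```python
# Sketch: plot the logged multi_logloss curves from the evaluation-history dict.
from matplotlib import pyplot as plt

plt.plot(lgb_results['Train']['multi_logloss'], label='Train')  # training error
plt.plot(lgb_results['Test']['multi_logloss'], label='Test')    # generalization error
plt.xlabel('Iteration')
plt.ylabel('multi_logloss')
plt.legend()
plt.show()
```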