🥇🥇🥇1.使用sklearn的隨機森林算法對樣本資料進行分類,要求:
🥈🥈🥈(1) 導入乳腺癌資料集
# First import scikit-learn's built-in datasets module
from sklearn.datasets import load_breast_cancer
# Then load the breast-cancer dataset (a Bunch exposing .data / .target)
cancer = load_breast_cancer()
🏆🏆🏆(2) 對比隨機森林算法和決策樹的分類效果;
# Define a decision-tree classifier as the single-tree baseline
dt = DecisionTreeClassifier(random_state=0)
# Define a random-forest classifier to compare against it
rf = RandomForestClassifier(random_state=0)
# NOTE(review): x_train/x_test/y_train/y_test come from the
# train_test_split call shown in the full listing below
dt.fit(x_train,y_train)
rf.fit(x_train,y_train)
# Mean accuracy of each fitted model on the held-out test set
score_dt = dt.score(x_test,y_test)
score_rf = rf.score(x_test,y_test)
使用cross_val_score進行交叉驗證,其中:
cv為份數,即將資料集劃分為n份,依次取每一份做測試集,其他n-1份做訓練集,
# 傳回每次測試準確率評分的清單
# Repeat 10-fold cross-validation ten times to observe score variability.
# FIX: the loop body lost its indentation in the original listing.
# NOTE(review): assumes rf_scores / dt_scores were initialised as empty
# lists and `cancer` was loaded earlier in the article.
for i in range(10):
    # Mean 10-fold CV accuracy of a 25-tree random forest
    rf_score = cross_val_score(RandomForestClassifier(n_estimators=25), cancer.data,
                               cancer.target, cv=10).mean()
    rf_scores.append(rf_score)
    # Mean 10-fold CV accuracy of a single decision tree
    dt_score = cross_val_score(DecisionTreeClassifier(), cancer.data, cancer.target, cv=10).mean()
    dt_scores.append(dt_score)
🏀🏀🏀(3) 測試弱分類器個數n_estimators對分類精度的影響。
# Sweep the number of weak learners (1..49) and record the mean 10-fold
# CV accuracy for each forest size.
# FIX: the loop body lost its indentation in the original listing.
rf_scores = []
for i in range(1, 50):
    rf = RandomForestClassifier(n_estimators=i)
    rf_score = cross_val_score(rf, cancer.data, cancer.target, cv=10).mean()
    rf_scores.append(rf_score)
完整代碼:
# Import scikit-learn's built-in datasets module
from sklearn.datasets import load_breast_cancer
# Import the decision-tree and random-forest classifier classes
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# Import the model-validation helpers
from sklearn.model_selection import train_test_split,cross_val_score
import matplotlib.pyplot as plt
# Load the breast-cancer dataset
cancer = load_breast_cancer()
# Split train/test automatically; test_size is the held-out fraction
x_train, x_test, y_train, y_test = train_test_split(cancer.data,cancer.target,test_size=0.3)
# Decision-tree classifier used as the single-tree baseline
dt = DecisionTreeClassifier(random_state=0)
# Random-forest classifier to compare against it
rf = RandomForestClassifier(random_state=0)
dt.fit(x_train,y_train)
rf.fit(x_train,y_train)
score_dt = dt.score(x_test,y_test)
score_rf = rf.score(x_test,y_test)
# Print the hold-out accuracies
print('Single Tree : ', score_dt)
print('Random Forest : ', score_rf)
dt_scores = []
rf_scores = []
# Cross-validate with cross_val_score, where:
# cv is the number of folds; each fold serves once as the test set while
# the remaining cv-1 folds form the training set.
# .mean() averages the per-fold accuracy scores.
# FIX: the loop body lost its indentation in the original listing.
for i in range(10):
    rf_score = cross_val_score(RandomForestClassifier(n_estimators=25), cancer.data,
                               cancer.target, cv=10).mean()
    rf_scores.append(rf_score)
    dt_score = cross_val_score(DecisionTreeClassifier(), cancer.data, cancer.target, cv=10).mean()
    dt_scores.append(dt_score)
# Plot the score-comparison curves
plt.figure()
plt.title('Random Forest VS Decision Tree')
plt.xlabel('Index')
plt.ylabel('Accuracy')
plt.plot(range(10),rf_scores,label = 'Random Forest')
plt.plot(range(10),dt_scores,label = 'Decision Tree')
plt.legend()
plt.show()
# Examine how the number of weak learners affects accuracy
rf_scores = []
for i in range(1,50):
    rf = RandomForestClassifier(n_estimators=i)
    rf_score = cross_val_score(rf, cancer.data, cancer.target, cv=10).mean()
    rf_scores.append(rf_score)
plt.figure()
plt.title('Random Forest')
plt.xlabel('n_estimators')
plt.ylabel('Accuracy')
plt.plot(range(1,50),rf_scores)
plt.show()
可視化結果:
⚽⚽⚽⚽2.使用sklearn的AdaBoost和GradientBoost對乳腺癌資料進行分類,要求:
兩個方法的用法相同,此處以其中一個為例說明
🎖🎖🎖(1) 導入乳腺癌資料集
# First import scikit-learn's built-in datasets module
from sklearn.datasets import load_breast_cancer
# Then load the breast-cancer dataset (a Bunch exposing .data / .target)
cancer = load_breast_cancer()
🎲🎲🎰(2) 測試learning_rate參數對分類效果的影響;
# Test how the learning_rate parameter affects classification accuracy.
# FIX: the loop body lost its indentation in the original listing.
# NOTE(review): assumes `abc` (an AdaBoostClassifier) and the train/test
# split are defined earlier in the full listing.
abc_scores = []
for i in np.arange(0.1, 1, 0.05):
    abc.learning_rate = i
    abc.fit(x_train, y_train)
    abc_score = abc.score(x_test, y_test)
    abc_scores.append(abc_score)
👓👓🕶(3) 測試n_estimators參數對分類效果的影響。
# Test how the n_estimators parameter affects classification accuracy.
# BUG FIX: the original assigned abc.estimators_, which is the *fitted*
# list of learners (overwritten by fit()), so the ensemble size never
# changed; the hyper-parameter to set is n_estimators.
# Also restored the loop indentation lost in the original listing.
abc_scores = []
for i in range(1, 50):
    abc.n_estimators = i
    abc.fit(x_train, y_train)
    abc_score = abc.score(x_test, y_test)
    abc_scores.append(abc_score)
完整代碼:
1.AdaBoost
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split,cross_val_score
import matplotlib.pyplot as plt
import numpy as np
# Load the data and split train/test (random_state fixed for reproducibility)
cancer = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(cancer.data,cancer.target, test_size=0.3,random_state=1)
# Single decision tree as baseline; AdaBoost over decision-tree base learners
dt = DecisionTreeClassifier()
abc = AdaBoostClassifier(DecisionTreeClassifier(), algorithm='SAMME.R', n_estimators=20,learning_rate=0.1)
abc.fit(x_train,y_train)
dt.fit(x_train,y_train)
score_abc = abc.score(x_test,y_test)
score_dt = dt.score(x_test,y_test)
# Print the hold-out accuracies
print('Ada Boost : ', score_abc)
print('Decision Tree : ', score_dt)
# Test how the learning_rate parameter affects accuracy
# FIX: restored the loop indentation lost in the original listing.
learning_rates = np.arange(0.1, 1, 0.05)
abc_scores = []
for lr in learning_rates:
    abc.learning_rate = lr
    abc.fit(x_train, y_train)
    abc_scores.append(abc.score(x_test, y_test))
# Plot accuracy against the learning rate actually used
# (the original plotted against the sample index, mislabelling the x-axis)
plt.figure()
plt.title('AdaBoost')
plt.xlabel('learning_rate')
plt.ylabel('Accuracy')
plt.plot(learning_rates, abc_scores)
plt.show()
# Test how the n_estimators parameter affects accuracy.
# BUG FIX: the original assigned abc.estimators_ (the fitted learner list,
# overwritten by fit()), so the ensemble size never changed; set the
# hyper-parameter n_estimators instead.
abc_scores = []
for n in range(1, 50):
    abc.n_estimators = n
    abc.fit(x_train, y_train)
    abc_scores.append(abc.score(x_test, y_test))
# Plot the result
plt.figure()
plt.title('AdaBoost')
plt.xlabel('n_estimators')
plt.ylabel('Accuracy')
plt.plot(range(1,50),abc_scores)
plt.show()
# Compare the two models via repeated 10-fold cross-validation
abc_scores = []
dt_scores = []
for i in range(20):
    abc_score = cross_val_score(abc, cancer.data, cancer.target, cv=10).mean()
    abc_scores.append(abc_score)
    dt_score = cross_val_score(dt, cancer.data, cancer.target, cv=10).mean()
    dt_scores.append(dt_score)
# Plot the score-comparison curves
plt.figure()
plt.title('AdaBoost VS Decision Tree')
plt.xlabel('Index')
plt.ylabel('Accuracy')
plt.plot(range(20),dt_scores,label = 'Decision Tree')
plt.plot(range(20),abc_scores,label = 'AdaBoost')
plt.legend()
plt.show()
可視化結果:
2.Gradient Boost
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split,cross_val_score
import matplotlib.pyplot as plt
import numpy as np
# Load the data and split train/test (random_state fixed for reproducibility)
cancer = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(cancer.data,cancer.target,test_size=0.3,random_state=1)
# Gradient boosting model and single decision tree as baseline
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
dt = DecisionTreeClassifier()
gbc.fit(x_train,y_train)
dt.fit(x_train,y_train)
score_gbc = gbc.score(x_test,y_test)
score_dt = dt.score(x_test,y_test)
# Print the hold-out accuracies
print('Gradient Boost : ', score_gbc)
print('Decision Tree : ', score_dt)
# Test how the learning_rate parameter affects accuracy
# FIX: restored the loop indentation lost in the original listing.
learning_rates = np.arange(0.1, 1, 0.05)
gbc_scores = []
for lr in learning_rates:
    gbc.learning_rate = lr
    gbc.fit(x_train, y_train)
    gbc_scores.append(gbc.score(x_test, y_test))
# Plot accuracy against the learning rate actually used
# (the original plotted against the sample index, mislabelling the x-axis)
plt.figure()
plt.title('Gradient Boost')
plt.xlabel('learning_rate')
plt.ylabel('Accuracy')
plt.plot(learning_rates, gbc_scores)
plt.show()
# Test how the n_estimators parameter affects accuracy.
# BUG FIX: the original assigned gbc.estimators_ (the fitted learner array,
# overwritten by fit()), so the ensemble size never changed; set the
# hyper-parameter n_estimators instead.
gbc_scores = []
for n in range(1, 50):
    gbc.n_estimators = n
    gbc.fit(x_train, y_train)
    gbc_scores.append(gbc.score(x_test, y_test))
# Plot the result
plt.figure()
plt.title('Gradient Boost')
plt.xlabel('n_estimators')
plt.ylabel('Accuracy')
plt.plot(range(1,50),gbc_scores)
plt.show()
gbc_scores = []
dt_scores = []
# Compare the two models via repeated 10-fold cross-validation
for i in range(20):
    gbc_score = cross_val_score(gbc, cancer.data, cancer.target, cv=10).mean()
    gbc_scores.append(gbc_score)
    dt_score = cross_val_score(dt, cancer.data, cancer.target, cv=10).mean()
    dt_scores.append(dt_score)
# Plot the score-comparison curves
plt.figure()
plt.title('Gradient Boost VS Decision Tree')
plt.xlabel('Index')
plt.ylabel('Accuracy')
plt.plot(range(20),dt_scores,label = 'Decision Tree')
plt.plot(range(20),gbc_scores,label = 'Gradient Boost')
plt.legend()
plt.show()