
Machine Learning 01: Finding the Best Algorithm

Getting Started with Machine Learning 01

  1. Preparation: download the iris.data.csv file (a plain CSV text file, which Excel can also open)

    Download link

    As shown in the figure below:

    [figure: preview of the downloaded iris.data.csv file]
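If the download is unavailable, the same data also ships with scikit-learn. The sketch below is an optional alternative, not one of the original steps; it builds an equivalent DataFrame, though note that its class labels are 'setosa'/'versicolor'/'virginica' rather than the 'Iris-...' strings used in the CSV file.

    # Optional alternative: build an equivalent DataFrame from scikit-learn's
    # bundled copy of the iris data (assumption: the manual download is skipped).
    from pandas import DataFrame
    from sklearn.datasets import load_iris

    iris = load_iris()
    cols = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width']
    dataset = DataFrame(iris.data, columns=cols)
    # class labels here are 'setosa', 'versicolor', 'virginica'
    dataset['class'] = [iris.target_names[i] for i in iris.target]
    print(dataset.shape)  # expected: (150, 5)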
#   01. Import the libraries

    from pandas import read_csv
    from pandas.plotting import scatter_matrix
    from matplotlib import pyplot
    from sklearn.model_selection import train_test_split
    from sklearn.model_selection import KFold
    from sklearn.model_selection import cross_val_score

    from sklearn.metrics import classification_report
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import accuracy_score
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import GaussianNB
    from sklearn.svm import SVC
    #   02. Load the data
    filename = "iris.data.csv"
    names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
    dataset = read_csv(filename, names=names)
    #  03. Show the data dimensions
    print('Dimensions: %s rows, %s columns' % dataset.shape)
           
#  04. View the first 10 rows of data
    print(dataset.head(10))
           
#  05. Statistical summary of the data
    print(dataset.describe())
           
#  06. Class distribution
    print(dataset.groupby('class').size())

           
#  07. Box plots
    dataset.plot(kind='box',subplots=True,layout=(2,2),sharex=False,sharey=False)
    pyplot.show()
           
#  08. Histograms
    dataset.hist()
    pyplot.show()
           
#  09. Scatter-matrix plot
    scatter_matrix(dataset)
    pyplot.show()
  
           
#  10. Split the dataset into training and validation sets
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.2
seed = 7
X_train,X_validation,Y_train,Y_validation = \
    train_test_split(X,Y,test_size=validation_size,random_state=seed)
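# (Optional sanity check, not one of the original steps: the iris data has
# 150 rows, so an 80/20 split should leave 120 training and 30 validation rows.)
print(X_train.shape, X_validation.shape)  # expected: (120, 4) (30, 4)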
# 11. Define the candidate algorithms
models = {}
models['LR'] = LogisticRegression()
models['LDA'] = LinearDiscriminantAnalysis()
models['KNN'] = KNeighborsClassifier()
models['CART'] = DecisionTreeClassifier()
models['NB'] = GaussianNB()
models['SVM'] = SVC()

#  12. Evaluate the algorithms
results = []
for key in models:
    # shuffle=True is required when setting random_state (scikit-learn >= 0.24)
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = cross_val_score(models[key], X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    print('%s: %f (%f)' % (key, cv_results.mean(), cv_results.std()))
           
The complete code is as follows:





           
#   01. Import the libraries
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score

from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
#   02. Load the data
filename = "iris.data.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(filename, names=names)
#  03. Show the data dimensions
print('Dimensions: %s rows, %s columns' % dataset.shape)

#  04. View the first 10 rows of data
print(dataset.head(10))

#  05. Statistical summary of the data
print(dataset.describe())

#  06. Class distribution
print(dataset.groupby('class').size())

#  07. Box plots
dataset.plot(kind='box',subplots=True,layout=(2,2),sharex=False,sharey=False)
pyplot.show()

#  08. Histograms
dataset.hist()
pyplot.show()

#  09. Scatter-matrix plot
scatter_matrix(dataset)
pyplot.show()

#  10. Split the dataset into training and validation sets
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.2
seed = 7
X_train,X_validation,Y_train,Y_validation = \
    train_test_split(X,Y,test_size=validation_size,random_state=seed)

# 11. Define the candidate algorithms
models = {}
models['LR'] = LogisticRegression()
models['LDA'] = LinearDiscriminantAnalysis()
models['KNN'] = KNeighborsClassifier()
models['CART'] = DecisionTreeClassifier()
models['NB'] = GaussianNB()
models['SVM'] = SVC()

#  12. Evaluate the algorithms
results = []
for key in models:
    # shuffle=True is required when setting random_state (scikit-learn >= 0.24)
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = cross_val_score(models[key], X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    print('%s: %f (%f)' % (key, cv_results.mean(), cv_results.std()))
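
# 13. (Optional sketch) Validate the best model on the hold-out set.
# The classification_report, confusion_matrix and accuracy_score imports above
# are not used by the cross-validation loop; a minimal next step is to refit
# the best-scoring algorithm on the training data and score it on the 20%
# validation split. SVC() below is only a placeholder; substitute whichever
# model had the highest mean accuracy in step 12.
best_model = SVC()
best_model.fit(X_train, Y_train)
predictions = best_model.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))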