
Machine Learning - Decision Trees - The C4.5 Decision Tree


To address several shortcomings of the ID3 algorithm, Quinlan improved it into the C4.5 algorithm in 1993. C4.5 successfully resolves many of the problems ID3 runs into and has since been ranked among the top ten algorithms in machine learning.

C4.5 does not change ID3's underlying logic, and the basic program structure is the same as ID3's; what changes is the node-splitting criterion. C4.5 selects features by information gain ratio (GainRatio) rather than information gain (Gain), which overcomes information gain's bias toward features with many distinct values.

Information gain ratio:

GainRatio(S,A) = Gain(S,A) / SplitInfo(S,A)

Here Gain(S,A) is the information gain from the ID3 algorithm, while the split information SplitInfo(S,A) measures the breadth and uniformity with which feature A partitions the sample set S.

SplitInfo(S,A) = -Σ (|Si|/|S|) * log2(|Si|/|S|), summed over the subsets Si into which the distinct values of feature A partition S.
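As a quick sanity check of these formulas, here is a minimal self-contained sketch (toy data invented for illustration, not the training set used below) that computes Gain, SplitInfo, and GainRatio for a single candidate feature:

import math

def entropy(labels):
    n = len(labels)
    return -sum((labels.count(c) / n) * math.log2(labels.count(c) / n)
                for c in set(labels))

# One feature column paired with its class labels
feature = ['a', 'a', 'b', 'b', 'b', 'c']
labels = ['y', 'y', 'y', 'n', 'n', 'n']

base = entropy(labels)  # entropy before the split
# Conditional entropy after splitting on the feature
cond = sum((feature.count(v) / len(feature)) *
           entropy([l for f, l in zip(feature, labels) if f == v])
           for v in set(feature))
gain = base - cond
splitInfo = entropy(feature)  # SplitInfo is the entropy of the partition itself
print(gain / splitInfo)       # the gain ratio, ~0.37 for this toy data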

Code

# C4.5 decision tree: selects the best split feature by information gain ratio
import math
import copy
import pickle

import numpy as np

class C45DTree(object):
    def __init__(self):     # constructor
        self.tree = {}      # the learned tree
        self.dataSet = []   # training data
        self.labels = []    # feature names

    # Load the dataset from a tab-separated text file
    def loadDataSet(self, path, labels):
        with open(path, "r") as fp:  # read the file contents
            content = fp.read()
        rowlist = content.splitlines()  # split into one row per line
        # one record per non-empty line, fields separated by tabs
        recordlist = [row.split("\t") for row in rowlist if row.strip()]
        self.dataSet = recordlist
        self.labels = labels

    # Train the decision tree
    def train(self):
        labels = copy.deepcopy(self.labels)  # buildTree mutates the label list
        self.tree = self.buildTree(self.dataSet, labels)

    # Main routine: recursively build the decision tree
    def buildTree(self, dataSet, labels):
        cateList = [data[-1] for data in dataSet]  # class labels of the current subset
        # Stopping condition 1: every sample has the same class label; return it
        if cateList.count(cateList[0]) == len(cateList):
            return cateList[0]
        # Stopping condition 2: no features left to split on (only the label
        # column remains); return the majority class label
        if len(dataSet[0]) == 1:
            return self.maxCate(cateList)
        # Core of the algorithm: pick the feature with the best gain ratio
        bestFeat, featValueList = self.getBestFeat(dataSet)  # index and values of the best feature
        bestFeatLabel = labels[bestFeat]
        tree = {bestFeatLabel: {}}
        del (labels[bestFeat])
        for value in featValueList:  # grow the tree recursively, one subtree per value
            subLabels = labels[:]  # label list with the used feature removed
            # split off the records matching this value of the best feature
            splitDataset = self.splitDataSet(dataSet, bestFeat, value)
            subTree = self.buildTree(splitDataset, subLabels)  # build the subtree
            tree[bestFeatLabel][value] = subTree
        return tree

    # Return the most frequent class label
    def maxCate(self, catelist):
        return max(set(catelist), key=catelist.count)

    # Shannon entropy of the class labels: H(S) = -sum(p_i * log2(p_i))
    def computeEntropy(self, dataSet):
        datalen = float(len(dataSet))
        cateList = [data[-1] for data in dataSet]  # class labels of this subset
        # dict mapping each class label to its number of occurrences
        items = dict([(i, cateList.count(i)) for i in cateList])
        infoEntropy = 0.0  # accumulate the entropy
        for key in items:
            prob = float(items[key]) / datalen
            infoEntropy -= prob * math.log(prob, 2)
        return infoEntropy
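    # Example: a subset with 9 "yes" and 5 "no" labels has entropy
    # -(9/14)*log2(9/14) - (5/14)*log2(5/14) ≈ 0.940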

    # Split the dataset: keep the records whose feature `axis` equals `value`,
    # dropping that feature's column from each returned record
    # dataSet: data records   axis: feature index   value: feature value to match
    def splitDataSet(self, dataSet, axis, value):
        rtnList = []
        for featVec in dataSet:
            if featVec[axis] == value:
                rFeatVec = featVec[:axis]  # elements 0..axis-1
                rFeatVec.extend(featVec[axis + 1:])  # append the elements after the feature column
                rtnList.append(rFeatVec)
        return rtnList
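    # Example: splitDataSet([['0', '1', 'yes'], ['1', '1', 'no']], 0, '0')
    # returns [['1', 'yes']] -- the matching record minus the feature column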

    # Compute the split information (SplitInfo): the entropy of the partition
    # induced by a feature's distinct values
    def computeSplitInfo(self, featureVList):
        numEntries = len(featureVList)
        featureValueListSetList = list(set(featureVList))
        valueCounts = [featureVList.count(featVec) for featVec in featureValueListSetList]
        # Shannon entropy of the value distribution
        pList = [float(item) / numEntries for item in valueCounts]
        lList = [item * math.log(item, 2) for item in pList]
        splitInfo = -sum(lList)
        return splitInfo, featureValueListSetList
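    # Example: computeSplitInfo(['a', 'a', 'b', 'b']) returns (1.0, ['a', 'b'])
    # -- two equally frequent values give exactly 1 bit (the order of the
    # returned value list may vary, since it comes from a set)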

    # Choose the best split feature by information gain ratio
    def getBestFeat(self, dataSet):
        Num_feats = len(dataSet[0][:-1])  # number of candidate features
        totality = len(dataSet)
        BaseEntropy = self.computeEntropy(dataSet)
        ConditionEntropy = []  # conditional entropy per feature
        splitInfo = []  # split information per feature
        allFeatVList = []  # distinct values per feature
        for f in range(Num_feats):
            featList = [example[f] for example in dataSet]
            splitI, featureValueList = self.computeSplitInfo(featList)
            allFeatVList.append(featureValueList)
            splitInfo.append(splitI)
            resultGain = 0.0
            for value in featureValueList:
                subSet = self.splitDataSet(dataSet, f, value)
                appearNum = float(len(subSet))
                subEntropy = self.computeEntropy(subSet)
                resultGain += (appearNum / totality) * subEntropy
            ConditionEntropy.append(resultGain)  # conditional entropy of feature f
        infoGainArray = BaseEntropy * np.ones(Num_feats) - np.array(ConditionEntropy)
        # C4.5 gain ratio; a feature with a single distinct value has SplitInfo 0,
        # so map its SplitInfo to infinity to avoid dividing by zero (its ratio becomes 0)
        safeSplitInfo = np.where(np.array(splitInfo) > 0, splitInfo, np.inf)
        infoGainRatio = infoGainArray / safeSplitInfo
        bestFeatureIndex = int(np.argmax(infoGainRatio))
        return bestFeatureIndex, allFeatVList[bestFeatureIndex]

    # Classify one sample by walking the tree
    def predict(self, inputTree, featLabels, testVec):
        root = list(inputTree.keys())[0]  # feature name at this node
        secondDict = inputTree[root]  # maps feature value -> subtree or class label
        featIndex = featLabels.index(root)  # position of the feature in the label list
        key = testVec[featIndex]  # the sample's value for that feature
        valueOfFeat = secondDict[key]  # raises KeyError for a value unseen in training
        if isinstance(valueOfFeat, dict):
            classLabel = self.predict(valueOfFeat, featLabels, testVec)  # recurse into the subtree
        else:
            classLabel = valueOfFeat  # leaf node: a class label
        return classLabel


    # Persist the tree to disk with pickle
    def storeTree(self, inputTree, filename):
        with open(filename, 'wb') as fw:
            pickle.dump(inputTree, fw)

    # Load a pickled tree from disk
    def grabTree(self, filename):
        with open(filename, 'rb') as fr:
            return pickle.load(fr)

# Training
dtree = C45DTree()
dtree.loadDataSet("/Users/FengZhen/Desktop/accumulate/機器學習/決策樹/決策樹訓練集.txt", ["age", "revenue", "student", "credit"])
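# The loader assumes one record per line with tab-separated fields and the class
# label in the last column, e.g. "0\t1\t0\t0\tyes" -- four feature values followed
# by the class. (The actual file contents are not shown in the original post;
# this line is only an illustration of the expected format.)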
dtree.train()
print(dtree.tree)

# Persistence: save and reload the tree
dtree.storeTree(dtree.tree, "/Users/FengZhen/Desktop/accumulate/機器學習/決策樹/決策樹C45.tree")
mytree = dtree.grabTree("/Users/FengZhen/Desktop/accumulate/機器學習/決策樹/決策樹C45.tree")
print(mytree)

# Testing
labels = ["age", "revenue", "student", "credit"]
vector = ['0', '1', '0', '0']  # feature values must match the strings seen in training
print(dtree.predict(mytree, labels, vector))
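One caveat: predict raises a KeyError when the test vector contains a feature value that never occurred along the matched branch during training (full C4.5 implementations usually fall back to the majority class in that case). A minimal guard, written as a hypothetical helper around the class above rather than part of the original code, could look like this:

def safePredict(dtree, tree, featLabels, testVec, default=None):
    # Fall back to a caller-supplied default instead of crashing on unseen values
    try:
        return dtree.predict(tree, featLabels, testVec)
    except KeyError:
        return default

print(safePredict(dtree, mytree, labels, vector, default='unknown'))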