Implementing the C4.5 Decision Tree Algorithm in Python

The C4.5 algorithm uses the information gain ratio in place of ID3's information gain for feature selection, which overcomes information gain's bias toward features with many distinct values. The information gain ratio is defined as follows:

GainRatio(D, A) = Gain(D, A) / SplitInfo_A(D), where SplitInfo_A(D) = -sum_v (|D_v| / |D|) * log2(|D_v| / |D|)

Here D is the data set, A is a candidate feature, and D_v is the subset of D in which A takes its v-th value.
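As a quick check of the definition, the stand-alone snippet below (not part of the original listing) computes the split information of a single feature column the same way the computeSplitInfo method does later; the sample column is made up for illustration.

# -*- coding: utf-8 -*-
import math

def split_info(feature_column):
    # split information of one column: -sum(p * log2(p)) over its distinct values
    n = float(len(feature_column))
    return -sum((feature_column.count(v) / n) * math.log(feature_column.count(v) / n, 2)
                for v in set(feature_column))

# a toy binary feature that splits 10 samples 6/4:
# -(0.6*log2(0.6) + 0.4*log2(0.4)) is approximately 0.971
print split_info(["yes"] * 6 + ["no"] * 4)

The full C45DTree implementation follows.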

# -*- coding: utf-8 -*-


from numpy import *
import math
import copy
import cPickle as pickle


class C45DTree(object):
 def __init__(self): # constructor
  self.tree = {} # the learned decision tree
  self.dataSet = [] # training data set
  self.labels = [] # feature (column) labels


 # load the data set from a tab-separated text file
 def loadDataSet(self, path, labels):
  recordList = []
  fp = open(path, "rb") # read the whole file at once
  content = fp.read()
  fp.close()
  rowList = content.splitlines() # split the content into rows
  recordList = [row.split("\t") for row in rowList if row.strip()] # skip blank lines; fields are Tab-separated
  self.dataSet = recordList
  self.labels = labels


 # train the model: build the decision tree
 def train(self):
  labels = copy.deepcopy(self.labels)
  self.tree = self.buildTree(self.dataSet, labels)


 # build the decision tree: a recursive function that stops when the data can no longer be split
 def buildTree(self, dataSet, labels):
  cateList = [data[-1] for data in dataSet] # extract the class label (last column) of every record
  # stopping condition 1: every record in cateList has the same class; return that class label
  if cateList.count(cateList[0]) == len(cateList):
   return cateList[0]
  # stopping condition 2: no features are left to split on; return the majority class
  if len(dataSet[0]) == 1:
   return self.maxCate(cateList)
  # core algorithm
  bestFeat, featValueList = self.getBestFeat(dataSet) # pick the best feature by information gain ratio
  bestFeatLabel = labels[bestFeat]
  tree = {bestFeatLabel: {}}
  del (labels[bestFeat])
  for value in featValueList: # branch on every value of the best feature
   subLabels = labels[:] # copy the remaining labels so recursion does not mutate the caller's list
   # split the data set on the chosen feature and value
   splitDataset = self.splitDataSet(dataSet, bestFeat, value)
   subTree = self.buildTree(splitDataset, subLabels) # recurse on the subset
   tree[bestFeatLabel][value] = subTree
  return tree


 # return the class label that appears most often in cateList
 def maxCate(self, cateList):
  return max(set(cateList), key=cateList.count)


 # select the best feature by information gain ratio
 def getBestFeat(self, dataSet):
  Num_Feats = len(dataSet[0][:-1])
  totality = len(dataSet)
  BaseEntropy = self.computeEntropy(dataSet)
  ConditionEntropy = [] # conditional entropy of each feature
  splitInfo = [] # for C4.5: needed to calculate the gain ratio
  allFeatVList = []
  for f in xrange(Num_Feats):
   featList = [example[f] for example in dataSet]
   [splitI, featureValueList] = self.computeSplitInfo(featList)
   allFeatVList.append(featureValueList)
   splitInfo.append(splitI)
   resultGain = 0.0
   for value in featureValueList:
    subSet = self.splitDataSet(dataSet, f, value)
    appearNum = float(len(subSet))
    subEntropy = self.computeEntropy(subSet)
    resultGain += (appearNum / totality) * subEntropy
   ConditionEntropy.append(resultGain) # accumulated conditional entropy for feature f
  infoGainArray = BaseEntropy * ones(Num_Feats) - array(ConditionEntropy)
  infoGainRatio = infoGainArray / (array(splitInfo) + 1e-10) # C4.5 gain ratio; the small epsilon avoids division by zero when a feature has a single value
  bestFeatureIndex = argsort(-infoGainRatio)[0]
  return bestFeatureIndex, allFeatVList[bestFeatureIndex]

 # compute the split information of one feature column
 def computeSplitInfo(self, featureVList):
  numEntries = len(featureVList)
  featureValueSetList = list(set(featureVList))
  valueCounts = [featureVList.count(featVal) for featVal in featureValueSetList]
  pList = [float(item) / numEntries for item in valueCounts]
  lList = [item * math.log(item, 2) for item in pList]
  splitInfo = -sum(lList)
  return splitInfo, featureValueSetList


 # compute the Shannon entropy of a data set
 def computeEntropy(self, dataSet):
  dataLen = float(len(dataSet))
  cateList = [data[-1] for data in dataSet] # extract the class label of every record
  # build a dict with the class labels as keys and their occurrence counts as values
  items = dict([(i, cateList.count(i)) for i in cateList])
  infoEntropy = 0.0
  for key in items: # entropy: H = -sum(p * log2(p)), accumulated as infoEntropy -= prob * log(prob, 2)
   prob = float(items[key]) / dataLen
   infoEntropy -= prob * math.log(prob, 2)
  return infoEntropy


 # split the data set: keep the records whose feature at column "axis" equals "value", and delete that column from each kept record
 # dataSet: the data set; axis: the feature column index; value: the feature value to match
 def splitDataSet(self, dataSet, axis, value):
  rtnList = []
  for featVec in dataSet:
   if featVec[axis] == value:
    rFeatVec = featVec[:axis] # list slice: columns 0..(axis-1)
    rFeatVec.extend(featVec[axis + 1:]) # append the columns after axis, dropping the matched feature column
    rtnList.append(rFeatVec)
  return rtnList

 # persist the tree to disk with pickle
 def storetree(self, inputTree, filename):
  fw = open(filename, 'wb') # binary mode for pickle
  pickle.dump(inputTree, fw)
  fw.close()

 # load a pickled tree from disk
 def grabTree(self, filename):
  fr = open(filename, 'rb') # binary mode for pickle
  tree = pickle.load(fr)
  fr.close()
  return tree
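
The class builds, stores, and reloads a tree but does not include a classifier. Below is a minimal prediction sketch, not part of the original listing, assuming the nested-dict layout that buildTree produces ({featureLabel: {featureValue: subtree-or-class}}); the function name predict and its arguments are illustrative.

# A minimal classification sketch for the tree built above (assumed layout:
# {featureLabel: {featureValue: subtree-or-class-label}}).
def predict(inputTree, featLabels, testVec):
    root = list(inputTree.keys())[0] # the feature tested at this node
    secondDict = inputTree[root]
    featIndex = featLabels.index(root) # position of that feature in the test vector
    branch = secondDict[testVec[featIndex]] # follow the branch for the observed value
    if isinstance(branch, dict): # internal node: keep walking
        return predict(branch, featLabels, testVec)
    return branch # leaf: the predicted class label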

Calling code

# -*- coding: utf-8 -*-

from numpy import *
from C45DTree import *

dtree = C45DTree()
dtree.loadDataSet("dataset.dat", ["age", "revenue", "student", "credit"])
dtree.train()

dtree.storetree(dtree.tree, "data.tree")
mytree = dtree.grabTree("data.tree")
print mytree
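
For reference, loadDataSet expects a Tab-separated text file whose last column is the class label. The rows below are a made-up illustration of the expected layout of dataset.dat, matching the four feature labels passed above; the actual file contents are not shown in the original article.

youth	high	no	fair	no
youth	high	no	excellent	no
middle_aged	high	yes	fair	yes
senior	medium	no	excellent	no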
That is all for this article. I hope it helps with your study, and thank you for your support.
