
Changing the dataset used by the Random Forest code leads to anomalous results

I am using random forest code based on here (skip ahead to see the actual problem):

# Random Forest Algorithm on Sonar Dataset
from random import seed
from random import randrange
from csv import reader
from math import sqrt


# Load a CSV file
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row)
    return dataset


# Convert string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())


# Convert string column to integer
def str_column_to_int(dataset, column):
    class_values = [row[column] for row in dataset]
    unique = set(class_values)
    lookup = dict()
    for i, value in enumerate(unique):
        lookup[value] = i
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup


# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split


# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0


# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)
        train_set = sum(train_set, [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores


# Split a dataset based on an attribute and an attribute value
def test_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right


# Calculate the Gini index for a split dataset
def gini_index(groups, classes):
    # count all samples at split point
    n_instances = float(sum([len(group) for group in groups]))
    # sum weighted Gini index for each group
    gini = 0.0
    for group in groups:
        size = float(len(group))
        # avoid divide by zero
        if size == 0:
            continue
        score = 0.0
        # score the group based on the score for each class
        for class_val in classes:
            p = [row[-1] for row in group].count(class_val) / size
            score += p * p
        # weight the group score by its relative size
        gini += (1.0 - score) * (size / n_instances)
    return gini


# Select the best split point for a dataset
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    features = list()
    while len(features) < n_features:
        index = randrange(len(dataset[0]) - 1)
        if index not in features:
            features.append(index)
    for index in features:
        for row in dataset:
            groups = test_split(index, row[index], dataset)
            gini = gini_index(groups, class_values)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}


# Create a terminal node value
def to_terminal(group):
    outcomes = [row[-1] for row in group]
    return max(set(outcomes), key=outcomes.count)


# Create child splits for a node or make terminal
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']
    del node['groups']
    # check for a no split
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # check for max depth
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)
        split(node['left'], max_depth, min_size, n_features, depth + 1)
    # process right child
    if len(right) <= min_size:
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)
        split(node['right'], max_depth, min_size, n_features, depth + 1)


# Build a decision tree
def build_tree(train, max_depth, min_size, n_features):
    root = get_split(train, n_features)
    split(root, max_depth, min_size, n_features, 1)
    return root


# Make a prediction with a decision tree
def predict(node, row):
    if row[node['index']] < node['value']:
        if isinstance(node['left'], dict):
            return predict(node['left'], row)
        else:
            return node['left']
    else:
        if isinstance(node['right'], dict):
            return predict(node['right'], row)
        else:
            return node['right']


# Create a random subsample from the dataset with replacement
def subsample(dataset, ratio):
    sample = list()
    n_sample = round(len(dataset) * ratio)
    while len(sample) < n_sample:
        index = randrange(len(dataset))
        sample.append(dataset[index])
    return sample


# Make a prediction with a list of bagged trees
def bagging_predict(trees, row):
    predictions = [predict(tree, row) for tree in trees]
    return max(set(predictions), key=predictions.count)


# Random Forest Algorithm
def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):
    trees = list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)
        tree = build_tree(sample, max_depth, min_size, n_features)
        trees.append(tree)
    predictions = [bagging_predict(trees, row) for row in test]
    return predictions

To generalize it so that it would run on any dataset, I wrote the following:

import pandas as pd

file_path = 'http://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data'
dataset2 = pd.read_csv(file_path, header=None, sep=',')
v = dataset2.values

f = pd.factorize(v.ravel())[0].reshape(v.shape)

dataset1 = pd.DataFrame(f)
df = dataset1.astype('str')

dataset = df.values.tolist()
target_index = 60
for i in range(0, len(dataset[0])):
    if i != target_index:
        str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, target_index)
n_folds = 5
max_depth = 10
min_size = 1
sample_size = 1.0
n_features = int(sqrt(len(dataset[0]) - 1))


for n_trees in [5]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores))))

The above code works very well for the SONAR dataset. Its structure is:

0.0200,0.0371,0.0428,0.0207,0.0954,0.0986,0.1539,0.1601,0.3109,0.2111,0.1609,0.1582,0.2238,0.0645,0.0660,0.2273,0.3100,0.2999,0.5078,0.4797,0.5783,0.5071,0.4328,0.5550,0.6711,0.6415,0.7104,0.8080,0.6791,0.3857,0.1307,0.2604,0.5121,0.7547,0.8537,0.8507,0.6692,0.6097,0.4943,0.2744,0.0510,0.2834,0.2825,0.4256,0.2641,0.1386,0.1051,0.1343,0.0383,0.0324,0.0232,0.0027,0.0065,0.0159,0.0072,0.0167,0.0180,0.0084,0.0090,0.0032,R 
0.0453,0.0523,0.0843,0.0689,0.1183,0.2583,0.2156,0.3481,0.3337,0.2872,0.4918,0.6552,0.6919,0.7797,0.7464,0.9444,1.0000,0.8874,0.8024,0.7818,0.5212,0.4052,0.3957,0.3914,0.3250,0.3200,0.3271,0.2767,0.4423,0.2028,0.3788,0.2947,0.1984,0.2341,0.1306,0.4182,0.3835,0.1057,0.1840,0.1970,0.1674,0.0583,0.1401,0.1628,0.0621,0.0203,0.0530,0.0742,0.0409,0.0061,0.0125,0.0084,0.0089,0.0048,0.0094,0.0191,0.0140,0.0049,0.0052,0.0044,R 

These are the results (which seem OK):

Trees: 5 
Scores: [100.0, 95.1219512195122, 100.0, 97.5609756097561, 100.0] 
Mean Accuracy: 98.537% 

When I change the dataset to Breast Cancer Wisconsin:

842302,M,17.99,10.38,122.8,1001,0.1184,0.2776,0.3001,0.1471,0.2419,0.07871,1.095,0.9053,8.589,153.4,0.006399,0.04904,0.05373,0.01587,0.03003,0.006193,25.38,17.33,184.6,2019,0.1622,0.6656,0.7119,0.2654,0.4601,0.1189 
842517,M,20.57,17.77,132.9,1326,0.08474,0.07864,0.0869,0.07017,0.1812,0.05667,0.5435,0.7339,3.398,74.08,0.005225,0.01308,0.0186,0.0134,0.01389,0.003532,24.99,23.41,158.8,1956,0.1238,0.1866,0.2416,0.186,0.275,0.08902 

I changed the relevant code to:

import pandas as pd

file_path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data'
dataset2 = pd.read_csv(file_path, header=None, sep=',')
v = dataset2.values

f = pd.factorize(v.ravel())[0].reshape(v.shape)

dataset1 = pd.DataFrame(f)
df = dataset1.astype('str')

dataset = df.values.tolist()
target_index = 1  ## <----
for i in range(0, len(dataset[0])):
    if i != target_index:
        str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, target_index)
n_folds = 5
max_depth = 10
min_size = 1
sample_size = 1.0
n_features = int(sqrt(len(dataset[0]) - 1))


for n_trees in [5]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores))))

It ran for quite a long time, and the results seem wrong:

Trees: 5 
Scores: [0.0, 0.0, 0.0, 0.8849557522123894, 0.0] 
Mean Accuracy: 0.177% 

Have you tried running it repeatedly? There is a reason the word **random** is in the name of the *Random Forest* method... How about trying a few more runs? – sophros

Answer


As far as I know, the Random Forest method is inherently highly data-dependent, and sensitive both to the random seed and to noise in the data. Changing to a dataset with different noise characteristics and class separability may therefore yield mediocre results, even if the method works perfectly on another dataset.

There is also an element of pure chance in the random part of the method, so every result obtained should be validated by repetition. Although your results suggest that the method works for only one of the datasets, it may simply be bad luck in this particular run.
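
One quick way to test the bad-luck hypothesis is to repeat the evaluation under several different seeds. Below is a minimal sketch, assuming the `dataset`, `evaluate_algorithm`, `random_forest` and the hyperparameter variables from the question are already in scope; if the mean accuracy stays near zero for every seed, the cause is systematic rather than chance.

from random import seed

n_trees = 5
mean_scores = []
for s in range(5):
    # re-seed so each repeat reshuffles the folds and the bootstrap samples
    seed(s)
    scores = evaluate_algorithm(dataset, random_forest, n_folds,
                                max_depth, min_size, sample_size,
                                n_trees, n_features)
    mean_scores.append(sum(scores) / float(len(scores)))
    print('seed %d -> mean accuracy %.3f%%' % (s, mean_scores[-1]))

print('across seeds: min %.3f%%, max %.3f%%' % (min(mean_scores), max(mean_scores)))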

If you really need to dive deep into the topic of random forests, I would recommend Gilles Louppe's comprehensive treatment in the (freely available) Understanding Random Forests: From Theory to Practice.

There is also an interesting discussion of the method's sensitivity to outliers on the CrossValidated forum.


Thanks @sophros. As the results for the Breast Cancer Wisconsin dataset show: 4 out of 5 folds produce 0% accuracy, while the fifth produces 88.5%; something looks fishy! – Avi
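
A short diagnostic sketch that may help separate the two explanations, assuming the question's preprocessed `dataset` is in scope: the tree code above always reads the class label from the last column (`row[-1]` in `get_split`, `gini_index` and `to_terminal`), so listing the distinct values found there shows whether that column really holds a small set of class labels after preprocessing.

# the library code trains on row[-1] as the class label
labels = set(row[-1] for row in dataset)
print('distinct values in the column used as the class label:', len(labels))
print('sample values:', list(labels)[:5])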
