PYTHON

python RandomForest

This example plots the out-of-bag (OOB) error rate of a RandomForestClassifier against n_estimators for three max_features settings, using warm_start to grow each forest incrementally.

# Author: Kian Ho <hui.kian.ho@gmail.com>
#         Gilles Louppe <g.louppe@gmail.com>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause

import matplotlib.pyplot as plt

from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

RANDOM_STATE = 123

# Generate a binary classification dataset.
X, y = make_classification(
    n_samples=500,
    n_features=25,
    n_clusters_per_class=1,
    n_informative=15,
    random_state=RANDOM_STATE,
)

# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
    (
        "RandomForestClassifier, max_features='sqrt'",
        RandomForestClassifier(
            warm_start=True,
            oob_score=True,
            max_features="sqrt",
            random_state=RANDOM_STATE,
        ),
    ),
    (
        "RandomForestClassifier, max_features='log2'",
        RandomForestClassifier(
            warm_start=True,
            max_features="log2",
            oob_score=True,
            random_state=RANDOM_STATE,
        ),
    ),
    (
        "RandomForestClassifier, max_features=None",
        RandomForestClassifier(
            warm_start=True,
            max_features=None,
            oob_score=True,
            random_state=RANDOM_STATE,
        ),
    ),
]

# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)

# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 150

for label, clf in ensemble_clfs:
    for i in range(min_estimators, max_estimators + 1, 5):
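        # With `warm_start=True`, calling `fit` after increasing `n_estimators`
        # adds new trees to the existing forest instead of retraining from
        # scratch, so the OOB score can be tracked as the ensemble grows.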
        clf.set_params(n_estimators=i)
        clf.fit(X, y)

        # Record the OOB error for each `n_estimators=i` setting.
        oob_error = 1 - clf.oob_score_
        error_rate[label].append((i, oob_error))

# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    plt.plot(xs, ys, label=label)

plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
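
As an optional follow-up (not part of the original example), here is a minimal sketch of how one might read the lowest OOB error, and the n_estimators value that produced it, out of the error_rate mapping built above; the printed format is illustrative only.

# Hypothetical follow-up: for each max_features setting, report the
# n_estimators value that produced the lowest OOB error. Reuses the
# `error_rate` mapping populated by the training loop above.
for label, clf_err in error_rate.items():
    best_n, best_err = min(clf_err, key=lambda pair: pair[1])
    print(f"{label}: lowest OOB error {best_err:.4f} at n_estimators={best_n}")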
