
Varying Regularization in Multi-layer Perceptron in Scikit-learn

A comparison of different values for the regularization parameter 'alpha' on synthetic datasets. The plot shows that different alphas yield different decision functions.

Alpha is a parameter for the regularization term, also known as the penalty term, which combats overfitting by constraining the size of the weights. Increasing alpha may fix high variance (a sign of overfitting) by encouraging smaller weights, resulting in a decision boundary with less curvature. Conversely, decreasing alpha may fix high bias (a sign of underfitting) by allowing larger weights, potentially resulting in a more complicated decision boundary.
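To see this effect directly, here is a minimal, self-contained sketch (independent of the full example below) that fits MLPClassifier with a very small and a very large alpha on a toy dataset and compares the total norm of the learned weights; the larger penalty should shrink them:

import numpy as np
from sklearn.datasets import make_moons
from sklearn.neural_network import MLPClassifier

X, y = make_moons(noise=0.3, random_state=0)
for alpha in (1e-5, 1e2):
    clf = MLPClassifier(alpha=alpha, max_iter=2000, random_state=1).fit(X, y)
    # coefs_ holds one weight matrix per layer; sum their Frobenius norms
    total_norm = sum(np.linalg.norm(W) for W in clf.coefs_)
    print('alpha=%g  total weight norm=%.3f' % (alpha, total_norm))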

New to Plotly?

Plotly's Python library is free and open source! Get started by downloading the client and reading the primer.
You can set up Plotly to work in online or offline mode, or in Jupyter notebooks.
We also have a quick-reference cheatsheet (new!) to help you get started!

Version

In [1]:
import sklearn
sklearn.__version__
Out[1]:
'0.18.1'

Imports

In [2]:
print(__doc__)

import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
Automatically created module for IPython interactive environment

Calculations

In [3]:
h = .02  # step size in the mesh

alphas = np.logspace(-5, 3, 5)
# Three subplot titles (one per dataset column) for each alpha value
names = []
for alpha in alphas:
    for _ in range(3):
        names.append('alpha ' + str(alpha))

# One classifier per alpha value
classifiers = []
for alpha in alphas:
    classifiers.append(MLPClassifier(alpha=alpha, random_state=1))

X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable]

# 6 rows x 3 columns: the raw datasets on the first row, then one row per alpha
fig = tools.make_subplots(rows=6, cols=3,
                          print_grid=False,
                          subplot_titles=tuple(['', '', ''] + names)
                          )

def matplotlib_to_plotly(cmap, pl_entries):
    """Convert a matplotlib colormap into a Plotly colorscale."""
    step = 1.0 / (pl_entries - 1)
    pl_colorscale = []

    for k in range(pl_entries):
        # Sample the colormap and scale the RGB channels to 0-255 integers
        C = [int(c * 255) for c in cmap(k * step)[:3]]
        pl_colorscale.append([k * step, 'rgb' + str((C[0], C[1], C[2]))])

    return pl_colorscale

i = 1  # subplot row counter
j = 1  # subplot column counter
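
As a quick check of the colormap helper, converting matplotlib's RdBu map with three stops returns the list format Plotly expects for a colorscale; each entry pairs a normalized position in [0, 1] with an 'rgb(r, g, b)' string:

print(matplotlib_to_plotly(plt.cm.RdBu, 3))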

Plot Results

In [4]:
for X, y in datasets:
    # preprocess dataset, split into training and test part
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    x_ = np.arange(x_min, x_max, h)
    y_ = np.arange(y_min, y_max, h)
    xx, yy = np.meshgrid(x_, y_)

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    
    # Plot the training points, colored by class label
    p1 = go.Scatter(x=X_train[:, 0],
                    y=X_train[:, 1],
                    mode='markers',
                    marker=dict(color=y_train,
                                colorscale=matplotlib_to_plotly(cm_bright, 5),
                                line=dict(color='black', width=1))
                    )

    # and the testing points
    p2 = go.Scatter(x=X_test[:, 0],
                    y=X_test[:, 1],
                    mode='markers',
                    marker=dict(color=y_test,
                                colorscale=matplotlib_to_plotly(cm_bright, 5),
                                line=dict(color='black', width=1))
                    )
    fig.append_trace(p1, 1, j)
    fig.append_trace(p2, 1, j)
    
    i = 2  # classifier plots start on the second subplot row

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        trace = go.Contour(x=x_, y=y_, z=Z,
                           line=dict(width=0),
                           contours=dict(coloring='heatmap'),
                           colorscale=matplotlib_to_plotly(cm, 3),
                           opacity=0.7, showscale=False)

        # Plot also the training points, colored by class label
        p3 = go.Scatter(x=X_train[:, 0],
                        y=X_train[:, 1],
                        mode='markers',
                        marker=dict(color=y_train,
                                    colorscale=matplotlib_to_plotly(cm_bright, 5),
                                    line=dict(color='black', width=1))
                        )
        # and the testing points
        p4 = go.Scatter(x=X_test[:, 0],
                        y=X_test[:, 1],
                        mode='markers',
                        marker=dict(color=y_test,
                                    colorscale=matplotlib_to_plotly(cm_bright, 5),
                                    line=dict(color='black', width=1))
                        )
        fig.append_trace(trace, i, j)
        fig.append_trace(p3, i, j)
        fig.append_trace(p4, i, j)
        i += 1
        
    j += 1
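
One detail worth noting: MLPClassifier does not implement decision_function, so the hasattr branch above always falls through to predict_proba, and Z holds the predicted probability of the positive class. A quick standalone check, reusing the clf and X_test left over from the loop:

print(hasattr(clf, 'decision_function'))  # False for MLPClassifier
print(clf.predict_proba(X_test[:2]))      # one row per sample; each row sums to 1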
In [5]:
# Hide tick labels, grid lines, and zero lines on all 18 subplot axes
for i in map(str, range(1, 19)):
    x = 'xaxis' + i
    y = 'yaxis' + i
    fig['layout'][y].update(showticklabels=False, ticks='',
                            showgrid=False, zeroline=False)
    fig['layout'][x].update(showticklabels=False, ticks='',
                            showgrid=False, zeroline=False)

fig['layout'].update(height=2000, showlegend=False)
In [6]:
py.iplot(fig)
Out[6]:

[Interactive Plotly figure: a 6x3 grid of subplots with the three datasets on the top row and the decision boundaries for each alpha value below.]
License

Author:

    Issam H. Laradji

License:

    BSD 3 clause