1. 数据标准化（standardization）：使数据近似服从标准高斯分布（均值为 0、方差为 1），
使用 preprocessing.scale() 函数。
# Standardize the rainfall data to zero mean and unit variance with
# preprocessing.scale(), then run the Anderson-Darling normality test.
import numpy as np
from sklearn import preprocessing
from scipy.stats import anderson

rain = np.load('rain.npy')
rain = .1 * rain          # raw values are presumably in 0.1 mm units -> convert to mm (TODO confirm against data source)
rain[rain < 0] = .05 / 2  # negative entries mark trace rainfall; replace with half the 0.05 mm detection limit
scaled = preprocessing.scale(rain)
print("rain mean", scaled.mean())      # should be ~0 after scaling
print("rain variance", scaled.var())   # should be ~1 after scaling
print("anderson rain", anderson(scaled))
2. 正则化（normalization）：将每个样本（每一行）独立缩放到单位范数（这里是 L2 范数为 1），
而不是缩放到固定区间。
方法一:
# Method 1: preprocessing.normalize() rescales each row (sample) to unit L2 norm.
from sklearn import preprocessing
X = [[ 1., -1.,  2.],
     [ 2.,  0.,  0.],
     [ 0.,  1., -1.]]
X_normalized = preprocessing.normalize(X, norm='l2')
# >>> X_normalized
# array([[ 0.40..., -0.40...,  0.81...],
#        [ 1.  ...,  0.  ...,  0.  ...],
#        [ 0.  ...,  0.70..., -0.70...]])
方法二:
# Method 2: the Normalizer class provides the same per-sample scaling
# as a reusable transformer object (its fit() learns nothing).
XY = [[1., -1., 2.],
      [2., 0., 0.],
      [0., 1., -1.]]
normalized_rows = preprocessing.Normalizer().fit(XY).transform(XY)
print(normalized_rows)
3.二值化数据
>>> X = [[ , -, ],
... [ , , ],
... [ , , -]]
>>> binarizer = preprocessing.Binarizer().fit(X) # fit does nothing
>>> binarizer
Binarizer(copy=True, threshold=)
>>> binarizer.transform(X)
array([[ , , ],
[ , , ],
[ , , ]])
指定阈值（threshold=1.1）：大于阈值的元素置为 1，其余置为 0
>>> binarizer = preprocessing.Binarizer(threshold=)
>>> binarizer.transform(X)
array([[ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 0., 0.]])
4.标签二值化
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([, , , , ])
LabelBinarizer(neg_label=, pos_label=)
>>> lb.classes_
array([, , , ])
>>> lb.transform([, ])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
多标签显示
>>> lb.fit_transform([(, ), (,)]) #(,)实例中就包含两个label
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([, , ])