Contents
1. Import the data and packages
2. Preprocess the data
3. Standardize the data
4. Select and train models
Preface
This is a write-up of the Kaggle House Prices prediction competition. In an earlier post I got a basic feel for the project and made predictions with a random forest model; the result sat somewhere around the 60% mark, which is poor.
After reading through some other Kagglers' notebooks I learned a lot, and the final result now lands around the 25% mark. I am writing it down here for my own future reference.
(I wrote this post once already, lost it without saving, and nearly lost my mind.)
Straight to the code.
1. Import the data and packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
# Count missing values in train and test side by side, showing only columns that have any
NAs = pd.concat([train.isnull().sum(), test.isnull().sum()], axis=1, keys=['Train', 'Test'])
NAs[NAs.sum(axis=1) > 0]
# Pull the target off the training set, then stack train and test so every
# transformation below is applied to both at once
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
# Drop columns that the reference notebooks found to add little predictive value
features.drop(['Utilities', 'RoofMatl', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'Heating', 'LowQualFinSF',
               'BsmtFullBath', 'BsmtHalfBath', 'Functional', 'GarageYrBlt', 'GarageArea', 'GarageCond', 'WoodDeckSF',
               'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal'],
              axis=1, inplace=True)
2. Preprocess the data
The functions worth calling out in this step:
pandas.Series.mode returns the most frequent value(s). Because ties are possible it returns a Series, so you have to index into the result to get a scalar.
fillna fills in missing values.
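A minimal illustration of why the [0] index is needed (a toy Series I made up, not competition data):
s = pd.Series(['RL', 'RL', 'RM'])
s.mode()     # a Series: index 0 -> 'RL' (could hold several values on a tie)
s.mode()[0]  # 'RL' -- a scalar you can hand to fillna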
# MSSubClass is a category code, not a quantity -- make it a string so it gets dummy-encoded
features['MSSubClass'] = features['MSSubClass'].astype(str)
features['MSZoning'] = features['MSZoning'].fillna(features['MSZoning'].mode()[0])
features['LotFrontage'] = features['LotFrontage'].fillna(features['LotFrontage'].mean())
# For Alley (and similar columns below) NA means "feature absent", so use an explicit label
features['Alley'] = features['Alley'].fillna('NOACCESS')
features['OverallCond'] = features['OverallCond'].astype(str)
features['MasVnrType'] = features['MasVnrType'].fillna(features['MasVnrType'].mode()[0])
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    features[col] = features[col].fillna('NoBSMT')
features['TotalBsmtSF'] = features['TotalBsmtSF'].fillna(0)
features['Electrical'] = features['Electrical'].fillna(features['Electrical'].mode()[0])
features['KitchenAbvGr'] = features['KitchenAbvGr'].astype(str)
features['KitchenQual'] = features['KitchenQual'].fillna(features['KitchenQual'].mode()[0])
features['FireplaceQu'] = features['FireplaceQu'].fillna('NoFP')
for col in ('GarageType', 'GarageFinish', 'GarageQual'):
    features[col] = features[col].fillna('NoGRG')
features['GarageCars'] = features['GarageCars'].fillna(0.0)
features['SaleType'] = features['SaleType'].fillna(features['SaleType'].mode()[0])
features['YrSold'] = features['YrSold'].astype(str)
features['MoSold'] = features['MoSold'].astype(str)
# Combine basement plus first- and second-floor area into a single total-square-footage feature
features['TotalSF'] = features['TotalBsmtSF'] + features['1stFlrSF'] + features['2ndFlrSF']
features.drop(['TotalBsmtSF', '1stFlrSF', '2ndFlrSF'], axis=1, inplace=True)
Take a look at the target. SalePrice is heavily right-skewed, and taking the log brings it much closer to a normal distribution (just remember to exponentiate the predictions back at the end):
ax = sns.distplot(train_labels)
train_labels = np.log(train_labels)
ax = sns.distplot(train_labels)
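To put a number on what the two plots show, you can compare skewness before and after the transform (a quick check I am adding here, not part of the original kernel):
raw_prices = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')['SalePrice']
print('skew before log:', raw_prices.skew())
print('skew after log:', np.log(raw_prices).skew())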
3. Standardize the data
Numeric features:
numeric_features = features.loc[:, ['LotFrontage', 'LotArea', 'GrLivArea', 'TotalSF']]
numeric_features_standardized = (numeric_features - numeric_features.mean()) / numeric_features.std()
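For reference, sklearn's StandardScaler performs essentially the same transform (a sketch; one small difference is that pandas' std() uses ddof=1 while StandardScaler uses the population std, ddof=0):
from sklearn.preprocessing import StandardScaler
# Same (x - mean) / std idea as the manual version above
numeric_std = pd.DataFrame(StandardScaler().fit_transform(numeric_features),
                           index=numeric_features.index,
                           columns=numeric_features.columns)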
Non-numeric features get converted to dummy variables:
# Getting Dummies from Condition1 and Condition2
conditions = set([x for x in features['Condition1']] + [x for x in features['Condition2']])
dummies = pd.DataFrame(data=np.zeros((len(features.index), len(conditions))),
                       index=features.index, columns=conditions)
for i, cond in enumerate(zip(features['Condition1'], features['Condition2'])):
    # pure positional assignment avoids the chained-indexing copy problem
    dummies.iloc[i, dummies.columns.get_loc(cond[0])] = 1
    dummies.iloc[i, dummies.columns.get_loc(cond[1])] = 1
features = pd.concat([features, dummies.add_prefix('Condition_')], axis=1)
features.drop(['Condition1', 'Condition2'], axis=1, inplace=True)
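The manual loop is there because a category can appear in either of the two columns, so a single pd.get_dummies call is not enough. An equivalent vectorized version (my own sketch, not the original kernel's code; it would have to run before the drop above, and the same idea applies to Exterior1st/Exterior2nd below):
c1 = pd.get_dummies(features['Condition1']).astype(int)
c2 = pd.get_dummies(features['Condition2']).astype(int)
# Align the two dummy frames, add them, and cap houses matching in both columns at 1
cond_dummies = c1.add(c2, fill_value=0).clip(upper=1).add_prefix('Condition_')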
# Getting Dummies from Exterior1st and Exterior2nd
exteriors = set([x for x in features['Exterior1st']] + [x for x in features['Exterior2nd']])
dummies = pd.DataFrame(data=np.zeros((len(features.index), len(exteriors))),
                       index=features.index, columns=exteriors)
for i, ext in enumerate(zip(features['Exterior1st'], features['Exterior2nd'])):
    dummies.iloc[i, dummies.columns.get_loc(ext[0])] = 1
    dummies.iloc[i, dummies.columns.get_loc(ext[1])] = 1
features = pd.concat([features, dummies.add_prefix('Exterior_')], axis=1)
# These two columns contain NaNs, which produce an 'Exterior_nan' dummy -- drop it along with the originals
features.drop(['Exterior1st', 'Exterior2nd', 'Exterior_nan'], axis=1, inplace=True)
# Getting Dummies from all other categorical vars
for col in features.dtypes[features.dtypes == 'object'].index:
    for_dummy = features.pop(col)
    features = pd.concat([features, pd.get_dummies(for_dummy, prefix=col)], axis=1)
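For anyone new to pd.get_dummies: it one-hot encodes a single column. A toy example with made-up values:
pd.get_dummies(pd.Series(['A', 'B', 'A']), prefix='col')
# -> a two-column indicator frame col_A / col_B (0/1, or True/False in newer pandas versions)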
Assemble the standardized copy of the data, then split everything back into train and test:
features_standardized = features.copy()
# update() overwrites just the four numeric columns with their standardized versions
features_standardized.update(numeric_features_standardized)

train_features = features.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
test_features = features.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
train_features_st = features_standardized.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
test_features_st = features_standardized.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values

# Shuffle both feature sets together with the labels so the rows stay aligned
train_features_st, train_features, train_labels = shuffle(train_features_st, train_features, train_labels, random_state=5)
# The identical random_state makes the two splits line up row for row (y_train equals y_train_st)
x_train, x_test, y_train, y_test = train_test_split(train_features, train_labels, test_size=0.1, random_state=200)
x_train_st, x_test_st, y_train_st, y_test_st = train_test_split(train_features_st, train_labels, test_size=0.1, random_state=200)
4. Select and train models
For this step I went with "notebook-oriented programming": after reading a few other notebooks, I settled on training Gradient Boosting and Elastic Net and averaging their predictions.
Elastic Net's loss function differs from ridge and lasso regression only in the penalty term: rather than L2 alone (ridge) or L1 alone (lasso), it applies both penalties at once.
sklearn ships two Elastic Net estimators; I use the CV variant, which searches out good hyperparameters for you.
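Concretely, sklearn's ElasticNet minimizes the following objective (writing l1_ratio as $\rho$):

$$\min_{w}\;\frac{1}{2n_{\text{samples}}}\lVert y - Xw\rVert_2^2 + \alpha\rho\lVert w\rVert_1 + \frac{\alpha(1-\rho)}{2}\lVert w\rVert_2^2$$

ElasticNetCV then picks $\alpha$ and $\rho$ by cross-validation over the grids passed in below.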
Define a couple of helper functions for evaluating models:
def get_score(prediction, labels):
    # sklearn metrics take (y_true, y_pred) in that order
    print('R2: {}'.format(r2_score(labels, prediction)))
    print('RMSE: {}'.format(np.sqrt(mean_squared_error(labels, prediction))))

def train_test(estimator, x_trn, x_tst, y_trn, y_tst):
    prediction_train = estimator.predict(x_trn)
    print(estimator)
    get_score(prediction_train, y_trn)
    prediction_test = estimator.predict(x_tst)
    print("Test")
    get_score(prediction_test, y_tst)
ENSTest = linear_model.ElasticNetCV(alphas=[0.0001, 0.0005, 0.001, 0.01, 0.1, 1, 10],
                                    l1_ratio=[.01, .1, .5, .9, .99],
                                    max_iter=5000).fit(x_train_st, y_train_st)
train_test(ENSTest, x_train_st, x_test_st, y_train_st, y_test_st)
# cross_val_score uses the estimator's default scorer, which is R2 for regressors (not accuracy)
scores = cross_val_score(ENSTest, train_features_st, train_labels, cv=5)
print("R2: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
GBest = ensemble.GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=3,
                                           max_features='sqrt', min_samples_leaf=15,
                                           min_samples_split=10, loss='huber').fit(x_train, y_train)
train_test(GBest, x_train, x_test, y_train, y_test)
# Tree ensembles are insensitive to feature scaling, so scoring on the standardized set is harmless
scores = cross_val_score(GBest, train_features_st, train_labels, cv=5)
print("R2: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
Retrain on the full training set and produce the final predictions:
# Refit both models on the full training set
GB_model = GBest.fit(train_features, train_labels)
ENST_model = ENSTest.fit(train_features_st, train_labels)
# Predictions are in log space, so exponentiate back before averaging the two models
Final_labels = (np.exp(GB_model.predict(test_features)) + np.exp(ENST_model.predict(test_features_st))) / 2
pd.DataFrame({'Id': test.Id, 'SalePrice': Final_labels}).to_csv('./submission.csv', index=False)
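A quick sanity check on the file before submitting (my own habit, not part of the original kernel; this competition's test set has 1459 rows):
sub = pd.read_csv('./submission.csv')
print(sub.shape)                         # expect (1459, 2)
print(sub['SalePrice'].isnull().sum())   # expect 0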



