栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

coogle 30ML-CV

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

coogle 30ML-CV

import pandas as pd 
import numpy as np
import warnings
warnings.filterwarnings('ignore')

# Load the image stacks. Later indexing (image_train[:, :, i]) shows the
# sample index is on the LAST axis, i.e. shape (96, 96, n_samples).
image_train=np.load('train.npy')
image_test=np.load('test.npy')

# Keypoint targets: one row per training image. Later column access shows
# 8 coordinate columns (left/right eye center, nose tip, bottom lip x/y).
key_point=pd.read_csv('train.csv')

import matplotlib.pyplot as plt
# Sanity check on sample 0: reshape its 8 coordinates into 4 (x, y) points
# and overlay them on the image.
XY=key_point.iloc[0,:].values.reshape(-1,2)
plt.scatter(XY[:,0],XY[:,1])
# NOTE(review): imshow is called AFTER scatter; the points remain visible
# only because images default to a lower zorder — confirm the draw order
# is intentional (imshow-then-scatter is the conventional order).
plt.imshow(image_train[:,:,0],cmap='gray')

# Distribution check: one box plot per keypoint coordinate column to spot
# outliers. The original called sns.boxplot, but seaborn is never imported
# in this file (NameError at runtime), and the whole figure block was
# pasted twice verbatim. Use matplotlib's Axes.boxplot (plt is already
# imported above) and draw the figure once.
fig, axes = plt.subplots(4, 2, figsize=(8, 14))
for i, ax in enumerate(axes.flatten()):
    # Axes.boxplot cannot handle NaN (unlike seaborn), so drop missing
    # values per column before plotting.
    ax.boxplot(key_point.iloc[:, i].dropna())
    ax.set_title(key_point.columns[i])

# Report how many values are missing per column (these rows could simply
# be dropped instead of imputed).
key_point.isna().sum()

# Impute: replace each missing coordinate with its column's most frequent
# value (first row of DataFrame.mode() is the per-column mode).
column_mode = key_point.mode().iloc[0, :]
key_point = key_point.fillna(column_mode, axis=0)

from sklearn.multioutput import MultiOutputRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split


def split_data(X, Y, random_state=2021, test_frac=0.2):
    """Split an image stack and its keypoint table into train/test sets.

    Parameters
    ----------
    X : np.ndarray
        Image stack with the sample index on the LAST axis, shape (H, W, n).
    Y : pd.DataFrame
        Targets, one row per sample. Assumed to carry the default
        RangeIndex 0..n-1 aligned with X's last axis: index labels are
        used as positions into X, so a non-default index would select
        the wrong images.
    random_state : int, optional
        Seed for the row sampling; makes the split reproducible.
    test_frac : float, optional
        Fraction of samples placed in the test set (default 0.2,
        matching the original hard-coded value).

    Returns
    -------
    tuple
        (x_train, y_train, x_test, y_test).
    """
    y_test = Y.sample(frac=test_frac, random_state=random_state)

    test_index = y_test.index.tolist()
    # Sort for a deterministic row order; a raw set difference has no
    # guaranteed iteration order.
    train_index = sorted(set(Y.index) - set(test_index))

    # .loc selects by label. The original used .iloc, which treats the
    # labels as positions and is only correct for a default RangeIndex.
    y_train = Y.loc[train_index]

    x_test = X[:, :, test_index]
    x_train = X[:, :, train_index]

    return x_train, y_train, x_test, y_test

x_train, y_train, x_test, y_test = split_data(image_train, key_point)

# Flatten each 96x96 image into one row vector for the sklearn models.
# Moving the sample axis first and reshaping is equivalent to the original
# per-image .flatten() loops, and deriving the sample counts from the
# arrays replaces the hard-coded 4000/1000, so any split size works.
x_train_ = np.transpose(x_train, (2, 0, 1)).reshape(x_train.shape[2], -1)
x_test_ = np.transpose(x_test, (2, 0, 1)).reshape(x_test.shape[2], -1)

# Multi-output regression: MultiOutputRegressor fits one MLP per target
# column on the flattened training images.
mor = MultiOutputRegressor(MLPRegressor(random_state=2021)).fit(x_train_, y_train)

# Predictions on the held-out split (kept for later inspection).
y_pred = mor.predict(x_test_)

# Flatten the unlabeled test images the same way as the training data;
# the sample count comes from the array instead of the hard-coded 2049.
image_test_ = np.transpose(image_test, (2, 0, 1)).reshape(image_test.shape[2], -1)
y_test_pred_sk_mlp = mor.predict(image_test_)

from keras import models, Input, layers
from keras import backend as K

# Baseline model: a fully connected network on the raw pixels, ending in
# eight independent one-unit regression heads (one per keypoint
# coordinate).
K.clear_session()
input_x = Input(shape=(96, 96, 1))
x = layers.Flatten()(input_x)
for _ in range(3):
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.2)(x)

# Heads in fixed order: left eye x/y, right eye x/y, nose tip x/y,
# bottom lip x/y — must match the target order passed to fit().
head_outputs = [layers.Dense(1)(x) for _ in range(8)]

model = models.Model(inputs=[input_x], outputs=head_outputs)
model.compile(optimizer='rmsprop', loss='mae', metrics=['mse'])

from tensorflow.keras.utils import plot_model
plot_model(model)

# Move the sample axis first and add a trailing channel axis for Keras,
# scaling pixels to [0, 1]. This replaces the per-image copy loops and
# derives the sample counts from the arrays instead of the hard-coded
# 5000/2049.
train_img = np.transpose(image_train, (2, 0, 1)).reshape(-1, 96, 96, 1) / 255
test_img = np.transpose(image_test, (2, 0, 1)).reshape(-1, 96, 96, 1) / 255

# Train the dense baseline. Targets are passed as one single-column frame
# per output head, in the same order the heads were declared.
target_columns = [
    'left_eye_center_x', 'left_eye_center_y',
    'right_eye_center_x', 'right_eye_center_y',
    'nose_tip_x', 'nose_tip_y',
    'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y',
]
model.fit(train_img,
          [key_point[[col]] for col in target_columns],
          batch_size=64,
          epochs=10)

# Convolutional variant: a Conv/MaxPool stack followed by the same eight
# one-unit regression heads as the dense baseline.
K.clear_session()
filter_sizes = [64, 128, 128]
input_x = Input(shape=(96, 96, 1))
x = layers.Conv2D(32, (3, 3), activation='relu')(input_x)
x = layers.MaxPooling2D((2, 2))(x)
for n_filters in filter_sizes:
    x = layers.Conv2D(n_filters, (3, 3), activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)

x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)

# Heads in fixed order: left eye x/y, right eye x/y, nose tip x/y,
# bottom lip x/y — must match the target order passed to fit().
head_outputs = [layers.Dense(1)(x) for _ in range(8)]

model = models.Model(inputs=[input_x], outputs=head_outputs)
model.compile(optimizer='rmsprop', loss='mae', metrics=['mse'])

# Train the CNN on the same inputs and targets as the dense baseline,
# with the target frames listed in head-declaration order.
cnn_target_columns = [
    'left_eye_center_x', 'left_eye_center_y',
    'right_eye_center_x', 'right_eye_center_y',
    'nose_tip_x', 'nose_tip_y',
    'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y',
]
model.fit(train_img,
          [key_point[[col]] for col in cnn_target_columns],
          batch_size=64,
          epochs=10)

from tensorflow.keras.applications import ResNet50

# Deeper variant: a ResNet50 trunk trained from scratch (weights=None is
# what permits the single-channel 96x96 input shape), followed by the
# same eight one-unit regression heads.
K.clear_session()
ResNet50_base = ResNet50(include_top=False, input_shape=(96, 96, 1), weights=None)

input_x = Input(shape=(96, 96, 1))
x = ResNet50_base(input_x)
x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)

# Heads in fixed order: left eye x/y, right eye x/y, nose tip x/y,
# bottom lip x/y — must match the target order passed to fit().
head_outputs = [layers.Dense(1)(x) for _ in range(8)]

model = models.Model(inputs=[input_x], outputs=head_outputs)
model.compile(optimizer='rmsprop', loss='mae', metrics=['mse'])

# Train on the same inputs/targets as the previous two models.
resnet_target_columns = [
    'left_eye_center_x', 'left_eye_center_y',
    'right_eye_center_x', 'right_eye_center_y',
    'nose_tip_x', 'nose_tip_y',
    'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y',
]
model.fit(train_img,
          [key_point[[col]] for col in resnet_target_columns],
          batch_size=64,
          epochs=10)

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/313978.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号