**一、前言**
关于Docker的安装见博文windows安装Docker
本文涉及的MPI编程为求两个矩阵的乘积,语言为C++
**二、环境配置**
1.因为我们使用的语言是C++,需要gcc或g++支持C++编程,所以查看Docker内哪些镜像有g++
docker search g++
2.选择其中一个镜像拉取
docker pull eclipse/cpp_gcc
3.查看当前所有镜像
docker images
发现拉取成功。
4.用镜像生成容器,并进入命令行
docker run -it -u root --net=host eclipse/cpp_gcc /bin/bash
5.后续下载MPI编程所需要的配置文件时,需要访问国外仓库,我们可以把源换为国内的,速度更快。
sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
cat /etc/apt/sources.list
rm -Rf /var/lib/apt/lists/*
apt-get update
6.安装mpich和libeigen3-dev
apt install mpich
apt install libeigen3-dev
7.退出容器,查看容器id
exit
docker ps -a
8.用容器id查找容器全名Id,并记住全名,在下一步要用到
docker inspect id
**三、编译与运行MPI程序**
1.退出Docker后,将自己写好的mpi代码从本机复制到镜像文件夹内并查看。注:这里我的mpi代码文件为testmpi.cpp;注意区分容器名(id)和容器全名(Id)
docker cp 本机代码路径 容器全名:/home/user/代码文件名
docker start 容器名
docker exec -it -u root 容器名 bash
cd /home/user
ls
可以看到该文件
2.复制mpi编译所需的libmpi.so到当前目录下
cp /usr/lib/libmpi.so /home/user/
cd /home/user
ls
3.mpi编译
g++ testmpi.cpp libmpi.so -o mpi
ls
4.运行结果
mpiexec -n 4 ./mpi
其中,4为设置的进程数
5.testmpi.cpp代码如下
#include#include #include #include #include #include #include #include using namespace std; //生成随机矩阵 int *generate_matrix(int size) { srand((unsigned)time(NULL) + (unsigned)rand()); int *matrix; matrix = (int *)malloc(sizeof(int) * size*size); for (int i = 0; i < size*size; i++) { matrix[i] = rand() % 10; } return matrix; } //输出矩阵 void print_matrx(int *a, int size) { for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { printf("%d ", a[i*size + j]); } printf("n"); } printf("n"); } //矩阵相乘 int * Multiplication(int a[], int b[], int size, int line) { int *result; int temp = 0; result = (int *)malloc(sizeof(int) * size*size); //#pragma omp parallel for num_threads(2) for (int i = 0; i < line; i++) { for (int j = 0; j < size; j++) { temp = 0; for (int k = 0; k < size; k++) temp += a[i*size + k] * b[k*size + j]; result[i*size + j] = temp; } } return result; } int main(int argc, char *argv[]) { clock_t time1, time2; int size = 16, rank, line, num; time1 = clock(); MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &num); int *matrix1; int *matrix2; int *matrix3; int *resultMg; int *revMg; int *resultMg0; line = size / num ; //num为进程数,line为每个进程的行数 matrix1 = (int*)malloc(sizeof(int)*size*size); matrix2 = (int*)malloc(sizeof(int)*size*size); matrix3 = (int*)malloc(sizeof(int)*size*size); resultMg = (int*)malloc(sizeof(int)*size*line); resultMg0 = (int*)malloc(sizeof(int)*size*line); revMg = (int*)malloc(sizeof(int)*size*line); if (rank == 0) { matrix1 = generate_matrix(size); matrix2 = generate_matrix(size); printf("matrix1 is :n"); print_matrx((int *)matrix1, size); printf("matrix2 is :n"); print_matrx((int *)matrix2, size); resultMg0=Multiplication(matrix1,matrix2, size, line); for (int m = 0; m < line; m++) for (int n = 0; n < size; n++) matrix3[m*size + n] = resultMg0[m*size + n]; for (int i = 1; i < num; i++) MPI_Send(matrix2, size*size, MPI_INT, i, 0, MPI_COMM_WORLD); for (int i = 1; i < num; i++) MPI_Send(matrix1 + 
i*line*size, size*line, MPI_INT, i, 1, MPI_COMM_WORLD); for (int i = 1; i < num; i++) { MPI_Recv(resultMg, line*size, MPI_INT, i, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE); for (int m = 0; m < line; m++) for (int n = 0; n < size; n++) matrix3[(i*line + m)*size + n] = resultMg[m*size + n]; } time2 = clock(); print_matrx((int *)matrix3, size); cout << time2 - time1 << endl; free(matrix1); free(matrix2); free(matrix3); free(revMg); free(resultMg); } else { MPI_Recv(matrix2, size*size, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(revMg, size*line, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); resultMg = Multiplication(revMg, matrix2, size, line); MPI_Send(resultMg, line*size, MPI_INT, 0, 3, MPI_COMM_WORLD); } MPI_Finalize(); return 0; }



