-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathPyMatMul_MPI+C.py
70 lines (58 loc) · 1.73 KB
/
PyMatMul_MPI+C.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
from mpi4py import MPI
import matmul
import pyextrae.mpi as pyextrae
# Banner identifying the program and its version.
print('MATRIX MULTIPLICATION (NxN) V0.2')
# Constants
N = 256  # Matrix dimension: A and B are both N x N.
TaskMaster = 0  # MPI rank that creates the matrices and gathers/verifies results.
MPIT_MATRIX_A = 2  # MPI message tag used when distributing row-bands of A.
def wrap_matmul(A, B, chunk):
    """Delegate the multiplication to the compiled `matmul` extension.

    Thin dispatch point so the backend kernel can be swapped in one
    place; the OpenMP-parallel variant is currently selected.
    """
    # Serial alternative: matmul.matmul(A, B, chunk)
    product = matmul.matmul_omp(A, B, chunk)
    return product
# Main
# ----
# Every MPI rank runs this script. The master (rank 0) creates the input
# matrices, broadcasts B to everyone, scatters row-bands of A, then
# gathers the partial products and verifies them against NumPy.

# MPI variables
comm = MPI.COMM_WORLD
mpiRank = comm.Get_rank()
mpiSize = comm.Get_size()

# The row distribution requires N to split evenly across ranks.
if N % mpiSize != 0:
    print('BAD DIMENSION: N must be divisible by the amount of Tasks')
    # Use comm.Abort rather than exit(): a plain exit() on one rank can
    # leave the others deadlocked in the collectives below.
    comm.Abort(-1)

# Rows of A handled by each rank. Integer division is exact here and
# avoids the float round-trip of int(N / mpiSize).
chunk = N // mpiSize
A, B = None, None
if mpiRank == TaskMaster:
    print('MASTER: Number of MPI tasks is: ' + str(mpiSize))
    print('Creating matrix...')
    A = np.random.randint(10, size=(N, N), dtype='int32')
    B = np.random.randint(10, size=(N, N), dtype='int32')

# Broadcasting Matrix B: every rank needs the full B operand.
B = comm.bcast(B, root=TaskMaster)

# Distribute Matrix A: master sends each worker its band of `chunk` rows
# and keeps the full matrix for verification.
# NOTE(review): the master passes the full A to the kernel below —
# assumes matmul_omp only reads the first `chunk` rows; verify.
if mpiRank == TaskMaster:
    print('MASTER: Distributing matrix A')
    for i in range(1, mpiSize):
        lowerBound = i * chunk
        upperBound = (i + 1) * chunk
        tmp = A[lowerBound:upperBound, :]
        comm.send(tmp, dest=i, tag=MPIT_MATRIX_A)
else:
    A = comm.recv(source=TaskMaster, tag=MPIT_MATRIX_A)

# Matrix Multiplication: each rank computes its `chunk` rows of C.
print('TASK ' + str(mpiRank) + ' | Multiplying...')
C = wrap_matmul(A, B, chunk)

# Gather of all results in MASTER (list of per-rank row blocks).
C = comm.gather(C, root=TaskMaster)

# Master checks the result against NumPy's reference implementation.
if mpiRank == TaskMaster:
    R = np.matmul(A, B)
    print('MASTER: Verifying...')
    # Join the result matrix of all Tasks
    C = np.concatenate(C)
    # Vectorized comparison replaces the original O(N^2) Python loop.
    if not np.array_equal(C, R):
        print('WRONG multiplication!')
        comm.Abort(-1)
    print('CORRECT multiplication!')

comm.Barrier()