From e6a9d5a2f0215667df08e3674f669c30e3be1431 Mon Sep 17 00:00:00 2001 From: Niklas Birk Date: Mon, 8 Apr 2024 00:26:20 +0200 Subject: [PATCH 1/6] Add distributed computing for negation and addition (no memory distributing) --- src/matrix.py | 23 ++++++++++- src/matrix_mpi.py | 98 ++++++++++++++++++++++++++++++++++++++++----- test/test_matrix.py | 9 +++++ 3 files changed, 119 insertions(+), 11 deletions(-) diff --git a/src/matrix.py b/src/matrix.py index 9e37544..eeeda3a 100644 --- a/src/matrix.py +++ b/src/matrix.py @@ -84,6 +84,25 @@ class Matrix: """ return self.__data__ + @staticmethod + def flatten(matrices: list): + """ + Flattens a list of matrices into one bigger matrix. + The columns must match the first ``Matrix`` in the list and the rows can be arbitrarily. + + :param matrices: A list of matrices. + :type matrices: list + :return: A ``Matrix`` extended by all matrices in the list. + :rtype: ``Matrix`` + """ + flattened_data = [] + rows = 0 + for matrix in matrices: + flattened_data.extend(matrix.get_data()) + rows += matrix.__shape__[0] + cols = matrices[0].__shape__[1] + return Matrix(flattened_data, (rows, cols)) + def shape(self): """ :return: the shape of the matrix, which is ``(rows, columns)`` @@ -219,7 +238,7 @@ class Matrix: def __rmul__(self, other): return self * other - def __abs_sum__(self): + def __abs_sum_of_squares__(self): rows = self.__shape__[0] cols = self.__shape__[1] abs_sum = 0 @@ -258,7 +277,7 @@ class Matrix: :return: the norm as a number """ if f == "frobenius": - norm = math.sqrt(self.__abs_sum__()) + norm = math.sqrt(self.__abs_sum_of_squares__()) elif f == "col sum": norm = max(self.__col_sums__()) elif f == "row sum": diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index 49eb7b1..ae424ea 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -5,9 +5,9 @@ from matrix import Matrix class MatrixMPI: - __comm__ = MPI.COMM_WORLD - __size__ = __comm__.Get_size() - __rank__ = __comm__.Get_rank() + __mpi_comm__ = MPI.COMM_WORLD + __mpi_size__ = __mpi_comm__.Get_size() + __mpi_rank__ = __mpi_comm__.Get_rank() __matrix__: Matrix = None __chunk__: list = None @@ -16,23 +16,103 @@ class MatrixMPI: self.__matrix__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) total_amount_of_rows = self.__matrix__.shape()[0] - chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__size__) - self.__chunk__ = chunks[self.__rank__].tolist() + chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__) + self.__chunk__ = chunks[self.__mpi_rank__].tolist() + + @staticmethod + def of(matrix: Matrix): + return MatrixMPI(matrix.get_data(), matrix.shape()) def __str__(self): return str(self.__matrix__) - def get_rank_data(self): + def get_rank_submatrix(self): rows = len(self.__chunk__) cols = self.__matrix__.shape()[1] return Matrix(self.__matrix__[self.__chunk__], (rows, cols)) def transpose(self): - return self.__matrix__.transpose() + """ + :return: the transpose of the matrix + """ + return MatrixMPI.of(self.__matrix__.transpose()) def T(self): + """ + Same as ``matrix.transpose()`` + + :return: see ``matrix.transpose()`` + """ return self.transpose() + def __eq__(self, other): + """ + Return ``self==value`` -mpi_matrix = MatrixMPI(list(range(1, 25)), (12, 2)) -print(mpi_matrix.transpose()) + :param other: The object to compare to; must be either a ``MatrixMPI``, ``Matrix``, a ``list`` or a ``numpy.ndarray`` + :return: True if data in the matrix are equal to the given data in other for each component, 
otherwise False + """ + if isinstance(other, MatrixMPI): + return self.__matrix__ == other.__matrix__ + else: + return self.__matrix__ == other + + def __neg__(self): + gathered_data = self.__mpi_comm__.gather(-self.get_rank_submatrix()) + data = self.__mpi_comm__.bcast(gathered_data) + return MatrixMPI.of(Matrix.flatten(data)) + + def __add__(self, other): + if isinstance(other, MatrixMPI): + other = other.get_rank_submatrix() + gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() + other) + data = self.__mpi_comm__.bcast(gathered_data) + return MatrixMPI.of(Matrix.flatten(data)) + + def __radd__(self, other): + return self + other + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + return -self + other + + def __truediv__(self, other): + gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() / other) + data = self.__mpi_comm__.bcast(gathered_data) + return MatrixMPI.of(Matrix.flatten(data)) + + def __mul__(self, other): + if isinstance(other, MatrixMPI): + return self.__matrix__ * other.__matrix__ + else: + return self.__matrix__ * other + + def __rmul__(self, other): + return self * other + + def norm(self, f: str = "frobenius"): + """ + Calculates the norm of the matrix. + + A norm is a positive definit, absolute homogeneous and subadditive function. + For Matrices a norm is also sub-multiplicative. + + :param f: The norm to be used, could be either "frobenius", "row sum" or "col sum" + + :return: the norm as a number + """ + return self.__matrix__.norm(f) + + def __getitem__(self, key): + return self.__matrix__[key] + + def __setitem__(self, key, value): + self.__matrix__[key] = value + + +# m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000)) +m1 = MatrixMPI([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) + +print(m1 - m1) diff --git a/test/test_matrix.py b/test/test_matrix.py index 071e5d7..a41bb28 100644 --- a/test/test_matrix.py +++ b/test/test_matrix.py @@ -378,3 +378,12 @@ class TestMatrix(TestCase): expected = Matrix([1, 2, 3, 4, 5, 60, 70, 8, 9, 100, 110, 12, 13, 14, 15, 16], (4, 4)) self.assertEqual(expected, actual) + + def test_should_flatten_list_of_matrices(self): + m1 = Matrix([1, 2, 3, 4, 5, 6, 7, 8, 9], (3, 3)) + m2 = Matrix([1, 2, 3, 4, 5, 6], (2, 3)) + + actual = m1.flatten([m1, m2]) + expected = Matrix([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) + + self.assertEqual(expected, actual) From 987de6acf7fbf4a6868ad30562fc12a6b60b5686 Mon Sep 17 00:00:00 2001 From: niklas Date: Tue, 9 Apr 2024 17:25:42 +0200 Subject: [PATCH 2/6] More work on parallel stuff --- src/cg.py | 91 ++++++++++ src/main.py | 6 + src/matrix_mpi.py | 17 +- src/vector_mpi.py | 15 +- test/test_parallel.py | 406 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 525 insertions(+), 10 deletions(-) create mode 100644 src/cg.py create mode 100644 test/test_parallel.py diff --git a/src/cg.py b/src/cg.py new file mode 100644 index 0000000..d983f9f --- /dev/null +++ b/src/cg.py @@ -0,0 +1,91 @@ +import matplotlib.pyplot as plt +import numpy as np + + +# Funktion zur Berechnung der 2-Norm +def norm(vec): + sum_of_squares = 0 + for i in range(0, len(vec)): + sum_of_squares = sum_of_squares + vec[i] ** 2 + + return np.sqrt(sum_of_squares) + + +# Test, ob die Matrix M positiv definit ist, mittels Cholesky-Zerlegung +def ist_positiv_definit(m): + try: + np.linalg.cholesky(m) + return True + except np.linalg.LinAlgError: + return False + + +n = 1000 +h = 1 / (n - 1) + +# Initialisierung der Matrix A und des 
Vektor f für LGS Au = f +A = np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1) +f = h ** 2 * 2 * np.ones(n) + +# Teste, ob A positiv definitv ist mittels Eigenwerten +if np.all(np.linalg.eigvals(A) <= 0): # Prüfen, ob alle Eigenwerte positiv sind + raise ValueError("A ist nicht positiv definit.") + +# Teste, ob A positiv definit ist mittels Cholesky-Zerlegung +if not (ist_positiv_definit(A)): + raise ValueError("A ist nicht positiv definit.") + +# Teste, ob A symmetrisch ist +if (A != A.T).all(): + raise ValueError("A ist nicht symmetrisch.") + +# Intialisierung des Startvektors x +x = np.ones(n) + +# Toleranz epsilon +eps = 0.001 + +count = 1 + +# Anfangswerte berechnen +r = f - A @ x # Anfangsresiduum +p = r # Anfangsabstiegsrichtung + +print("0. Iterationsschritt: \n") +print("Startvektor:", x) +print("Norm des Anfangsresiduums: ", norm(r)) + +while eps < norm(r) < 1000: + z = A @ p # Matrix-Vektorprodukt berechnen und speichern + + alpha = (np.dot(r, p)) / (np.dot(p, z)) + print(alpha) + + x = x + alpha * p # neue Itterierte x + r = r - alpha * z # neues Residuum + + # Bestimmung der neuen Suchrichtung + beta = - (np.dot(r, z)) / (np.dot(p, z)) + p = r + beta * p # neue konjugierte Abstiegsrichtung + + print(count, ". Iterationsschritt: \n") + print("Aktuelle Iterierte:", x) + print("Norm des Residuums: ", norm(r)) + + count = count + 1 + +# Vergleich mit numpy-interner Lsg +u = np.linalg.solve(A, f) + +print("Lösung mit CG-Verfahren:", x) +print("Numpy interne Lösung:", u) + +if norm(u - x) > eps: + print("Der CG-Algorithmus hat nicht richtig funktioniert!") +else: + print("Der CG-Algorithmus war erfolgreich.") + +plt.plot(x, linewidth=2) +plt.plot(u, linewidth=2) + +plt.show() diff --git a/src/main.py b/src/main.py index e69de29..3bdee1e 100644 --- a/src/main.py +++ b/src/main.py @@ -0,0 +1,6 @@ +# m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000)) +from matrix_mpi import MatrixMPI + +m1 = MatrixMPI([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) + +print(m1 + m1) diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index ae424ea..cafe968 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -5,14 +5,18 @@ from matrix import Matrix class MatrixMPI: - __mpi_comm__ = MPI.COMM_WORLD - __mpi_size__ = __mpi_comm__.Get_size() - __mpi_rank__ = __mpi_comm__.Get_rank() + __mpi_comm__ = None + __mpi_size__ = None + __mpi_rank__ = None __matrix__: Matrix = None __chunk__: list = None - def __init__(self, data=None, shape=None, structure=None, model=None, offset=None, n=None): + def __init__(self, mpi_comm, data=None, shape=None, structure=None, model=None, offset=None, n=None): + self.__mpi_comm__ = mpi_comm + self.__mpi_size__ = self.__mpi_comm__.Get_size() + self.__mpi_rank__ = self.__mpi_comm__.Get_rank() + self.__matrix__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) total_amount_of_rows = self.__matrix__.shape()[0] @@ -111,8 +115,3 @@ class MatrixMPI: def __setitem__(self, key, value): self.__matrix__[key] = value - -# m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000)) -m1 = MatrixMPI([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) - -print(m1 - m1) diff --git a/src/vector_mpi.py b/src/vector_mpi.py index c943099..ceee57b 100644 --- a/src/vector_mpi.py +++ b/src/vector_mpi.py @@ -1,8 +1,21 @@ +import numpy from mpi4py import MPI +from vector import Vector class VectorMPI: - ... 
+ __mpi_comm__ = None + __mpi_size__ = None + __mpi_rank__ = None + + __vector__: Vector = None + + def __init__(self, mpi_comm, data=None, shape=None): + self.__mpi_comm__ = mpi_comm + self.__mpi_size__ = self.__mpi_comm__.Get_size() + self.__mpi_rank__ = self.__mpi_comm__.Get_rank() + + self.__vector__ = Vector(data=data, shape=shape) # class Vector: # start_idx = 0 # Nullter Eintrag des Vektors auf dem aktuellen Rang diff --git a/test/test_parallel.py b/test/test_parallel.py new file mode 100644 index 0000000..eecab5e --- /dev/null +++ b/test/test_parallel.py @@ -0,0 +1,406 @@ +import numpy as np +from mpi4py import MPI + +from matrix import Matrix +from vector import Vector + +np.random.seed(0) +############### +# Setting up MPI +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +size = comm.Get_size() +for i0 in range(size): + if i0 == rank: + print(f"Hello from rank {i0} of size {size}!") + comm.Barrier() +# Testing the vector class +comm.Barrier() +if rank == 0: + print("\n\nTesting the vector class --------------(output only from rank 0)---------------------\n\n") +############### + +### 1a Initialization +if rank == 0: + print("Start 1a initialization") +# from list +n_x1 = 10 +x1_list = [i0 for i0 in + range(n_x1)] # must work for 4 ranks, too, so even if entries are not evenly distributable between ranks +#vec_x1 = Vector(comm, x1_list) +vec_x1 = Vector(x1_list) +# from numpy array +n_x2 = 10000 +x2_list = np.random.uniform(0, 1, n_x2) +#vec_x2 = Vector(comm, x2_list) +vec_x2 = Vector(x2_list) +if rank == 0: + print("End 1a\n") +comm.Barrier() + +### 1b __str__ function, string representation +if rank == 0: + print("Start 1b string representation") +vec_x1_str = str(vec_x1) +vec_x2_str = str(vec_x2) +if rank == 0: + print(f"vec_x1 values: {vec_x1_str}") + print(f"vec_x2 values: {vec_x2_str}") + print("End 1b\n") +comm.Barrier() + +### 1c shape and transpose +if rank == 0: + print("Start 1c shape and transpose") +# shape property of the vector +vec_x1_shape = vec_x1.shape() +vec_x2_T_shape = vec_x2.T().shape() +vec_x2_T_T_shape = vec_x2.T().T().shape() +if rank == 0: + print(f"vec_x1 has shape {vec_x1_shape} | must be ({n_x1},1)") # Hint: numpy.reshape + # transposition (property) of the vector, only "cosmetic" change + print(f"vec_x2.T() has shape {vec_x2_T_shape} | must be (1,{n_x2})") + print(f"vec_x2.T().T() has shape {vec_x2_T_T_shape} | must be ({n_x2},1)") + print("End 1c\n") +comm.Barrier() + +### 1d addition and substraction +if rank == 0: + print("Start 1d addition and substraction") +# intitialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +#a = Vector(comm, a_list) +a = Vector(a_list) +#b = Vector(comm, b_list) +b = Vector(b_list) +# computation +x = a + b +y = a - b +#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) + np.array(b_list))).norm() +numpy_x_compare_norm = (x - Vector(np.array(a_list) + np.array(b_list))).norm() +#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) - np.array(b_list))).norm() +numpy_y_compare_norm = (y - Vector(np.array(a_list) - np.array(b_list))).norm() +if rank == 0: + print(f"norm(a + b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a - b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") +try: + x_false = a + b.T() +except ValueError as e: + if rank == 0: + print("The correct result is this error: " + str(e)) +else: + if rank == 0: + print("ERROR: It is required to raise a system error, e. 
g., ValueError, since dimensions mismatch!") +try: + x_false = a - b.T() +except ValueError as e: + if rank == 0: + print("The correct result is this error: " + str(e)) +else: + if rank == 0: + print("ERROR:It is required to raise a system error, e. g., ValueError, since dimensions mismatch!") +if rank == 0: + print("End 1d\n") +comm.Barrier() + +### 1e multiplication +if rank == 0: + print("Start 1e multiplication") +# intitialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +#a = Vector(comm, a_list) +a = Vector(a_list) +#b = Vector(comm, b_list) +b = Vector(b_list) +# computation with vectors +x = a.T() * b # scalar +y = a * b # vector +# z = b * a.T() # matrix +numpy_x_compare_norm = np.linalg.norm(x - np.sum(np.array(a_list) * np.array(b_list))) +#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) * np.array(b_list))).norm() +numpy_y_compare_norm = (y - Vector(np.array(a_list) * np.array(b_list))).norm() +# numpy_z_compare_norm = (z - Matrix(comm,np.outer(np.array(a_list),np.array(b_list)))).norm() +if rank == 0: + print(f"norm(a.T() * b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a * b - numpy) = {numpy_y_compare_norm} | must be < 1e-8") +# print(f"norm(b * a.T() - numpy) = \n{numpy_z_compare_norm} | must be 1e-8") +# computation with scalars +x = a * 5 +y = 0.1 * b.T() +#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) * 5)).norm() +numpy_x_compare_norm = (x - Vector(np.array(a_list) * 5)).norm() +#numpy_y_compare_norm = (y - Vector(comm, np.array(b_list) * 0.1, transpose=True)).norm() +numpy_y_compare_norm = (y - Vector(np.array(b_list) * 0.1).T()).norm() +if rank == 0: + print(f"norm(a * 5 - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(0.1 * b.T() - numpy) = {numpy_y_compare_norm} | must be < 1e-8") + print("End 1e\n") +comm.Barrier() + +### 1f divison +if rank == 0: + print("Start 1f divison") +# intitialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +#a = Vector(comm, a_list) +a = Vector(a_list) +#b = Vector(comm, b_list) +b = Vector(b_list) +# computation with vectors +x = a / b +y = a / 5 +#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) / np.array(b_list))).norm() +numpy_x_compare_norm = (x - Vector(np.array(a_list) / np.array(b_list))).norm() +#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) / 5)).norm() +numpy_y_compare_norm = (y - Vector(np.array(a_list) / 5)).norm() +if rank == 0: + print(f"norm(a / b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a / 5 - numpy) = {numpy_y_compare_norm} | must be < 1e-8") + print("End 1f\n") +comm.Barrier() + +### 1g norm +if rank == 0: + print("Start 1g norm") +# intitialization +a_list = [1 / (i0 + 1) for i0 in range(10)] +#a = Vector(comm, a_list) +a = Vector(a_list) +# computation +a_norm = a.norm() +a_normalized = a.normalize() +a_normalized_str = str(a_normalized) +#numpy_comparison_norm = (a_normalized - Vector(comm, np.array(a_list) / np.linalg.norm(a_list))).norm() +numpy_comparison_norm = (a_normalized - Vector(np.array(a_list) / np.linalg.norm(a_list))).norm() +if rank == 0: + print(f"a_norm = {a_norm} | must be {np.linalg.norm(a_list)}") + print(f"norm(a_normalize-np.a_normalize) = {numpy_comparison_norm} | must be < 1e-8") + print("End 1g\n") +comm.Barrier() + +### 1h negation +if rank == 0: + print("Start 1h negation") +# intitialization +a_list = [1 / (i0 + 1) for i0 in range(10)] +#a = Vector(comm, 
a_list) +a = Vector(a_list) +# computation +x = -a +x_str = str(x) +if rank == 0: + print(f"-a = {x_str} | must be {-np.array(a_list)}") + print("End 1h\n") +comm.Barrier() + +### 1i manipulation +if rank == 0: + print("Start 1i manipulation") +# intitialization +n_a = 10 +a_list = [1 / (i0 + 1) for i0 in range(n_a)] +#a = Vector(comm, a_list) +a = Vector(a_list) +a_idx = [1, 2, 9, 7, 8] +a_values = a[a_idx] +if rank == 0: + print( + f"a[{str(a_idx)}] = {str(a_values)} | must be {np.array(a_list).reshape(n_a, 1)[np.array(a_idx)].reshape(len(a_idx), )}") +a[a_idx] = [-1, -1, -1, -1, -1] +a_str = str(a) +np_a = np.array(a_list) +np_a[a_idx] = [-1, -1, -1, -1, -1] +if rank == 0: + print(f"a = {a_str} | must be {np_a}") + print("End 1i\n") +comm.Barrier() + +""" +############### +# Testing the matrix class +comm.Barrier() +if rank == 0: + print("\n\nTesting the matrix class -----------------------------------\n\n") +############### + + +### 2a Initialization +if rank == 0: + print("Start 2a initialization") +n_a1 = 10 +n_a2 = 5 +a_list = np.array([[(i0 + 1) * (i1 + 1) for i0 in range(n_a1)] for i1 in range(n_a2)]) +A = Matrix(comm, a_list) +B = Matrix(comm, structure="tridiagonal", size=12) +c_list = [i0 for i0 in range(n_a1 * n_a1)] +C = Matrix(comm, c_list, shape=(n_a1, n_a1)) +D = Matrix(comm, model="sheet1ex1", size=50) +if rank == 0: + print("End 2a\n") +comm.Barrier() + +### 2b __str__ function, string representation +if rank == 0: + print("Start 2b string representation") +# print(B.__str__(full = True)) +A_str = str(A) +B_str = str(B) +C_str = str(C) +D_str = str(D) +if rank == 0: + print(f"Matrix A (numbers):\n{A_str}") + print(f"Matrix B (tridiagonal):\n{B_str}") + print(f"Matrix C (list of numbers):\n{C_str}") + print(f"Matrix D (sheet1ex1):\n{D_str}") + print("End 2b\n") +comm.Barrier() + +### 2c shape and transpose +if rank == 0: + print("Start 2c shape and transpose") +# Initialization +A_shape = A.shape() +A_T_shape = A.T().shape() +A_T_T_shape = A.T().T().shape() +if rank == 0: + print(f"A has shape {A_shape} | must be {np.array(a_list).shape}") + print(f"A.T() has shape {A_T_shape} | must be {np.array(a_list).T.shape}") + print(f"A.T().T() has shape {A_T_T_shape} | must be {np.array(a_list).T.T.shape}") + print("End 2c\n") +comm.Barrier() + +# ### DEBUG norms ### +# mat = np.array([[(i0+1)*(i1+1) for i0 in range(10)] for i1 in range(5)]) +# local_max_each_row = np.amax(np.abs(mat),axis=1) +# if rank == 0: +# print(f"Matrix\n{str(mat)}\nlocal max each row {str(local_max_each_row)}") +# # frobenius norm +# A = Matrix(comm,mat) +# a_norm_fro = A.norm("frobenius") +# if rank == 0: +# print(f"A.norm('frobenius') = {a_norm_fro} | must be {np.linalg.norm(np.array([[(i0+1)*(i1+1) for i0 in range(10)] for i1 in range(5)]),'fro')}") +# # row sum norm +# a_norm_row = A.norm("row sum") +# if rank == 0: +# print(f"A.norm('row sum') = {a_norm_row} | must be {np.max(local_max_each_row)}") +# # col sum norm + + +### 2d addition and substraction +if rank == 0: + print("Start 2d addition and substraction") +# Initialization +n = 10 +A = Matrix(comm, structure="diagonal", diag_values=[3], offset=0, size=n) +A21 = Matrix(comm, structure="diagonal", diag_values=[-1], offset=-1, size=n) +A12 = Matrix(comm, structure="diagonal", diag_values=[-1], offset=+1, size=n) +B = Matrix(comm, structure="diagonal", diag_values=[1], offset=0, size=n) +# computation +C = A + A21 + A12 - B +D = C - Matrix(comm, structure='tridiagonal', diag_values=[-1, 2, -1], size=n) +d_norm = D.norm() +A_str = str(5 + A - 3) 
+if rank == 0: + print(f"norm(A + A21 + A12 - B - tridiag) = {d_norm} | must be < 1e-8") + print(f"5+A-3 = \n{A_str}") + print("End 2d\n") +comm.Barrier() + +### 2e multiplication +if rank == 0: + print("Start 2e multiplication") +# initialization +n = 10 +a_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(8)] for i0 in range(n)]) +b_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(8)]) +c_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(n)]) +d_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(17)] for i0 in range(n)]) +A = Matrix(comm, a_mat) +B = Matrix(comm, b_mat) +C = Matrix(comm, c_mat) +D = Matrix(comm, d_mat) +x_vec = np.array([i0 + 1 for i0 in range(n)]) +y_vec = np.array([n * (i0 + 1) for i0 in range(n)]) +x = Vector(comm, x_vec) +y = Vector(comm, y_vec) +# computation matrix scalar +norm5 = (5 * A - Matrix(comm, 5 * np.array(a_mat))).norm() +# computation matrix vector +norm6 = (C * x - Vector(comm, np.array(c_mat) @ np.array(x_vec))).norm() +norm7 = (D.T() * x - Vector(comm, np.array(d_mat).T @ np.array(x_vec))).norm() +y_shape = (D.T() * x).shape() +if rank == 0: + print(f"Norm of (5*A - np.(5*A)) is {norm5} | must be < 1e-8") + print(f"Norm of (C*x - np.(C*x)) is {norm6} | must be < 1e-8") + print(f"Norm of (D.T*x - np.(D.T*x)) is {norm7} | must be < 1e-8 | shape(D.T*x) is {y_shape} | must be (17,1)") +# computation matrix multiplication +A_str = str(A) +B_str = str(B) +if rank == 0: + print(f"DEBUG: A\n{A_str}\nDEBUG: B\n{B_str}") +norm1 = (A * B - Matrix(comm, np.array(a_mat) @ np.array(b_mat))).norm() +norm2 = (A.T() * A - Matrix(np.array(a_mat).T @ np.array(a_mat))).norm() +norm3 = (A * A.T() - Matrix(np.array(a_mat) @ np.array(a_mat).T)).norm() +norm4 = (B.T() * A.T() - Matrix(np.array(b_mat).T @ np.array(a_mat).T)).norm() +if rank == 0: + print(f"Norm of (A*B - np.(A*B)) is {norm1} | must be < 1e-8") + print(f"Norm of (A.T()*A - np(A.T()*A)) is {norm2} | must be < 1e-8") + print(f"Norm of (A*A.T() - np(A*A.T())) is {norm3} | must be < 1e-8") + print(f"Norm of (B.T()*A.T() - np.(B.T()*A.T())) is {norm4} | must be < 1e-8") + print("End 2e\n") +comm.Barrier() + +''' +### 2f divison +if rank == 0: + print("Start 2f divison") +# initialization +A = Matrix(a_mat) +# computation +print(f"Norm of (A/5 - np.(A/5)) is {(A/5 - Matrix(np.array(a_mat) / 5)).norm()} | must be < 1e-8") +if rank == 0: + print("End 2f\n") +comm.Barrier() + +### 2g norm +if rank == 0: + print("Start 2g norm") +A = Matrix(structure="tridiagonal",given_size=50,given_values=[-1,2,-1]) +print(f"Frobenius norm of tridiagonal matrix: {A.norm('frobenius')} | must be 17.263") +print(f"Row sum norm of tridiagonal matrix: {A.norm('row sum')} | must be 2") +print(f"Col sum norm of tridiagonal matrix: {A.norm('col sum')} | must be 2") +if rank == 0: + print("End 2g\n") +comm.Barrier() + +### 2h negation +if rank == 0: + print("Start 2h negation") +A = Matrix(structure="tridiagonal", given_size=50, given_values=[-1,2,1]) +print(f"Norm of (A + (-A)) is {(A + (-A)).norm('frobenius')} | must be < 1e-8") +if rank == 0: + print("End 2h\n") +comm.Barrier() + +### 2i manipulation +if rank == 0: + print("Start 2i manipulation") +A = Matrix(structure="tridiagonal", given_size=10, given_values=[-1,2,1]) +A[1,1] = 4 +A[[1,2,3],2] = [-5,-10,100] +print(str(A)) +if rank == 0: + print("End 2i\n") +comm.Barrier() + +''' +""" \ No newline at end of file From 6848bc7b26534110382ec21416f45f8f9e1150eb Mon Sep 17 00:00:00 2001 From: Niklas Birk Date: Tue, 16 Apr 2024 23:21:56 +0200 
Subject: [PATCH 3/6] Finalize matrix_mpi.py; Add more vector_mpi.py --- src/main.py | 24 ++- src/matrix.py | 11 +- src/matrix_mpi.py | 50 ++--- src/vector.py | 13 +- src/vector_mpi.py | 452 ++++++++---------------------------------- test/test_matrix.py | 45 +++++ test/test_parallel.py | 179 +++++++---------- test/test_vector.py | 19 ++ 8 files changed, 291 insertions(+), 502 deletions(-) diff --git a/src/main.py b/src/main.py index 3bdee1e..1e4eba8 100644 --- a/src/main.py +++ b/src/main.py @@ -1,6 +1,26 @@ # m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000)) + +from mpi4py import MPI from matrix_mpi import MatrixMPI +from vector_mpi import VectorMPI -m1 = MatrixMPI([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +size = comm.Get_size() -print(m1 + m1) +m1 = MatrixMPI(list(range(1, 21)), (4, 5)) +m2 = MatrixMPI(list(range(1, 16)), (5, 3)) + +m_mul = m1 * m2 + +v1 = VectorMPI(list(range(1, 21))) +v2 = VectorMPI(list(reversed(list(range(1, 21))))) + +v_add = v1 + v2 +v_mul = v1.T() * v2 + +if rank == 0: + print(m_mul) + print("---") + print(v_add) + print(v_mul) diff --git a/src/matrix.py b/src/matrix.py index eeeda3a..f851a90 100644 --- a/src/matrix.py +++ b/src/matrix.py @@ -210,10 +210,19 @@ class Matrix: else: raise ValueError("A ``Matrix`` can only be divided ba a number") + def __mul_rowmatrix_matrix__internal__(self, other): + cols = other.__shape__[1] + new_data = [0] * cols + for i in range(cols): + new_data[i] = sum([self.__data__[0][j] * other.__data__[j][i] for j in range(self.__shape__[1])]) + return new_data + def __mul_matrix_internal__(self, other): + if self.__shape__[0] == 1: + return self.__mul_rowmatrix_matrix__internal__(other) rows = self.__shape__[0] cols = other.__shape__[1] - new_data = [([0] * rows) for _ in range(cols)] + new_data = [([0] * cols) for _ in range(rows)] for i in range(rows): for k in range(cols): new_data[i][k] = sum([self.__data__[i][j] * other.__data__[j][k] for j in range(self.__shape__[1])]) diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index cafe968..e1b3396 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -5,21 +5,17 @@ from matrix import Matrix class MatrixMPI: - __mpi_comm__ = None - __mpi_size__ = None - __mpi_rank__ = None + __mpi_comm__ = MPI.COMM_WORLD + __mpi_size__ = __mpi_comm__.Get_size() + __mpi_rank__ = __mpi_comm__.Get_rank() - __matrix__: Matrix = None + __data__: Matrix = None __chunk__: list = None - def __init__(self, mpi_comm, data=None, shape=None, structure=None, model=None, offset=None, n=None): - self.__mpi_comm__ = mpi_comm - self.__mpi_size__ = self.__mpi_comm__.Get_size() - self.__mpi_rank__ = self.__mpi_comm__.Get_rank() + def __init__(self, data=None, shape=None, structure=None, model=None, offset=None, n=None): + self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) - self.__matrix__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) - - total_amount_of_rows = self.__matrix__.shape()[0] + total_amount_of_rows = self.__data__.shape()[0] chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__) self.__chunk__ = chunks[self.__mpi_rank__].tolist() @@ -28,18 +24,24 @@ class MatrixMPI: return MatrixMPI(matrix.get_data(), matrix.shape()) def __str__(self): - return str(self.__matrix__) + return str(self.__data__) + + def shape(self): + return self.__data__.shape() def get_rank_submatrix(self): rows = len(self.__chunk__) - cols = 
self.__matrix__.shape()[1] - return Matrix(self.__matrix__[self.__chunk__], (rows, cols)) + cols = self.__data__.shape()[1] + return Matrix(self.__data__[self.__chunk__], (rows, cols)) + + def get_data(self): + return self.__data__ def transpose(self): """ :return: the transpose of the matrix """ - return MatrixMPI.of(self.__matrix__.transpose()) + return MatrixMPI.of(self.__data__.transpose()) def T(self): """ @@ -57,9 +59,9 @@ class MatrixMPI: :return: True if data in the matrix are equal to the given data in other for each component, otherwise False """ if isinstance(other, MatrixMPI): - return self.__matrix__ == other.__matrix__ + return self.__data__ == other.__data__ else: - return self.__matrix__ == other + return self.__data__ == other def __neg__(self): gathered_data = self.__mpi_comm__.gather(-self.get_rank_submatrix()) @@ -89,9 +91,10 @@ class MatrixMPI: def __mul__(self, other): if isinstance(other, MatrixMPI): - return self.__matrix__ * other.__matrix__ - else: - return self.__matrix__ * other + other = other.get_data() + gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() * other) + data = self.__mpi_comm__.bcast(gathered_data) + return MatrixMPI.of(Matrix.flatten(data)) def __rmul__(self, other): return self * other @@ -107,11 +110,10 @@ class MatrixMPI: :return: the norm as a number """ - return self.__matrix__.norm(f) + return self.__data__.norm(f) def __getitem__(self, key): - return self.__matrix__[key] + return self.__data__[key] def __setitem__(self, key, value): - self.__matrix__[key] = value - + self.__data__[key] = value diff --git a/src/vector.py b/src/vector.py index c8bf6a8..7f0ea93 100644 --- a/src/vector.py +++ b/src/vector.py @@ -86,7 +86,7 @@ class Vector(Matrix): return Vector(self.__mul_vector_same_shape_internal__(other)) elif self.__shape__ == tuple(reversed(other.__shape__)): if self.__shape__[0] == 1: # Case (_ ... _) * (_\n...\n_) = scalar - return super().__mul_matrix_internal__(other)[0][0] + return super().__mul_matrix_internal__(other)[0] else: # Case (_\n...\n_) * (_ ... 
_) = Matrix new_data, shape = self.__mul_tensor_internal__(other) return Matrix(new_data, shape) @@ -95,10 +95,19 @@ class Vector(Matrix): elif isinstance(other, int) or isinstance(other, float): return Vector(super().__mul_scalar_internal__(other)) else: - raise ValueError("A ``Vector`` can only be multiplied with an ``Vector`` (dot product or tensor)," + raise ValueError("A ``Vector`` can only be multiplied with an ``Vector`` (dot product or tensor), " "a compatible ``Matrix`` or a scalar") + def __mul_matrix_vector_internal__(self, other): + rows = other.__shape__[0] + new_data = [0] * rows + for i in range(rows): + new_data[i] = sum([other.__data__[i][j] * self.__data__[j][0] for j in range(self.__shape__[0])]) + return new_data + def __rmul__(self, other): + if isinstance(other, Matrix): + return Vector(self.__mul_matrix_vector_internal__(other)) return self * other def __truediv_vector_internal__(self, other): diff --git a/src/vector_mpi.py b/src/vector_mpi.py index ceee57b..795d1bb 100644 --- a/src/vector_mpi.py +++ b/src/vector_mpi.py @@ -1,378 +1,88 @@ -import numpy -from mpi4py import MPI +from matrix_mpi import MatrixMPI from vector import Vector -class VectorMPI: - __mpi_comm__ = None - __mpi_size__ = None - __mpi_rank__ = None +class VectorMPI(MatrixMPI): + __data__: Vector = None - __vector__: Vector = None + def __init__(self, data=None, shape=None): + self.__data__ = Vector(data=data, shape=shape) - def __init__(self, mpi_comm, data=None, shape=None): - self.__mpi_comm__ = mpi_comm - self.__mpi_size__ = self.__mpi_comm__.Get_size() - self.__mpi_rank__ = self.__mpi_comm__.Get_rank() + @staticmethod + def of(vector: Vector): + return VectorMPI(vector.get_data(), vector.shape()) - self.__vector__ = Vector(data=data, shape=shape) + def get_vector(self): + return self.__data__ -# class Vector: -# start_idx = 0 # Nullter Eintrag des Vektors auf dem aktuellen Rang -# end_idx = 0 # Letzer Eintrag des Vektors auf dem aktuellen Rang -# rank_size = 0 # Dimension des Vektors der auf dem aktuellen Rang gespeichert wird -# kind = '' # Art des Vektors, Zeilen oder Spaltenvektor -# vec = np.arange(rank_size) -# vshape = np.arange(2) # Array mit Länge 2, um die Shape des Vektors zu speichern -# dim = 0 # Gesamtdimension des Vektors, Länge des Vektors -# -# comm = MPI.COMM_WORLD -# size = comm.Get_size() -# rank = comm.Get_rank() -# -# # Konstruktor -# def __init__(self, array): -# -# if isinstance(array, np.ndarray): -# form = array.shape -# if len(array) < self.size: -# raise ValueError("ERROR_3: Die Dimension des Vektors ist kleiner als die Anzahl der benutzten Ränge.") -# -# if len(form) > 2: -# raise ValueError("ERROR_2: Falsche Dimension, kann kein 1 x n oder n x 1 Vektor sein.") -# # Array ist Zeilenvektor: -# if len(form) == 1 or (len(form) == 2 and form[0] == 1): -# self.vshape[0] = 1 -# self.vshape[1] = len(array) # Shape des Vectors -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = ( -# self.end_idx - self.start_idx) + 1 # Größe des Teilvektors auf dem akt. Rang: Differenz zw. 
Start- und Endindex + 1 -# self.vec = array[ -# self.start_idx: self.end_idx + 1] # Auf jedem Rang werden die Einträge vom Start bis zum Endindex gespeichert -# self.kind = 'row' -# self.dim = len(array) -# -# # Array ist Spaltenvektor -# if len(form) == 2 and form[1] == 1: -# self.vshape[0] = len(array) -# self.vshape[1] = 1 -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = ( -# self.end_idx - self.start_idx) + 1 # Größe des Teilvektors auf dem akt. Rang: Differenz zw. Start- und Endindex + 1 -# self.vec = array[ -# self.start_idx: self.end_idx + 1] # Auf jedem Rang werden die Einträge vom Start bis zum Endindex gespeichert -# self.kind = 'column' -# self.dim = len(array) -# -# elif isinstance(array, list): -# self.vshape[0] = 1 -# self.vshape[1] = len(array) -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = (self.end_idx - self.start_idx) + 1 -# self.vec = np.array(array[self.start_idx:self.end_idx + 1]) -# self.kind = 'row' -# self.dim = len(array) -# else: -# raise ValueError( -# "ERROR_1: Die übergebene Variable ist kein Numpy-Array, Keine Initialisierung der Vector-Klasse möglich.") -# -# def __add__(self, other): # Überschreibung der Addition -# if isinstance(self, Vector) and isinstance(other, Vector): -# Add_Vec = Vector(np.arange(self.dim)) -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other.vec[i] -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Addition nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Add_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Add_Vec = Vector(np.arange(other.dim)) -# for i in range(0, other.rank_size): -# Add_Vec.vec[i] = other.vec[i] + self -# else: -# raise ValueError("Ungeeigneter Datentyp für die Addition mit einem Vektor.") -# return Add_Vec -# -# def __radd__(self, other): # Überschreibung der Addition eines Vektors von rechts -# Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Add_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Addition mit einem Vektor.") -# return Add_Vec -# -# def __sub__(self, other): # Überschreibung der Subtraktion -# if isinstance(self, Vector) and isinstance(other, Vector): -# Sub_Vec = Vector(np.arange(self.dim)) -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other.vec[i] -# else: -# raise ValueError("Die Dimension der Vektoren stimmen nicht überein, Subtraktion nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Sub_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Sub_Vec = Vector(np.arange(self.dim)) -# for i in range(0, other.rank_size): -# Sub_Vec.vec[i] = other.vec[i] - self -# else: -# 
raise ValueError("Ungeeigneter Datentyp für die Subtraktion mit einem Vektor.") -# return Sub_Vec -# -# def __rsub__(self, other): # Subtraktion einer Zahl von einem Vektor -# Sub_Vec = Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (float, int, complex)): -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Subtraktion von einem Vektor.") -# return Sub_Vec -# -# def __mul__(self, other): # Überschreibung der Multiplikation -# if isinstance(self, Vector) and isinstance(other, Vector): -# Mult_Vec = Vector(np.arange(self.dim)) -# if (self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[ -# 1]): # Elementweise Multiplikation -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other.vec[i] -# elif self.vshape[1] == other.vshape[0] and self.vshape[0] == 1: # Inneres Produkt (Skalarprodukt) -# skal_prod = 0 -# for i in range(0, self.rank_size): -# skal_prod = skal_prod + self.vec[i] * other.vec[i] -# return skal_prod -# elif self.vshape[0] == other.vshape[1] and self.vshape[1] == 1: -# raise ValueError("Kann erst implementiert werden, wenn Matrix-Klasse existiert.") -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Multiplikation nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Mult_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Mult_Vec = Vector(np.arange(self.dim)) -# for i in range(0, other.rank_size): -# Mult_Vec.vec[i] = other.vec[i] * self -# else: -# raise ValueError("Ungeeigneter Datentyp für die Multiplikation mit einem Vektor.") -# return Mult_Vec -# -# def __rmul__(self, other): # Rechtsseitige Multiplikation von einer Zahl an einen Vektor -# Mult_Vec = Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Multiplikation mit einem Vektor.") -# return Mult_Vec -# -# def __truediv__(self, other): -# Div_Vec = Vector(np.arange(self.dim, dtype=np.double)) -# if isinstance(self, Vector) and isinstance(other, Vector): -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# if other.vec[i] == 0: -# raise ValueError("Ein Eintrag des Divisor-Vektors ist 0, Divion nicht möglich.") -# Div_Vec.vec[i] = self.vec[i] / other.vec[i] -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Division nicht möglich.") -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# for i in range(0, other.rank_size): -# if other.vec[i] == 0: -# raise ValueError("Ein Eintrag des Divisor-Vektors ist 0, Divion nicht möglich.") -# Div_Vec.vec[i] = self / other.vec[i] -# elif isinstance(self, Vector) and isinstance(other, (float, int, complex)): -# if other == 0: -# raise ValueError("Division durch Null ist nicht möglich.") -# else: -# for i in range(0, self.rank_size): -# Div_Vec.vec[i] = self.vec[i] / other -# else: -# raise ValueError("ERROR 11: Ungeeigneter Datentyp für die Division mit einem Vektor.") -# return Div_Vec -# -# def __rtruediv__(self, other): -# Div_Vec = Vector(np.arange(self.dim, dtype=np.double)) -# if isinstance(self, 
Vector) and isinstance(other, (float, int, complex)): -# if other == 0: -# raise ValueError("Division durch Null ist nicht möglich.") -# else: -# for i in range(0, self.rank_size): -# Div_Vec.vec[i] = self.vec[i] / other -# else: -# raise ValueError("ERROR 10: Uneignete Datentyp, um einen Vektor durch diesen zu dividieren") -# return Div_Vec -# -# def __neg__(self): -# Neg_Vec = -1 * self -# return Neg_Vec -# -# def shape(self): -# if self.rank == 0: -# return self.vshape -# -# def T(self): -# Transpose = self -# if self.kind == 'row': -# Transpose.kind = 'column' -# else: -# Transpose.kind = 'row' -# -# # Tauschen der Dimensionen -# var_shift = self.vshape[0] -# Transpose.vshape[0] = self.vshape[1] -# Transpose.vshape[1] = var_shift -# return Transpose -# -# def str(self): # Rückgabe des gesamten Vektors als string -# str_rep = '' -# -# if self.rank == 0: -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec)) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# -# elif self.rank == self.size - 1: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# -# else: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# self.comm.send(str_rep, dest=self.rank + 1) -# -# str_rep = self.comm.bcast(str_rep, root=self.size - 1) -# if self.rank == 0: -# return str_rep + ']' -# -# def string(self, limit_entry): # Gibt den Vektor als String zurück bis zum Eintrag limit_entry -# str_rep = '' -# -# if limit_entry > self.vec_size: -# raise ValueError("ERROR_4: Die eingegebene Zahl ist größer, als der größte Index des Vectors.") -# -# # Rank 0 -# if self.rank == 0 and limit_entry <= self.end_idx: # Limit_entry befindet sich im Rang 0 -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec[:limit_entry])) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec[:limit_entry])) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# if self.rank == 0 and limit_entry > self.end_idx: # Limit_entry befindet sich nicht im Rang 0 -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec)) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# -# # Rank im Intervall [1,size-1] -# if ( -# 0 < self.rank < self.size - 1 and limit_entry <= self.start_idx): # wenn lim_ent == start_idx, dann wurden bereits alle relevanten Indizes im String gespeichert, da Vector nullinitialisiert ist -# str_rep = self.comm.recv(source=self.rank - 1) -# self.comm.send(str_rep, dest=self.rank + 1) -# if ( -# 0 < self.rank < self.size - 1 and self.start_idx < limit_entry <= self.end_idx): -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) + ']' -# self.comm.send(str_rep, dest=self.rank + 1) -# if 0 < self.rank < self.size - 1 and limit_entry > self.end_idx: -# if self.kind 
== 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# self.comm.send(str_rep, dest=self.rank + 1) -# -# # Rank size-1 -# if self.rank == self.size - 1 and limit_entry <= self.start_idx and self.rank > 1: -# str_rep = self.comm.recv(source=self.rank - 1) -# -# if self.rank == self.size - 1 and limit_entry >= self.start_idx and self.rank > 1: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# -# str_rep = self.comm.bcast(str_rep, root=self.size - 1) -# if self.rank == 0: -# return str_rep + ']' -# -# def norm(self): # Berechnung der 2-Norm / euklidischen Norm -# 0 -# sum_of_squares = 0 -# if self.rank == 0: -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# -# if self.size > 1: -# self.comm.send(sum_of_squares, dest=self.rank + 1) -# -# elif self.rank == self.size - 1: -# sum_of_squares = self.comm.recv(source=self.rank - 1) -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# -# else: -# sum_of_squares = self.comm.recv(source=self.rank - 1) -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# self.comm.send(sum_of_squares, dest=self.rank + 1) -# -# sum_of_squares = self.comm.bcast(sum_of_squares, root=self.size - 1) -# norm = np.sqrt(sum_of_squares) -# -# return norm -# -# def normalize(self): # Normalisierung eines Vectors -# norm = self.norm() -# if norm == 0: -# return self -# normalized_vec = self / norm -# return normalized_vec -# -# -# # Main-Funktion -# x = Vector(np.arange(10)) -# print(x.str(), x.shape()) -# print(x.vshape[0], x.vshape[1]) -# minus = -1 * x -# _x = -x -# print(_x.str()) -# print(minus.str()) -# y = Vector(2 * np.arange(10)) -# print(y.str()) -# z = x - y -# print(z.str()) -# ae = x + 5 -# print(ae.str()) -# o = x * y -# print(o.str()) -# -# a = Vector(np.array([[1], [2]])) -# b = Vector(np.array([1, 2])) -# print(a.shape()) -# # c = a * b -# # print(c.vec) + def shape(self): + return self.__data__.shape() + + def __eq__(self, other): + """ + Return ``self==value`` + + :param other: The object to compare to; must be either a ``Vector``, a ``list`` or a ``numpy.ndarray`` + :return: True if data in the same-shaped vectors are equal to the given data in other for each component otherwise False + """ + if isinstance(other, VectorMPI): + return self.__data__ == other.__data__ + else: + return self.__data__ == other + + def transpose(self): + """ + :return: the transpose of the vector + """ + return VectorMPI.of(self.__data__.transpose()) + + def T(self): + return self.transpose() + + def __neg__(self): + return VectorMPI.of(-self.__data__) + + def __add__(self, other): + if isinstance(other, VectorMPI): + other = other.__data__ + return VectorMPI.of(self.__data__ + other) + + def __mul__(self, other): + if isinstance(other, VectorMPI): + other = other.__data__ + result = self.__data__ * other + return VectorMPI.of(result) if isinstance(result, Vector) else result + + def __rmul__(self, other): + if isinstance(other, MatrixMPI): + return VectorMPI.of(other.get_data() * self.get_vector()) + return self * other + + def 
__truediv__(self, other): + if isinstance(other, VectorMPI): + other = other.__data__ + return VectorMPI.of(self.__data__ / other) + + def norm(self, **kwargs): + """ + Computes the 2-norm of the vector which is the Frobenius-Norm of a nx1 matrix. + + :param kwargs: ignored + :return: the 2-norm of the vector + """ + return self.__data__.norm() + + def normalize(self): + """ + A normalized vector has the length (norm) 1. + To achieve that the vector is divided by the norm of itself. + + :return: the normalized vector + """ + return VectorMPI.of(self.__data__ / self.norm()) + + def __getitem__(self, key): + return self.__data__[key] + + def __setitem__(self, key, value): + self.__data__[key] = value diff --git a/test/test_matrix.py b/test/test_matrix.py index a41bb28..a6b22d8 100644 --- a/test/test_matrix.py +++ b/test/test_matrix.py @@ -230,6 +230,51 @@ class TestMatrix(TestCase): self.assertEqual(expected, actual) + def test_should_mul_matrices_4(self): + m1 = Matrix([1] * 6, (2, 3)) + m2 = Matrix([1] * 15, (3, 5)) + + actual = m1 * m2 + expected = Matrix([3] * 10, (2, 5)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_5(self): + m1 = Matrix([1] * 15, (3, 5)) + m2 = Matrix([1] * 20, (5, 4)) + + actual = m1 * m2 + expected = Matrix([5] * 12, (3, 4)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_6(self): + m1 = Matrix([1] * 15, (5, 3)) + m2 = Matrix([1] * 12, (3, 4)) + + actual = m1 * m2 + expected = Matrix([3] * 20, (5, 4)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_7(self): + m1 = Matrix(list(range(1, 21)), (4, 5)) + m2 = Matrix(list(range(1, 16)), (5, 3)) + + actual = m1 * m2 + expected = Matrix([135, 150, 165, 310, 350, 390, 485, 550, 615, 660, 750, 840], (4, 3)) + + self.assertEqual(expected, actual) + + def test_should_mul_vector_matrix(self): + m1 = Matrix([1, 2, 3], (1, 3)) + m2 = Matrix([6, 5, 4, 3, 2, 1], (3, 2)) + + actual = m1 * m2 + expected = Matrix([20, 14], (1, 2)) + + self.assertEqual(expected, actual) + def test_should_raise_shape_missmatch_error_while_multiplying_matrices(self): m1 = Matrix([1, 2], (2, 1)) m2 = Matrix([3, 4], (2, 1)) diff --git a/test/test_parallel.py b/test/test_parallel.py index eecab5e..8a81792 100644 --- a/test/test_parallel.py +++ b/test/test_parallel.py @@ -1,8 +1,8 @@ import numpy as np from mpi4py import MPI -from matrix import Matrix -from vector import Vector +from matrix_mpi import MatrixMPI +from vector_mpi import VectorMPI np.random.seed(0) ############### @@ -27,13 +27,11 @@ if rank == 0: n_x1 = 10 x1_list = [i0 for i0 in range(n_x1)] # must work for 4 ranks, too, so even if entries are not evenly distributable between ranks -#vec_x1 = Vector(comm, x1_list) -vec_x1 = Vector(x1_list) +vec_x1 = VectorMPI(x1_list) # from numpy array n_x2 = 10000 x2_list = np.random.uniform(0, 1, n_x2) -#vec_x2 = Vector(comm, x2_list) -vec_x2 = Vector(x2_list) +vec_x2 = VectorMPI(x2_list) if rank == 0: print("End 1a\n") comm.Barrier() @@ -64,25 +62,21 @@ if rank == 0: print("End 1c\n") comm.Barrier() -### 1d addition and substraction +### 1d addition and subtraction if rank == 0: - print("Start 1d addition and substraction") -# intitialization + print("Start 1d addition and subtraction ") +# initialization n_a = 10 n_b = 10 a_list = [i0 for i0 in range(n_a)] b_list = [1 / (i0 + 1) for i0 in range(n_b)] -#a = Vector(comm, a_list) -a = Vector(a_list) -#b = Vector(comm, b_list) -b = Vector(b_list) +a = VectorMPI(a_list) +b = VectorMPI(b_list) # computation x = a + b y = a - b 
-#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) + np.array(b_list))).norm() -numpy_x_compare_norm = (x - Vector(np.array(a_list) + np.array(b_list))).norm() -#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) - np.array(b_list))).norm() -numpy_y_compare_norm = (y - Vector(np.array(a_list) - np.array(b_list))).norm() +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) + np.array(b_list))).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) - np.array(b_list))).norm() if rank == 0: print(f"norm(a + b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") print(f"norm(a - b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") @@ -109,23 +103,19 @@ comm.Barrier() ### 1e multiplication if rank == 0: print("Start 1e multiplication") -# intitialization +# initialization n_a = 10 n_b = 10 a_list = [i0 for i0 in range(n_a)] b_list = [1 / (i0 + 1) for i0 in range(n_b)] -#a = Vector(comm, a_list) -a = Vector(a_list) -#b = Vector(comm, b_list) -b = Vector(b_list) +a = VectorMPI(a_list) +b = VectorMPI(b_list) # computation with vectors x = a.T() * b # scalar y = a * b # vector -# z = b * a.T() # matrix numpy_x_compare_norm = np.linalg.norm(x - np.sum(np.array(a_list) * np.array(b_list))) -#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) * np.array(b_list))).norm() -numpy_y_compare_norm = (y - Vector(np.array(a_list) * np.array(b_list))).norm() -# numpy_z_compare_norm = (z - Matrix(comm,np.outer(np.array(a_list),np.array(b_list)))).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) * np.array(b_list))).norm() +# numpy_z_compare_norm = (z - MatrixMPI(comm,np.outer(np.array(a_list),np.array(b_list)))).norm() if rank == 0: print(f"norm(a.T() * b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") print(f"norm(a * b - numpy) = {numpy_y_compare_norm} | must be < 1e-8") @@ -133,35 +123,29 @@ if rank == 0: # computation with scalars x = a * 5 y = 0.1 * b.T() -#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) * 5)).norm() -numpy_x_compare_norm = (x - Vector(np.array(a_list) * 5)).norm() -#numpy_y_compare_norm = (y - Vector(comm, np.array(b_list) * 0.1, transpose=True)).norm() -numpy_y_compare_norm = (y - Vector(np.array(b_list) * 0.1).T()).norm() +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) * 5)).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(b_list) * 0.1).T()).norm() if rank == 0: print(f"norm(a * 5 - numpy) = {numpy_x_compare_norm} | must be < 1e-8") print(f"norm(0.1 * b.T() - numpy) = {numpy_y_compare_norm} | must be < 1e-8") print("End 1e\n") comm.Barrier() -### 1f divison +### 1f division if rank == 0: - print("Start 1f divison") -# intitialization + print("Start 1f division") +# initialization n_a = 10 n_b = 10 a_list = [i0 for i0 in range(n_a)] b_list = [1 / (i0 + 1) for i0 in range(n_b)] -#a = Vector(comm, a_list) -a = Vector(a_list) -#b = Vector(comm, b_list) -b = Vector(b_list) +a = VectorMPI(a_list) +b = VectorMPI(b_list) # computation with vectors x = a / b y = a / 5 -#numpy_x_compare_norm = (x - Vector(comm, np.array(a_list) / np.array(b_list))).norm() -numpy_x_compare_norm = (x - Vector(np.array(a_list) / np.array(b_list))).norm() -#numpy_y_compare_norm = (y - Vector(comm, np.array(a_list) / 5)).norm() -numpy_y_compare_norm = (y - Vector(np.array(a_list) / 5)).norm() +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) / np.array(b_list))).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) / 5)).norm() if rank == 0: print(f"norm(a / b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") 
print(f"norm(a / 5 - numpy) = {numpy_y_compare_norm} | must be < 1e-8") @@ -171,16 +155,14 @@ comm.Barrier() ### 1g norm if rank == 0: print("Start 1g norm") -# intitialization +# initialization a_list = [1 / (i0 + 1) for i0 in range(10)] -#a = Vector(comm, a_list) -a = Vector(a_list) +a = VectorMPI(a_list) # computation a_norm = a.norm() a_normalized = a.normalize() a_normalized_str = str(a_normalized) -#numpy_comparison_norm = (a_normalized - Vector(comm, np.array(a_list) / np.linalg.norm(a_list))).norm() -numpy_comparison_norm = (a_normalized - Vector(np.array(a_list) / np.linalg.norm(a_list))).norm() +numpy_comparison_norm = (a_normalized - VectorMPI(np.array(a_list) / np.linalg.norm(a_list))).norm() if rank == 0: print(f"a_norm = {a_norm} | must be {np.linalg.norm(a_list)}") print(f"norm(a_normalize-np.a_normalize) = {numpy_comparison_norm} | must be < 1e-8") @@ -190,10 +172,9 @@ comm.Barrier() ### 1h negation if rank == 0: print("Start 1h negation") -# intitialization +# initialization a_list = [1 / (i0 + 1) for i0 in range(10)] -#a = Vector(comm, a_list) -a = Vector(a_list) +a = VectorMPI(a_list) # computation x = -a x_str = str(x) @@ -205,11 +186,10 @@ comm.Barrier() ### 1i manipulation if rank == 0: print("Start 1i manipulation") -# intitialization +# initialization n_a = 10 a_list = [1 / (i0 + 1) for i0 in range(n_a)] -#a = Vector(comm, a_list) -a = Vector(a_list) +a = VectorMPI(a_list) a_idx = [1, 2, 9, 7, 8] a_values = a[a_idx] if rank == 0: @@ -224,7 +204,6 @@ if rank == 0: print("End 1i\n") comm.Barrier() -""" ############### # Testing the matrix class comm.Barrier() @@ -232,18 +211,18 @@ if rank == 0: print("\n\nTesting the matrix class -----------------------------------\n\n") ############### - ### 2a Initialization if rank == 0: print("Start 2a initialization") n_a1 = 10 n_a2 = 5 a_list = np.array([[(i0 + 1) * (i1 + 1) for i0 in range(n_a1)] for i1 in range(n_a2)]) -A = Matrix(comm, a_list) -B = Matrix(comm, structure="tridiagonal", size=12) +A = MatrixMPI(a_list) +B = MatrixMPI(structure="tridiagonal", data=[-1, 2, -1], n=12) c_list = [i0 for i0 in range(n_a1 * n_a1)] -C = Matrix(comm, c_list, shape=(n_a1, n_a1)) -D = Matrix(comm, model="sheet1ex1", size=50) +C = MatrixMPI(c_list, shape=(n_a1, n_a1)) +D = MatrixMPI(structure="tridiagonal", data=[-1, 2, -1], n=50) +# D = MatrixMPI(model="sheet1ex1", n=50) if rank == 0: print("End 2a\n") comm.Barrier() @@ -284,29 +263,29 @@ comm.Barrier() # if rank == 0: # print(f"Matrix\n{str(mat)}\nlocal max each row {str(local_max_each_row)}") # # frobenius norm -# A = Matrix(comm,mat) +# A = MatrixMPI(comm,mat) # a_norm_fro = A.norm("frobenius") # if rank == 0: # print(f"A.norm('frobenius') = {a_norm_fro} | must be {np.linalg.norm(np.array([[(i0+1)*(i1+1) for i0 in range(10)] for i1 in range(5)]),'fro')}") # # row sum norm # a_norm_row = A.norm("row sum") # if rank == 0: -# print(f"A.norm('row sum') = {a_norm_row} | must be {np.max(local_max_each_row)}") +# print(f"A.norm('row sum') = {a_norm_row} | must be {np.max(local_max_each_row)}") # # col sum norm -### 2d addition and substraction +### 2d addition and subtraction if rank == 0: - print("Start 2d addition and substraction") + print("Start 2d addition and subtraction ") # Initialization n = 10 -A = Matrix(comm, structure="diagonal", diag_values=[3], offset=0, size=n) -A21 = Matrix(comm, structure="diagonal", diag_values=[-1], offset=-1, size=n) -A12 = Matrix(comm, structure="diagonal", diag_values=[-1], offset=+1, size=n) -B = Matrix(comm, structure="diagonal", diag_values=[1], 
offset=0, size=n) +A = MatrixMPI(structure="diagonal", data=[3], offset=0, n=n) +A21 = MatrixMPI(structure="diagonal", data=[-1], offset=-1, n=n) +A12 = MatrixMPI(structure="diagonal", data=[-1], offset=+1, n=n) +B = MatrixMPI(structure="diagonal", data=[1], offset=0, n=n) # computation C = A + A21 + A12 - B -D = C - Matrix(comm, structure='tridiagonal', diag_values=[-1, 2, -1], size=n) +D = C - MatrixMPI(structure='tridiagonal', data=[-1, 2, -1], n=n) d_norm = D.norm() A_str = str(5 + A - 3) if rank == 0: @@ -324,19 +303,19 @@ a_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(8)] for i0 in range(n)]) b_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(8)]) c_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(n)]) d_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(17)] for i0 in range(n)]) -A = Matrix(comm, a_mat) -B = Matrix(comm, b_mat) -C = Matrix(comm, c_mat) -D = Matrix(comm, d_mat) +A = MatrixMPI(a_mat) +B = MatrixMPI(b_mat) +C = MatrixMPI(c_mat) +D = MatrixMPI(d_mat) x_vec = np.array([i0 + 1 for i0 in range(n)]) y_vec = np.array([n * (i0 + 1) for i0 in range(n)]) -x = Vector(comm, x_vec) -y = Vector(comm, y_vec) +x = VectorMPI(x_vec) +y = VectorMPI(y_vec) # computation matrix scalar -norm5 = (5 * A - Matrix(comm, 5 * np.array(a_mat))).norm() +norm5 = (5 * A - MatrixMPI(5 * np.array(a_mat))).norm() # computation matrix vector -norm6 = (C * x - Vector(comm, np.array(c_mat) @ np.array(x_vec))).norm() -norm7 = (D.T() * x - Vector(comm, np.array(d_mat).T @ np.array(x_vec))).norm() +norm6 = (C * x - VectorMPI(np.array(c_mat) @ np.array(x_vec))).norm() +norm7 = (D.T() * x - VectorMPI(np.array(d_mat).T @ np.array(x_vec))).norm() y_shape = (D.T() * x).shape() if rank == 0: print(f"Norm of (5*A - np.(5*A)) is {norm5} | must be < 1e-8") @@ -347,10 +326,10 @@ A_str = str(A) B_str = str(B) if rank == 0: print(f"DEBUG: A\n{A_str}\nDEBUG: B\n{B_str}") -norm1 = (A * B - Matrix(comm, np.array(a_mat) @ np.array(b_mat))).norm() -norm2 = (A.T() * A - Matrix(np.array(a_mat).T @ np.array(a_mat))).norm() -norm3 = (A * A.T() - Matrix(np.array(a_mat) @ np.array(a_mat).T)).norm() -norm4 = (B.T() * A.T() - Matrix(np.array(b_mat).T @ np.array(a_mat).T)).norm() +norm1 = (A * B - MatrixMPI(np.array(a_mat) @ np.array(b_mat))).norm() +norm2 = (A.T() * A - MatrixMPI(np.array(a_mat).T @ np.array(a_mat))).norm() +norm3 = (A * A.T() - MatrixMPI(np.array(a_mat) @ np.array(a_mat).T)).norm() +norm4 = (B.T() * A.T() - MatrixMPI(np.array(b_mat).T @ np.array(a_mat).T)).norm() if rank == 0: print(f"Norm of (A*B - np.(A*B)) is {norm1} | must be < 1e-8") print(f"Norm of (A.T()*A - np(A.T()*A)) is {norm2} | must be < 1e-8") @@ -359,48 +338,44 @@ if rank == 0: print("End 2e\n") comm.Barrier() -''' -### 2f divison +### 2f division if rank == 0: - print("Start 2f divison") + print("Start 2f division") # initialization -A = Matrix(a_mat) +A = MatrixMPI(a_mat) # computation -print(f"Norm of (A/5 - np.(A/5)) is {(A/5 - Matrix(np.array(a_mat) / 5)).norm()} | must be < 1e-8") +print(f"Norm of (A/5 - np.(A/5)) is {(A / 5 - MatrixMPI(np.array(a_mat) / 5)).norm()} | must be < 1e-8") if rank == 0: - print("End 2f\n") + print("End 2f\n") comm.Barrier() ### 2g norm if rank == 0: - print("Start 2g norm") -A = Matrix(structure="tridiagonal",given_size=50,given_values=[-1,2,-1]) + print("Start 2g norm") +A = MatrixMPI(structure="tridiagonal", n=50, data=[-1, 2, -1]) print(f"Frobenius norm of tridiagonal matrix: {A.norm('frobenius')} | must be 17.263") -print(f"Row sum norm of tridiagonal 
matrix: {A.norm('row sum')} | must be 2") -print(f"Col sum norm of tridiagonal matrix: {A.norm('col sum')} | must be 2") +print(f"Row sum norm of tridiagonal matrix: {A.norm('row sum')} | must be 4") +print(f"Col sum norm of tridiagonal matrix: {A.norm('col sum')} | must be 4") if rank == 0: - print("End 2g\n") + print("End 2g\n") comm.Barrier() ### 2h negation if rank == 0: - print("Start 2h negation") -A = Matrix(structure="tridiagonal", given_size=50, given_values=[-1,2,1]) + print("Start 2h negation") +A = MatrixMPI(structure="tridiagonal", n=50, data=[-1, 2, 1]) print(f"Norm of (A + (-A)) is {(A + (-A)).norm('frobenius')} | must be < 1e-8") if rank == 0: - print("End 2h\n") + print("End 2h\n") comm.Barrier() ### 2i manipulation if rank == 0: - print("Start 2i manipulation") -A = Matrix(structure="tridiagonal", given_size=10, given_values=[-1,2,1]) -A[1,1] = 4 -A[[1,2,3],2] = [-5,-10,100] + print("Start 2i manipulation") +A = MatrixMPI(structure="tridiagonal", n=10, data=[-1, 2, 1]) +A[1, 1] = 4 +A[[1, 2, 3], 2] = [-5, -10, 100] print(str(A)) if rank == 0: - print("End 2i\n") + print("End 2i\n") comm.Barrier() - -''' -""" \ No newline at end of file diff --git a/test/test_vector.py b/test/test_vector.py index b31f9b0..1bb07b8 100644 --- a/test/test_vector.py +++ b/test/test_vector.py @@ -1,5 +1,6 @@ from unittest import TestCase +from matrix import Matrix from vector import Vector @@ -169,6 +170,24 @@ class TestVector(TestCase): self.assertEqual(expected, actual) + def test_should_mul_matrix_vector(self): + m = Matrix([1, 2, 3, 4, 5, 6], (2, 3)) + v = Vector([1, 2, 3]) + + expected = Vector([14, 32]) + actual = m * v + + self.assertEqual(expected, actual) + + def test_should_mul_matrix_vector_2(self): + m = Matrix([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4], (3, 4)) + v = Vector([1, 2, 3, 4]) + + expected = Vector([30, 30, 30]) + actual = m * v + + self.assertEqual(expected, actual) + def test_should_raise_value_missmatch_error_while_mul_vectors(self): v1 = Vector([1, 2]) v2 = '0' From 23df9f37f2235d62dafcdcd5e946943df95c53bf Mon Sep 17 00:00:00 2001 From: niklas Date: Tue, 23 Apr 2024 17:55:18 +0200 Subject: [PATCH 4/6] More work on parallel stuff --- src/cg.py | 127 +++++++++++++++++----------------------------- src/main.py | 32 +++++------- src/matrix.py | 2 +- src/matrix_mpi.py | 15 +++++- src/vector_mpi.py | 13 ++++- 5 files changed, 85 insertions(+), 104 deletions(-) diff --git a/src/cg.py b/src/cg.py index d983f9f..e60b501 100644 --- a/src/cg.py +++ b/src/cg.py @@ -1,91 +1,58 @@ -import matplotlib.pyplot as plt -import numpy as np +from mpi4py import MPI + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() -# Funktion zur Berechnung der 2-Norm -def norm(vec): - sum_of_squares = 0 - for i in range(0, len(vec)): - sum_of_squares = sum_of_squares + vec[i] ** 2 +def cg(n: int, A: Matrix, f: Vector, tol: float): + # Intialisierung des Startvektors x + x = Vector([1] * n) - return np.sqrt(sum_of_squares) + # Anzahl der Schritte + count = 0 + # Anfangswerte berechnen + r = f - A * x # Anfangsresiduum + p = r # Anfangsabstiegsrichtung -# Test, ob die Matrix M positiv definit ist, mittels Cholesky-Zerlegung -def ist_positiv_definit(m): - try: - np.linalg.cholesky(m) - return True - except np.linalg.LinAlgError: - return False + while r.norm() > tol and count < 1000: + print(f"{count}. 
Iterationsschritt:\n") + # print("Iterierte:", x) + # print("Residuumsnorm: ", r.norm()) + z = A * p # Matrix-Vektorprodukt berechnen und speichern -n = 1000 -h = 1 / (n - 1) + # Minimiere phi in Richung p um neue Iterierte x zu finden + alpha = (r.T() * p) / (p.T() * z) # (np.dot(r , p)) / (np.dot(p , z)) + # print(alpha) -# Initialisierung der Matrix A und des Vektor f für LGS Au = f -A = np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1) -f = h ** 2 * 2 * np.ones(n) + x = x + alpha * p # neue Itterierte x + r = r - alpha * z # neues Residuum -# Teste, ob A positiv definitv ist mittels Eigenwerten -if np.all(np.linalg.eigvals(A) <= 0): # Prüfen, ob alle Eigenwerte positiv sind - raise ValueError("A ist nicht positiv definit.") + # Bestimmung der neuen Suchrichtung + beta = - (r.T() * z) / (p.T() * z) # (np.dot(r , z)) / (np.dot(p , z)) + p = r + beta * p # neue konjugierte Abstiegsrichtung -# Teste, ob A positiv definit ist mittels Cholesky-Zerlegung -if not (ist_positiv_definit(A)): - raise ValueError("A ist nicht positiv definit.") + count = count + 1 -# Teste, ob A symmetrisch ist -if (A != A.T).all(): - raise ValueError("A ist nicht symmetrisch.") - -# Intialisierung des Startvektors x -x = np.ones(n) - -# Toleranz epsilon -eps = 0.001 - -count = 1 - -# Anfangswerte berechnen -r = f - A @ x # Anfangsresiduum -p = r # Anfangsabstiegsrichtung - -print("0. Iterationsschritt: \n") -print("Startvektor:", x) -print("Norm des Anfangsresiduums: ", norm(r)) - -while eps < norm(r) < 1000: - z = A @ p # Matrix-Vektorprodukt berechnen und speichern - - alpha = (np.dot(r, p)) / (np.dot(p, z)) - print(alpha) - - x = x + alpha * p # neue Itterierte x - r = r - alpha * z # neues Residuum - - # Bestimmung der neuen Suchrichtung - beta = - (np.dot(r, z)) / (np.dot(p, z)) - p = r + beta * p # neue konjugierte Abstiegsrichtung - - print(count, ". 
Iterationsschritt: \n") - print("Aktuelle Iterierte:", x) - print("Norm des Residuums: ", norm(r)) - - count = count + 1 - -# Vergleich mit numpy-interner Lsg -u = np.linalg.solve(A, f) - -print("Lösung mit CG-Verfahren:", x) -print("Numpy interne Lösung:", u) - -if norm(u - x) > eps: - print("Der CG-Algorithmus hat nicht richtig funktioniert!") -else: - print("Der CG-Algorithmus war erfolgreich.") - -plt.plot(x, linewidth=2) -plt.plot(u, linewidth=2) - -plt.show() + print(f"{rank} APFELSTRUDEL") + # if rank == 0: + # # Vergleich mit numpy-interner Lsg + # u = np.linalg.solve(np.array(A.get_data()), np.array(f.get_data())) + # + # print("Lösung mit CG-Verfahren:", x) + # print("Numpy interne Lösung:", u) + # + # if (Vector(u) - x).norm() > eps: + # print("Der CG-Algorithmus hat nicht richtig funktioniert!") + # else: + # print("Der CG-Algorithmus war erfolgreich.") + # + # plt.plot(x.get_data(), linewidth=2) + # plt.plot(u, linewidth=2) + # + # plt.show() diff --git a/src/main.py b/src/main.py index 1e4eba8..bab7dd2 100644 --- a/src/main.py +++ b/src/main.py @@ -1,26 +1,18 @@ -# m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000)) +import numpy as np -from mpi4py import MPI -from matrix_mpi import MatrixMPI -from vector_mpi import VectorMPI +import cg -comm = MPI.COMM_WORLD -rank = comm.Get_rank() -size = comm.Get_size() +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector -m1 = MatrixMPI(list(range(1, 21)), (4, 5)) -m2 = MatrixMPI(list(range(1, 16)), (5, 3)) +n = 1_00 +h = 1 / (n - 1) -m_mul = m1 * m2 +# Initialisierung der Matrix A und des Vektor f für LGS Au = f +A = Matrix(np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1)) +f = Vector([h ** 2 * 2] * n) -v1 = VectorMPI(list(range(1, 21))) -v2 = VectorMPI(list(reversed(list(range(1, 21))))) +# Toleranz epsilon +tol = 0.001 -v_add = v1 + v2 -v_mul = v1.T() * v2 - -if rank == 0: - print(m_mul) - print("---") - print(v_add) - print(v_mul) +cg.cg(n, A, f, tol) diff --git a/src/matrix.py b/src/matrix.py index f851a90..06f0eb4 100644 --- a/src/matrix.py +++ b/src/matrix.py @@ -98,7 +98,7 @@ class Matrix: flattened_data = [] rows = 0 for matrix in matrices: - flattened_data.extend(matrix.get_data()) + flattened_data.extend(matrix.get_matrix()) rows += matrix.__shape__[0] cols = matrices[0].__shape__[1] return Matrix(flattened_data, (rows, cols)) diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index e1b3396..98c436b 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -34,9 +34,20 @@ class MatrixMPI: cols = self.__data__.shape()[1] return Matrix(self.__data__[self.__chunk__], (rows, cols)) - def get_data(self): + def get_matrix(self): + """ + Returns the ``Matrix`` that is used internally + :return: The ``Matrix`` that is used internally + """ return self.__data__ + def get_data(self): + """ + Returns the raw data of the internal data structure + :return: The raw data of the internal data structure + """ + return self.__data__.get_data() + def transpose(self): """ :return: the transpose of the matrix @@ -91,7 +102,7 @@ class MatrixMPI: def __mul__(self, other): if isinstance(other, MatrixMPI): - other = other.get_data() + other = other.get_matrix() gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() * other) data = self.__mpi_comm__.bcast(gathered_data) return MatrixMPI.of(Matrix.flatten(data)) diff --git a/src/vector_mpi.py b/src/vector_mpi.py index 795d1bb..b609288 100644 --- a/src/vector_mpi.py +++ b/src/vector_mpi.py 
@@ -13,8 +13,19 @@ class VectorMPI(MatrixMPI): return VectorMPI(vector.get_data(), vector.shape()) def get_vector(self): + """ + Returns the ``Vector`` that is used internally + :return: The ``Vector`` that is used internally + """ return self.__data__ + def get_data(self): + """ + Returns the raw data of the internal data structure + :return: The raw data of the internal data structure + """ + return self.__data__.get_data() + def shape(self): return self.__data__.shape() @@ -55,7 +66,7 @@ class VectorMPI(MatrixMPI): def __rmul__(self, other): if isinstance(other, MatrixMPI): - return VectorMPI.of(other.get_data() * self.get_vector()) + return VectorMPI.of(other.get_matrix() * self.get_vector()) return self * other def __truediv__(self, other): From 219570e4f10813069dc2c83eb39a35aaa4c08b2a Mon Sep 17 00:00:00 2001 From: Niklas Birk Date: Sat, 27 Apr 2024 18:21:33 +0200 Subject: [PATCH 5/6] Refactoring and more parallel stuff --- src/cg.py | 73 +++++++++++++------------------------- src/main.py | 21 +++++++---- src/matrix.py | 23 +++++++----- src/matrix_mpi.py | 90 ++++++++++++++++++++++++++++++++--------------- src/vector.py | 13 +++++++ src/vector_mpi.py | 80 +++++++++++++++++------------------------ 6 files changed, 162 insertions(+), 138 deletions(-) diff --git a/src/cg.py b/src/cg.py index e60b501..3302519 100644 --- a/src/cg.py +++ b/src/cg.py @@ -1,58 +1,35 @@ -from mpi4py import MPI - from matrix_mpi import MatrixMPI as Matrix from vector_mpi import VectorMPI as Vector -comm = MPI.COMM_WORLD -size = comm.Get_size() -rank = comm.Get_rank() +# from matrix import Matrix +# from vector import Vector -def cg(n: int, A: Matrix, f: Vector, tol: float): - # Intialisierung des Startvektors x - x = Vector([1] * n) +def cg(A: Matrix, x0: Vector, b: Vector, tolerance: float = 1e-3, max_iterations: int = 1_000): + """ + Solves a system of linear equations of the form Ax = b numerically. + + :param A: The transformation matrix A + :param x0: A vector to start the algorithm with + :param b: The solution vector of the system of linear equations, the right hand side + :param tolerance: The tolerance at which to stop the algorithm, default is 0.001 + :param max_iterations: Maximum number of iterations, default is 1000 + """ + iterations = 0 - # Anzahl der Schritte - count = 0 + x = x0 + r = b - A * x + d = r - # Anfangswerte berechnen - r = f - A * x # Anfangsresiduum - p = r # Anfangsabstiegsrichtung + while r.norm() >= tolerance and iterations < max_iterations: + z = A * d - while r.norm() > tol and count < 1000: - print(f"{count}. 
Iterationsschritt:\n") - # print("Iterierte:", x) - # print("Residuumsnorm: ", r.norm()) + alpha = (r.T() * d) / (d.T() * z) + x = x + alpha * d + r = r - alpha * z - z = A * p # Matrix-Vektorprodukt berechnen und speichern + beta = -(r.T() * z) / (d.T() * z) + d = r + beta * d - # Minimiere phi in Richung p um neue Iterierte x zu finden - alpha = (r.T() * p) / (p.T() * z) # (np.dot(r , p)) / (np.dot(p , z)) - # print(alpha) - - x = x + alpha * p # neue Itterierte x - r = r - alpha * z # neues Residuum - - # Bestimmung der neuen Suchrichtung - beta = - (r.T() * z) / (p.T() * z) # (np.dot(r , z)) / (np.dot(p , z)) - p = r + beta * p # neue konjugierte Abstiegsrichtung - - count = count + 1 - - print(f"{rank} APFELSTRUDEL") - # if rank == 0: - # # Vergleich mit numpy-interner Lsg - # u = np.linalg.solve(np.array(A.get_data()), np.array(f.get_data())) - # - # print("Lösung mit CG-Verfahren:", x) - # print("Numpy interne Lösung:", u) - # - # if (Vector(u) - x).norm() > eps: - # print("Der CG-Algorithmus hat nicht richtig funktioniert!") - # else: - # print("Der CG-Algorithmus war erfolgreich.") - # - # plt.plot(x.get_data(), linewidth=2) - # plt.plot(u, linewidth=2) - # - # plt.show() + iterations = iterations + 1 + return x diff --git a/src/main.py b/src/main.py index bab7dd2..05314ae 100644 --- a/src/main.py +++ b/src/main.py @@ -1,18 +1,25 @@ -import numpy as np +from mpi4py import MPI import cg from matrix_mpi import MatrixMPI as Matrix from vector_mpi import VectorMPI as Vector +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() + n = 1_00 h = 1 / (n - 1) -# Initialisierung der Matrix A und des Vektor f für LGS Au = f -A = Matrix(np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1)) -f = Vector([h ** 2 * 2] * n) +A = Matrix([-1, 2, -1], structure="tridiagonal", n=n) +x0 = Vector([1] * n) +b = Vector([h**2 * 2] * n) -# Toleranz epsilon -tol = 0.001 +x = cg.cg(A, x0, b) -cg.cg(n, A, f, tol) +if rank == 0: + print(x) diff --git a/src/matrix.py b/src/matrix.py index 06f0eb4..a5e776c 100644 --- a/src/matrix.py +++ b/src/matrix.py @@ -22,7 +22,6 @@ class Matrix: - ``Matrix(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\" - ``Matrix(str, int)``: will create a new square matrix of given size and TODO - :param data: Either a list or an numpy ndarray :param shape: A tuple containing the amount of rows and columns :param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\" @@ -84,6 +83,16 @@ class Matrix: """ return self.__data__ + @staticmethod + def flatten_internal(matrices): + flattened_data = [] + rows = 0 + for matrix in matrices: + flattened_data.extend(matrix.get_data()) + rows += matrix.__shape__[0] + cols = matrices[0].__shape__[1] + return flattened_data, (rows, cols) + @staticmethod def flatten(matrices: list): """ @@ -95,13 +104,8 @@ class Matrix: :return: A ``Matrix`` extended by all matrices in the list. 
:rtype: ``Matrix`` """ - flattened_data = [] - rows = 0 - for matrix in matrices: - flattened_data.extend(matrix.get_matrix()) - rows += matrix.__shape__[0] - cols = matrices[0].__shape__[1] - return Matrix(flattened_data, (rows, cols)) + flattened_data, shape = Matrix.flatten_internal(matrices) + return Matrix(flattened_data, shape) def shape(self): """ @@ -247,6 +251,9 @@ class Matrix: def __rmul__(self, other): return self * other + def get_abs_sum_of_squares(self): + return self.__abs_sum_of_squares__() + def __abs_sum_of_squares__(self): rows = self.__shape__[0] cols = self.__shape__[1] diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index 98c436b..c23f53f 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -1,3 +1,5 @@ +import math + import numpy from mpi4py import MPI @@ -9,39 +11,75 @@ class MatrixMPI: __mpi_size__ = __mpi_comm__.Get_size() __mpi_rank__ = __mpi_comm__.Get_rank() - __data__: Matrix = None + __data__ = None + __rank_subdata__ = None __chunk__: list = None def __init__(self, data=None, shape=None, structure=None, model=None, offset=None, n=None): - self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) + """ + Creates a new matrix. + The type of the matrix depends on the signature and arguments. + - ``MatrixMPI(list)``: will create a new matrix with the given data in the list and its shape. + - ``MatrixMPI(numpy.ndarray)``: will create a new matrix with the given data in ndarray and its shape. + - ``MatrixMPI(list, (int,int))``: will create a new nxm matrix with the given rows and columns and data in list. + - ``MatrixMPI(list, str, int, int)``: will create a new square matrix of given size and structure of \"diagonal\" + - ``MatrixMPI(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\" + - ``MatrixMPI(str, int)``: will create a new square matrix of given size and TODO + + :param data: Either a list or an numpy ndarray + :param shape: A tuple containing the amount of rows and columns + :param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\" + :param model: TODO + :param offset: Offset to diagonal axis + :param n: Amount of rows of a square matrix or offset in case of diagonal structure + + :type data: Matrix | list | numpy.ndarray + :type shape: (int, int) + :type structure: str + :type model: str + :type offset: int + :type n: int + + :rtype: MatrixMPI + """ + if isinstance(data, Matrix): + self.__data__ = data + else: + self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n) + + # Calculate how much rows are delegated to the rank total_amount_of_rows = self.__data__.shape()[0] chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__) self.__chunk__ = chunks[self.__mpi_rank__].tolist() + # Store the delegated rows explicitly for calculations + rows = len(self.__chunk__) + cols = self.__data__.shape()[1] + self.__rank_subdata__ = Matrix(self.__data__[self.__chunk__], (rows, cols)) + @staticmethod def of(matrix: Matrix): - return MatrixMPI(matrix.get_data(), matrix.shape()) - - def __str__(self): - return str(self.__data__) + return MatrixMPI(matrix) def shape(self): return self.__data__.shape() - def get_rank_submatrix(self): - rows = len(self.__chunk__) - cols = self.__data__.shape()[1] - return Matrix(self.__data__[self.__chunk__], (rows, cols)) - - def get_matrix(self): + def get_rank_subdata(self): """ - Returns the ``Matrix`` that is used internally + 
Returns only the delegated rows of the rank as ``Matrix`` + :return: The delegated rows as ``Matrix`` + """ + return self.__rank_subdata__ + + def get_data(self): + """ + Returns the whole ``Matrix`` that is used internally :return: The ``Matrix`` that is used internally """ return self.__data__ - def get_data(self): + def get_internal_data(self): """ Returns the raw data of the internal data structure :return: The raw data of the internal data structure @@ -75,16 +113,12 @@ class MatrixMPI: return self.__data__ == other def __neg__(self): - gathered_data = self.__mpi_comm__.gather(-self.get_rank_submatrix()) - data = self.__mpi_comm__.bcast(gathered_data) - return MatrixMPI.of(Matrix.flatten(data)) + return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(-self.__rank_subdata__))) def __add__(self, other): if isinstance(other, MatrixMPI): - other = other.get_rank_submatrix() - gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() + other) - data = self.__mpi_comm__.bcast(gathered_data) - return MatrixMPI.of(Matrix.flatten(data)) + other = other.__rank_subdata__ + return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other))) def __radd__(self, other): return self + other @@ -96,16 +130,12 @@ class MatrixMPI: return -self + other def __truediv__(self, other): - gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() / other) - data = self.__mpi_comm__.bcast(gathered_data) - return MatrixMPI.of(Matrix.flatten(data)) + return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other))) def __mul__(self, other): if isinstance(other, MatrixMPI): - other = other.get_matrix() - gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() * other) - data = self.__mpi_comm__.bcast(gathered_data) - return MatrixMPI.of(Matrix.flatten(data)) + other = other.get_data() + return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other))) def __rmul__(self, other): return self * other @@ -121,6 +151,10 @@ class MatrixMPI: :return: the norm as a number """ + if f == "frobenius": + return math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares())) + elif f == "row sum": + return max(self.__mpi_comm__.allgather(self.__rank_subdata__.norm(f))) return self.__data__.norm(f) def __getitem__(self, key): diff --git a/src/vector.py b/src/vector.py index 7f0ea93..ad7455a 100644 --- a/src/vector.py +++ b/src/vector.py @@ -24,6 +24,19 @@ class Vector(Matrix): else: raise ValueError("data must be a ``list``, a ``numpy.ndarray`` or an integer for dimension") + @staticmethod + def flatten(vectors: list): + """ + Flattens a list of matrices into one bigger matrix. + The columns must match the first ``Matrix`` in the list and the rows can be arbitrarily. + + :param vectors: A list of vectors. + :type vectors: list + :return: A ``Vector`` extended by all matrices in the list. 
+ """ + flattened_data, shape = Matrix.flatten_internal(vectors) + return Vector(flattened_data, shape) + def __eq__(self, other): """ Return ``self==value`` diff --git a/src/vector_mpi.py b/src/vector_mpi.py index b609288..35341e0 100644 --- a/src/vector_mpi.py +++ b/src/vector_mpi.py @@ -1,45 +1,30 @@ +import math + +import numpy +from mpi4py import MPI + from matrix_mpi import MatrixMPI from vector import Vector class VectorMPI(MatrixMPI): - __data__: Vector = None - def __init__(self, data=None, shape=None): - self.__data__ = Vector(data=data, shape=shape) + if isinstance(data, Vector): + self.__data__ = data + else: + self.__data__ = Vector(data=data, shape=shape) + + # Calculate how much rows are delegated to the rank + total_amount_of_rows = self.__data__.shape()[0] + chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__) + self.__chunk__ = chunks[self.__mpi_rank__].tolist() + + # Store the delegated rows explicitly for calculations + self.__rank_subdata__ = Vector(self.__data__[self.__chunk__]) @staticmethod def of(vector: Vector): - return VectorMPI(vector.get_data(), vector.shape()) - - def get_vector(self): - """ - Returns the ``Vector`` that is used internally - :return: The ``Vector`` that is used internally - """ - return self.__data__ - - def get_data(self): - """ - Returns the raw data of the internal data structure - :return: The raw data of the internal data structure - """ - return self.__data__.get_data() - - def shape(self): - return self.__data__.shape() - - def __eq__(self, other): - """ - Return ``self==value`` - - :param other: The object to compare to; must be either a ``Vector``, a ``list`` or a ``numpy.ndarray`` - :return: True if data in the same-shaped vectors are equal to the given data in other for each component otherwise False - """ - if isinstance(other, VectorMPI): - return self.__data__ == other.__data__ - else: - return self.__data__ == other + return VectorMPI(vector) def transpose(self): """ @@ -50,29 +35,36 @@ class VectorMPI(MatrixMPI): def T(self): return self.transpose() + def __str__(self): + return str(self.__data__) + def __neg__(self): return VectorMPI.of(-self.__data__) def __add__(self, other): if isinstance(other, VectorMPI): - other = other.__data__ - return VectorMPI.of(self.__data__ + other) + other = other.__rank_subdata__ + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other))) def __mul__(self, other): if isinstance(other, VectorMPI): other = other.__data__ - result = self.__data__ * other + + if isinstance(other, int) or isinstance(other, float): + result = Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other)) + else: + result = self.__data__ * other return VectorMPI.of(result) if isinstance(result, Vector) else result def __rmul__(self, other): if isinstance(other, MatrixMPI): - return VectorMPI.of(other.get_matrix() * self.get_vector()) + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(other.get_rank_subdata() * self.get_data()))) return self * other def __truediv__(self, other): if isinstance(other, VectorMPI): - other = other.__data__ - return VectorMPI.of(self.__data__ / other) + other = other.__rank_subdata__ + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other))) def norm(self, **kwargs): """ @@ -81,7 +73,7 @@ class VectorMPI(MatrixMPI): :param kwargs: ignored :return: the 2-norm of the vector """ - return self.__data__.norm() + return 
math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares())) def normalize(self): """ @@ -90,10 +82,4 @@ class VectorMPI(MatrixMPI): :return: the normalized vector """ - return VectorMPI.of(self.__data__ / self.norm()) - - def __getitem__(self, key): - return self.__data__[key] - - def __setitem__(self, key, value): - self.__data__[key] = value + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / self.norm()))) From 02263d602a1db6d5e27013e9be7a4b065b53ba10 Mon Sep 17 00:00:00 2001 From: niklas Date: Sat, 27 Jul 2024 15:17:21 +0200 Subject: [PATCH 6/6] More work on parallel stuff --- src/{main.py => main_cg.py} | 6 +++--- src/main_cg_timeit.py | 27 +++++++++++++++++++++++++++ src/main_diag_vec.py | 21 +++++++++++++++++++++ src/main_diag_vec_timeit.py | 23 +++++++++++++++++++++++ src/main_matrix_vec.py | 22 ++++++++++++++++++++++ src/main_matrix_vec_timeit.py | 24 ++++++++++++++++++++++++ src/matrix_mpi.py | 3 +++ src/vector_mpi.py | 1 - 8 files changed, 123 insertions(+), 4 deletions(-) rename src/{main.py => main_cg.py} (85%) create mode 100644 src/main_cg_timeit.py create mode 100644 src/main_diag_vec.py create mode 100644 src/main_diag_vec_timeit.py create mode 100644 src/main_matrix_vec.py create mode 100644 src/main_matrix_vec_timeit.py diff --git a/src/main.py b/src/main_cg.py similarity index 85% rename from src/main.py rename to src/main_cg.py index 05314ae..23eaac4 100644 --- a/src/main.py +++ b/src/main_cg.py @@ -12,7 +12,7 @@ comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() -n = 1_00 +n = 1_000 h = 1 / (n - 1) A = Matrix([-1, 2, -1], structure="tridiagonal", n=n) @@ -21,5 +21,5 @@ b = Vector([h**2 * 2] * n) x = cg.cg(A, x0, b) -if rank == 0: - print(x) +# if rank == 0: +# print(f"ranks = {size}: x = {x}") diff --git a/src/main_cg_timeit.py b/src/main_cg_timeit.py new file mode 100644 index 0000000..e8f3000 --- /dev/null +++ b/src/main_cg_timeit.py @@ -0,0 +1,27 @@ +from mpi4py import MPI +import sys +import timeit + +import cg + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() + +n = int(sys.argv[1]) +h = 1 / (n - 1) + +A = Matrix([-1, 2, -1], structure="tridiagonal", n=n) +x0 = Vector([1] * n) +b = Vector([h**2 * 2] * n) + +time = timeit.timeit(lambda: cg.cg(A, x0, b), number=1) + +if rank == 0: + print(f"ranks = {size}: time = {time}") diff --git a/src/main_diag_vec.py b/src/main_diag_vec.py new file mode 100644 index 0000000..7cd4db1 --- /dev/null +++ b/src/main_diag_vec.py @@ -0,0 +1,21 @@ +from mpi4py import MPI + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() + +n = 10_000 + +A = Matrix([3], structure="diagonal", offset=0, n=n) +v = Vector([7] * n) + +x = A * v + +# if rank == 0: +# print(f"ranks = {size}: x = {x}") diff --git a/src/main_diag_vec_timeit.py b/src/main_diag_vec_timeit.py new file mode 100644 index 0000000..c4be531 --- /dev/null +++ b/src/main_diag_vec_timeit.py @@ -0,0 +1,23 @@ +from mpi4py import MPI +import sys +import timeit + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = 
comm.Get_rank() + +n = int(sys.argv[1]) + +A = Matrix([3], structure="diagonal", offset=0, n=n) +v = Vector([7] * n) + +time = timeit.timeit(lambda: A * v, number=1) + +if rank == 0: + print(f"ranks = {size}: time = {time}s") diff --git a/src/main_matrix_vec.py b/src/main_matrix_vec.py new file mode 100644 index 0000000..4366170 --- /dev/null +++ b/src/main_matrix_vec.py @@ -0,0 +1,22 @@ +from mpi4py import MPI + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() + +n = 10_000 + +m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)] +A = Matrix(m_data, (n, n)) +v = Vector(list(range(1, n+1))) + +x = A * v + +# if rank == 0: +# print(f"ranks = {size}: x = {x}") diff --git a/src/main_matrix_vec_timeit.py b/src/main_matrix_vec_timeit.py new file mode 100644 index 0000000..2903676 --- /dev/null +++ b/src/main_matrix_vec_timeit.py @@ -0,0 +1,24 @@ +from mpi4py import MPI +import sys +import timeit + +from matrix_mpi import MatrixMPI as Matrix +from vector_mpi import VectorMPI as Vector + +# from matrix import Matrix +# from vector import Vector + +comm = MPI.COMM_WORLD +size = comm.Get_size() +rank = comm.Get_rank() + +n = int(sys.argv[1]) + +m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)] +A = Matrix(m_data, (n, n)) +v = Vector(list(range(1, n+1))) + +time = timeit.timeit(lambda: A * v, number=1) + +if rank == 0: + print(f"ranks = {size}: time = {time}s") diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py index c23f53f..5ef8883 100644 --- a/src/matrix_mpi.py +++ b/src/matrix_mpi.py @@ -100,6 +100,9 @@ class MatrixMPI: """ return self.transpose() + def __str__(self): + return str(self.__data__) + def __eq__(self, other): """ Return ``self==value`` diff --git a/src/vector_mpi.py b/src/vector_mpi.py index 35341e0..bcb8f21 100644 --- a/src/vector_mpi.py +++ b/src/vector_mpi.py @@ -1,7 +1,6 @@ import math import numpy -from mpi4py import MPI from matrix_mpi import MatrixMPI from vector import Vector
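
The distributed operations in this series all follow one pattern: every rank keeps the block of rows that numpy.array_split assigns to it, applies the operation to that block only, and the full result is rebuilt from the per-rank blocks (gather plus bcast in the first patch, collapsed into a single allgather by patch 5). Below is a minimal stand-alone sketch of that pattern using plain numpy arrays instead of the Matrix class; the file and variable names are illustrative and not part of the series.

# Sketch of the row-block distribution behind MatrixMPI, on plain numpy arrays
# (illustrative only). Run with e.g. "mpiexec -n 4 python rowblock_sketch.py";
# a single-process run without an MPI launcher also works.
import numpy
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

full = numpy.arange(12.0).reshape(6, 2)                       # full matrix, known on every rank
chunks = numpy.array_split(list(range(full.shape[0])), size)  # row indices per rank
local_rows = full[chunks[rank]]                               # rows delegated to this rank

local_result = -local_rows                                    # any elementwise operation on the block
blocks = comm.allgather(local_result)                         # one block per rank, in rank order
result = numpy.vstack(blocks)                                 # plays the role of Matrix.flatten

if rank == 0:
    print(result)                                             # equals -full on every run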
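
Patch 5 computes the Frobenius norm (and the vector 2-norm) in the same distributed way: each rank reduces its row block to a partial sum of squares, one allreduce adds the partial sums, and the square root is taken last. A sketch of just that step, again with plain numpy data and illustrative names:

# Sketch of the allreduce-based Frobenius norm from patch 5 (illustrative, plain numpy).
import math

import numpy
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

full = numpy.arange(12.0).reshape(6, 2)
chunks = numpy.array_split(list(range(full.shape[0])), size)
local_rows = full[chunks[rank]]

local_sum_of_squares = float(numpy.sum(local_rows ** 2))     # per-rank partial sum
frobenius = math.sqrt(comm.allreduce(local_sum_of_squares))  # default reduction op is SUM

if rank == 0:
    print(frobenius, numpy.linalg.norm(full))                # the two values agree

The row sum norm takes a different route in patch 5: each rank computes the norm of its own block and the maximum of the per-rank values is taken from an allgather, since a maximum rather than a sum is needed across ranks.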
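
The cg function from patch 5 uses alpha = (r.T() * d) / (d.T() * z) and beta = -(r.T() * z) / (d.T() * z); for a symmetric positive definite matrix these coincide with the usual CG coefficients, so a serial numpy version can serve as a reference when checking the distributed result on the tridiagonal system set up in main_cg.py. The sketch below mirrors those updates one-to-one; the name cg_reference and the residual check at the end are illustrative.

# Plain-numpy reference for the CG iteration in src/cg.py (patch 5); setup and names are
# illustrative, but the update formulas mirror the distributed implementation one-to-one.
import numpy as np

def cg_reference(A, x0, b, tolerance=1e-3, max_iterations=1000):
    x = x0
    r = b - A @ x
    d = r
    iterations = 0
    while np.linalg.norm(r) >= tolerance and iterations < max_iterations:
        z = A @ d
        alpha = (r @ d) / (d @ z)
        x = x + alpha * d
        r = r - alpha * z
        beta = -(r @ z) / (d @ z)
        d = r + beta * d
        iterations += 1
    return x

n = 100
h = 1 / (n - 1)
A = np.diag(-np.ones(n - 1), 1) + np.diag(2 * np.ones(n)) + np.diag(-np.ones(n - 1), -1)
b = np.full(n, 2 * h ** 2)
x = cg_reference(A, np.ones(n), b)
print(np.linalg.norm(A @ x - b))  # residual norm, ends up below the tolerance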
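
The timing drivers added in the last patch read the problem size from the command line (sys.argv[1]) and let rank 0 print one "ranks = ...: time = ..." line per run, so a typical invocation looks something like "mpiexec -n 4 python src/main_cg_timeit.py 10000"; the exact launcher name depends on the local MPI installation.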