diff --git a/src/cg.py b/src/cg.py
new file mode 100644
index 0000000..3302519
--- /dev/null
+++ b/src/cg.py
@@ -0,0 +1,35 @@
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+
+def cg(A: Matrix, x0: Vector, b: Vector, tolerance: float = 1e-3, max_iterations: int = 1_000):
+    """
+    Solves a system of linear equations of the form Ax = b numerically.
+
+    :param A: The transformation matrix A
+    :param x0: A vector to start the algorithm with
+    :param b: The right-hand side vector of the system of linear equations
+    :param tolerance: The tolerance at which to stop the algorithm, default is 0.001
+    :param max_iterations: Maximum number of iterations, default is 1000
+    """
+    iterations = 0
+
+    x = x0
+    r = b - A * x
+    d = r
+
+    while r.norm() >= tolerance and iterations < max_iterations:
+        z = A * d
+
+        alpha = (r.T() * d) / (d.T() * z)
+        x = x + alpha * d
+        r = r - alpha * z
+
+        beta = -(r.T() * z) / (d.T() * z)
+        d = r + beta * d
+
+        iterations = iterations + 1
+    return x
diff --git a/src/main.py b/src/main.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/main_cg.py b/src/main_cg.py
new file mode 100644
index 0000000..23eaac4
--- /dev/null
+++ b/src/main_cg.py
@@ -0,0 +1,25 @@
+from mpi4py import MPI
+
+import cg
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = 1_000
+h = 1 / (n - 1)
+
+A = Matrix([-1, 2, -1], structure="tridiagonal", n=n)
+x0 = Vector([1] * n)
+b = Vector([h**2 * 2] * n)
+
+x = cg.cg(A, x0, b)
+
+# if rank == 0:
+#     print(f"ranks = {size}: x = {x}")
diff --git a/src/main_cg_timeit.py b/src/main_cg_timeit.py
new file mode 100644
index 0000000..e8f3000
--- /dev/null
+++ b/src/main_cg_timeit.py
@@ -0,0 +1,27 @@
+from mpi4py import MPI
+import sys
+import timeit
+
+import cg
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = int(sys.argv[1])
+h = 1 / (n - 1)
+
+A = Matrix([-1, 2, -1], structure="tridiagonal", n=n)
+x0 = Vector([1] * n)
+b = Vector([h**2 * 2] * n)
+
+time = timeit.timeit(lambda: cg.cg(A, x0, b), number=1)
+
+if rank == 0:
+    print(f"ranks = {size}: time = {time}")
diff --git a/src/main_diag_vec.py b/src/main_diag_vec.py
new file mode 100644
index 0000000..7cd4db1
--- /dev/null
+++ b/src/main_diag_vec.py
@@ -0,0 +1,21 @@
+from mpi4py import MPI
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = 10_000
+
+A = Matrix([3], structure="diagonal", offset=0, n=n)
+v = Vector([7] * n)
+
+x = A * v
+
+# if rank == 0:
+#     print(f"ranks = {size}: x = {x}")
diff --git a/src/main_diag_vec_timeit.py b/src/main_diag_vec_timeit.py
new file mode 100644
index 0000000..c4be531
--- /dev/null
+++ b/src/main_diag_vec_timeit.py
@@ -0,0 +1,23 @@
+from mpi4py import MPI
+import sys
+import timeit
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = int(sys.argv[1])
+
+A = Matrix([3], structure="diagonal", offset=0, n=n)
+v = Vector([7] * n)
+
+time = timeit.timeit(lambda: A * v, number=1)
+
+if rank == 0:
+    print(f"ranks = {size}: time = {time}s")
diff --git a/src/main_matrix_vec.py b/src/main_matrix_vec.py
new file mode 100644
index 0000000..4366170
--- /dev/null
+++ b/src/main_matrix_vec.py
@@ -0,0 +1,22 @@
+from mpi4py import MPI
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = 10_000
+
+m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)]
+A = Matrix(m_data, (n, n))
+v = Vector(list(range(1, n+1)))
+
+x = A * v
+
+# if rank == 0:
+#     print(f"ranks = {size}: x = {x}")
diff --git a/src/main_matrix_vec_timeit.py b/src/main_matrix_vec_timeit.py
new file mode 100644
index 0000000..2903676
--- /dev/null
+++ b/src/main_matrix_vec_timeit.py
@@ -0,0 +1,24 @@
+from mpi4py import MPI
+import sys
+import timeit
+
+from matrix_mpi import MatrixMPI as Matrix
+from vector_mpi import VectorMPI as Vector
+
+# from matrix import Matrix
+# from vector import Vector
+
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+
+n = int(sys.argv[1])
+
+m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)]
+A = Matrix(m_data, (n, n))
+v = Vector(list(range(1, n+1)))
+
+time = timeit.timeit(lambda: A * v, number=1)
+
+if rank == 0:
+    print(f"ranks = {size}: time = {time}s")
diff --git a/src/matrix.py b/src/matrix.py
index 9e37544..a5e776c 100644
--- a/src/matrix.py
+++ b/src/matrix.py
@@ -22,7 +22,6 @@ class Matrix:
         - ``Matrix(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\"
         - ``Matrix(str, int)``: will create a new square matrix of given size and TODO
 
-
         :param data: Either a list or an numpy ndarray
         :param shape: A tuple containing the amount of rows and columns
         :param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\"
@@ -84,6 +83,30 @@ class Matrix:
         """
         return self.__data__
 
+    @staticmethod
+    def flatten_internal(matrices):
+        flattened_data = []
+        rows = 0
+        for matrix in matrices:
+            flattened_data.extend(matrix.get_data())
+            rows += matrix.__shape__[0]
+        cols = matrices[0].__shape__[1]
+        return flattened_data, (rows, cols)
+
+    @staticmethod
+    def flatten(matrices: list):
+        """
+        Flattens a list of matrices into one bigger matrix.
+        The columns must match the first ``Matrix`` in the list and the number of rows can be arbitrary.
+
+        :param matrices: A list of matrices.
+        :type matrices: list
+        :return: A ``Matrix`` extended by all matrices in the list.
+        :rtype: ``Matrix``
+        """
+        flattened_data, shape = Matrix.flatten_internal(matrices)
+        return Matrix(flattened_data, shape)
+
     def shape(self):
         """
         :return: the shape of the matrix, which is ``(rows, columns)``
@@ -191,10 +214,19 @@ class Matrix:
         else:
             raise ValueError("A ``Matrix`` can only be divided ba a number")
 
+    def __mul_rowmatrix_matrix__internal__(self, other):
+        cols = other.__shape__[1]
+        new_data = [0] * cols
+        for i in range(cols):
+            new_data[i] = sum([self.__data__[0][j] * other.__data__[j][i] for j in range(self.__shape__[1])])
+        return new_data
+
     def __mul_matrix_internal__(self, other):
+        if self.__shape__[0] == 1:
+            return self.__mul_rowmatrix_matrix__internal__(other)
         rows = self.__shape__[0]
         cols = other.__shape__[1]
-        new_data = [([0] * rows) for _ in range(cols)]
+        new_data = [([0] * cols) for _ in range(rows)]
         for i in range(rows):
             for k in range(cols):
                 new_data[i][k] = sum([self.__data__[i][j] * other.__data__[j][k] for j in range(self.__shape__[1])])
@@ -219,7 +251,10 @@ class Matrix:
     def __rmul__(self, other):
         return self * other
 
-    def __abs_sum__(self):
+    def get_abs_sum_of_squares(self):
+        return self.__abs_sum_of_squares__()
+
+    def __abs_sum_of_squares__(self):
         rows = self.__shape__[0]
         cols = self.__shape__[1]
         abs_sum = 0
@@ -258,7 +293,7 @@ class Matrix:
         :return: the norm as a number
         """
         if f == "frobenius":
-            norm = math.sqrt(self.__abs_sum__())
+            norm = math.sqrt(self.__abs_sum_of_squares__())
         elif f == "col sum":
             norm = max(self.__col_sums__())
         elif f == "row sum":
diff --git a/src/matrix_mpi.py b/src/matrix_mpi.py
index 49eb7b1..5ef8883 100644
--- a/src/matrix_mpi.py
+++ b/src/matrix_mpi.py
@@ -1,3 +1,5 @@
+import math
+
 import numpy
 from mpi4py import MPI
 
@@ -5,34 +7,161 @@ from matrix import Matrix
 
 
 class MatrixMPI:
-    __comm__ = MPI.COMM_WORLD
-    __size__ = __comm__.Get_size()
-    __rank__ = __comm__.Get_rank()
+    __mpi_comm__ = MPI.COMM_WORLD
+    __mpi_size__ = __mpi_comm__.Get_size()
+    __mpi_rank__ = __mpi_comm__.Get_rank()
 
-    __matrix__: Matrix = None
+    __data__ = None
+    __rank_subdata__ = None
     __chunk__: list = None
 
     def __init__(self, data=None, shape=None, structure=None, model=None, offset=None, n=None):
-        self.__matrix__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n)
+        """
+        Creates a new matrix.
+        The type of the matrix depends on the signature and arguments.
 
-        total_amount_of_rows = self.__matrix__.shape()[0]
-        chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__size__)
-        self.__chunk__ = chunks[self.__rank__].tolist()
+        - ``MatrixMPI(list)``: will create a new matrix with the given data in the list and its shape.
+        - ``MatrixMPI(numpy.ndarray)``: will create a new matrix with the given data in ndarray and its shape.
+        - ``MatrixMPI(list, (int,int))``: will create a new nxm matrix with the given rows and columns and data in list.
+        - ``MatrixMPI(list, str, int, int)``: will create a new square matrix of given size and structure of \"diagonal\"
+        - ``MatrixMPI(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\"
+        - ``MatrixMPI(str, int)``: will create a new square matrix of given size and TODO
 
-    def __str__(self):
-        return str(self.__matrix__)
+        :param data: Either a list or a numpy ndarray
+        :param shape: A tuple containing the amount of rows and columns
+        :param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\"
+        :param model: TODO
+        :param offset: Offset to diagonal axis
+        :param n: Amount of rows of a square matrix or offset in case of diagonal structure
 
-    def get_rank_data(self):
+        :type data: Matrix | list | numpy.ndarray
+        :type shape: (int, int)
+        :type structure: str
+        :type model: str
+        :type offset: int
+        :type n: int
+
+        :rtype: MatrixMPI
+        """
+        if isinstance(data, Matrix):
+            self.__data__ = data
+        else:
+            self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n)
+
+        # Calculate how many rows are delegated to the rank
+        total_amount_of_rows = self.__data__.shape()[0]
+        chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__)
+        self.__chunk__ = chunks[self.__mpi_rank__].tolist()
+
+        # Store the delegated rows explicitly for calculations
         rows = len(self.__chunk__)
-        cols = self.__matrix__.shape()[1]
-        return Matrix(self.__matrix__[self.__chunk__], (rows, cols))
+        cols = self.__data__.shape()[1]
+        self.__rank_subdata__ = Matrix(self.__data__[self.__chunk__], (rows, cols))
+
+    @staticmethod
+    def of(matrix: Matrix):
+        return MatrixMPI(matrix)
+
+    def shape(self):
+        return self.__data__.shape()
+
+    def get_rank_subdata(self):
+        """
+        Returns only the delegated rows of the rank as ``Matrix``
+        :return: The delegated rows as ``Matrix``
+        """
+        return self.__rank_subdata__
+
+    def get_data(self):
+        """
+        Returns the whole ``Matrix`` that is used internally
+        :return: The ``Matrix`` that is used internally
+        """
+        return self.__data__
+
+    def get_internal_data(self):
+        """
+        Returns the raw data of the internal data structure
+        :return: The raw data of the internal data structure
+        """
+        return self.__data__.get_data()
 
     def transpose(self):
-        return self.__matrix__.transpose()
+        """
+        :return: the transpose of the matrix
+        """
+        return MatrixMPI.of(self.__data__.transpose())
 
     def T(self):
+        """
+        Same as ``matrix.transpose()``
+
+        :return: see ``matrix.transpose()``
+        """
         return self.transpose()
 
+    def __str__(self):
+        return str(self.__data__)
 
-mpi_matrix = MatrixMPI(list(range(1, 25)), (12, 2))
-print(mpi_matrix.transpose())
+    def __eq__(self, other):
+        """
+        Return ``self==value``
+
+        :param other: The object to compare to; must be either a ``MatrixMPI``, ``Matrix``, a ``list`` or a ``numpy.ndarray``
+        :return: True if data in the matrix are equal to the given data in other for each component, otherwise False
+        """
+        if isinstance(other, MatrixMPI):
+            return self.__data__ == other.__data__
+        else:
+            return self.__data__ == other
+
+    def __neg__(self):
+        return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(-self.__rank_subdata__)))
+
+    def __add__(self, other):
+        if isinstance(other, MatrixMPI):
+            other = other.__rank_subdata__
+        return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other)))
+
+    def __radd__(self, other):
+        return self + other
+
+    def __sub__(self, other):
+        return self + (-other)
+
+    def __rsub__(self, other):
+        return -self + other
+
+    def __truediv__(self, other):
+        return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other)))
+
+    def __mul__(self, other):
+        if isinstance(other, MatrixMPI):
+            other = other.get_data()
+        return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other)))
+
+    def __rmul__(self, other):
+        return self * other
+
+    def norm(self, f: str = "frobenius"):
+        """
+        Calculates the norm of the matrix.
+
+        A norm is a positive definite, absolutely homogeneous and subadditive function.
+        For matrices, a norm is also sub-multiplicative.
+
+        :param f: The norm to be used, could be either "frobenius", "row sum" or "col sum"
+
+        :return: the norm as a number
+        """
+        if f == "frobenius":
+            return math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares()))
+        elif f == "row sum":
+            return max(self.__mpi_comm__.allgather(self.__rank_subdata__.norm(f)))
+        return self.__data__.norm(f)
+
+    def __getitem__(self, key):
+        return self.__data__[key]
+
+    def __setitem__(self, key, value):
+        self.__data__[key] = value
diff --git a/src/vector.py b/src/vector.py
index c8bf6a8..ad7455a 100644
--- a/src/vector.py
+++ b/src/vector.py
@@ -24,6 +24,19 @@ class Vector(Matrix):
         else:
             raise ValueError("data must be a ``list``, a ``numpy.ndarray`` or an integer for dimension")
 
+    @staticmethod
+    def flatten(vectors: list):
+        """
+        Flattens a list of vectors into one bigger vector.
+        The columns must match the first ``Vector`` in the list and the number of rows can be arbitrary.
+
+        :param vectors: A list of vectors.
+        :type vectors: list
+        :return: A ``Vector`` extended by all vectors in the list.
+        """
+        flattened_data, shape = Matrix.flatten_internal(vectors)
+        return Vector(flattened_data, shape)
+
     def __eq__(self, other):
         """
         Return ``self==value``
@@ -86,7 +99,7 @@ class Vector(Matrix):
             return Vector(self.__mul_vector_same_shape_internal__(other))
         elif self.__shape__ == tuple(reversed(other.__shape__)):
             if self.__shape__[0] == 1:  # Case (_ ... _) * (_\n...\n_) = scalar
-                return super().__mul_matrix_internal__(other)[0][0]
+                return super().__mul_matrix_internal__(other)[0]
             else:  # Case (_\n...\n_) * (_ ... _) = Matrix
                 new_data, shape = self.__mul_tensor_internal__(other)
                 return Matrix(new_data, shape)
@@ -95,10 +108,19 @@ class Vector(Matrix):
         elif isinstance(other, int) or isinstance(other, float):
             return Vector(super().__mul_scalar_internal__(other))
         else:
-            raise ValueError("A ``Vector`` can only be multiplied with an ``Vector`` (dot product or tensor),"
+            raise ValueError("A ``Vector`` can only be multiplied with a ``Vector`` (dot product or tensor), "
                              "a compatible ``Matrix`` or a scalar")
 
+    def __mul_matrix_vector_internal__(self, other):
+        rows = other.__shape__[0]
+        new_data = [0] * rows
+        for i in range(rows):
+            new_data[i] = sum([other.__data__[i][j] * self.__data__[j][0] for j in range(self.__shape__[0])])
+        return new_data
+
     def __rmul__(self, other):
+        if isinstance(other, Matrix):
+            return Vector(self.__mul_matrix_vector_internal__(other))
         return self * other
+
     def __truediv_vector_internal__(self, other):
diff --git a/src/vector_mpi.py b/src/vector_mpi.py
index c943099..bcb8f21 100644
--- a/src/vector_mpi.py
+++ b/src/vector_mpi.py
@@ -1,365 +1,84 @@
-from mpi4py import MPI
+import math
+
+import numpy
+
+from matrix_mpi import MatrixMPI
+from vector import Vector
 
 
-class VectorMPI:
-    ...
+class VectorMPI(MatrixMPI): + def __init__(self, data=None, shape=None): + if isinstance(data, Vector): + self.__data__ = data + else: + self.__data__ = Vector(data=data, shape=shape) -# class Vector: -# start_idx = 0 # Nullter Eintrag des Vektors auf dem aktuellen Rang -# end_idx = 0 # Letzer Eintrag des Vektors auf dem aktuellen Rang -# rank_size = 0 # Dimension des Vektors der auf dem aktuellen Rang gespeichert wird -# kind = '' # Art des Vektors, Zeilen oder Spaltenvektor -# vec = np.arange(rank_size) -# vshape = np.arange(2) # Array mit Länge 2, um die Shape des Vektors zu speichern -# dim = 0 # Gesamtdimension des Vektors, Länge des Vektors -# -# comm = MPI.COMM_WORLD -# size = comm.Get_size() -# rank = comm.Get_rank() -# -# # Konstruktor -# def __init__(self, array): -# -# if isinstance(array, np.ndarray): -# form = array.shape -# if len(array) < self.size: -# raise ValueError("ERROR_3: Die Dimension des Vektors ist kleiner als die Anzahl der benutzten Ränge.") -# -# if len(form) > 2: -# raise ValueError("ERROR_2: Falsche Dimension, kann kein 1 x n oder n x 1 Vektor sein.") -# # Array ist Zeilenvektor: -# if len(form) == 1 or (len(form) == 2 and form[0] == 1): -# self.vshape[0] = 1 -# self.vshape[1] = len(array) # Shape des Vectors -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = ( -# self.end_idx - self.start_idx) + 1 # Größe des Teilvektors auf dem akt. Rang: Differenz zw. Start- und Endindex + 1 -# self.vec = array[ -# self.start_idx: self.end_idx + 1] # Auf jedem Rang werden die Einträge vom Start bis zum Endindex gespeichert -# self.kind = 'row' -# self.dim = len(array) -# -# # Array ist Spaltenvektor -# if len(form) == 2 and form[1] == 1: -# self.vshape[0] = len(array) -# self.vshape[1] = 1 -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = ( -# self.end_idx - self.start_idx) + 1 # Größe des Teilvektors auf dem akt. Rang: Differenz zw. 
Start- und Endindex + 1 -# self.vec = array[ -# self.start_idx: self.end_idx + 1] # Auf jedem Rang werden die Einträge vom Start bis zum Endindex gespeichert -# self.kind = 'column' -# self.dim = len(array) -# -# elif isinstance(array, list): -# self.vshape[0] = 1 -# self.vshape[1] = len(array) -# self.start_idx = int(self.rank * len(array) / self.size) -# self.end_idx = int(len(array) / self.size + self.rank * len(array) / self.size) - 1 -# self.rank_size = (self.end_idx - self.start_idx) + 1 -# self.vec = np.array(array[self.start_idx:self.end_idx + 1]) -# self.kind = 'row' -# self.dim = len(array) -# else: -# raise ValueError( -# "ERROR_1: Die übergebene Variable ist kein Numpy-Array, Keine Initialisierung der Vector-Klasse möglich.") -# -# def __add__(self, other): # Überschreibung der Addition -# if isinstance(self, Vector) and isinstance(other, Vector): -# Add_Vec = Vector(np.arange(self.dim)) -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other.vec[i] -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Addition nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Add_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Add_Vec = Vector(np.arange(other.dim)) -# for i in range(0, other.rank_size): -# Add_Vec.vec[i] = other.vec[i] + self -# else: -# raise ValueError("Ungeeigneter Datentyp für die Addition mit einem Vektor.") -# return Add_Vec -# -# def __radd__(self, other): # Überschreibung der Addition eines Vektors von rechts -# Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Add_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Add_Vec.vec[i] = self.vec[i] + other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Addition mit einem Vektor.") -# return Add_Vec -# -# def __sub__(self, other): # Überschreibung der Subtraktion -# if isinstance(self, Vector) and isinstance(other, Vector): -# Sub_Vec = Vector(np.arange(self.dim)) -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other.vec[i] -# else: -# raise ValueError("Die Dimension der Vektoren stimmen nicht überein, Subtraktion nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Sub_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Sub_Vec = Vector(np.arange(self.dim)) -# for i in range(0, other.rank_size): -# Sub_Vec.vec[i] = other.vec[i] - self -# else: -# raise ValueError("Ungeeigneter Datentyp für die Subtraktion mit einem Vektor.") -# return Sub_Vec -# -# def __rsub__(self, other): # Subtraktion einer Zahl von einem Vektor -# Sub_Vec = Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (float, int, complex)): -# for i in range(0, self.rank_size): -# Sub_Vec.vec[i] = self.vec[i] - other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Subtraktion von einem Vektor.") -# return Sub_Vec -# -# def __mul__(self, other): # Überschreibung der Multiplikation -# if isinstance(self, Vector) and 
isinstance(other, Vector): -# Mult_Vec = Vector(np.arange(self.dim)) -# if (self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[ -# 1]): # Elementweise Multiplikation -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other.vec[i] -# elif self.vshape[1] == other.vshape[0] and self.vshape[0] == 1: # Inneres Produkt (Skalarprodukt) -# skal_prod = 0 -# for i in range(0, self.rank_size): -# skal_prod = skal_prod + self.vec[i] * other.vec[i] -# return skal_prod -# elif self.vshape[0] == other.vshape[1] and self.vshape[1] == 1: -# raise ValueError("Kann erst implementiert werden, wenn Matrix-Klasse existiert.") -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Multiplikation nicht möglich.") -# elif isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# Mult_Vec = Vector(np.arange(self.dim)) -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# Mult_Vec = Vector(np.arange(self.dim)) -# for i in range(0, other.rank_size): -# Mult_Vec.vec[i] = other.vec[i] * self -# else: -# raise ValueError("Ungeeigneter Datentyp für die Multiplikation mit einem Vektor.") -# return Mult_Vec -# -# def __rmul__(self, other): # Rechtsseitige Multiplikation von einer Zahl an einen Vektor -# Mult_Vec = Vector(np.arange(self.dim)) -# if isinstance(self, Vector) and isinstance(other, (int, float, complex)): -# for i in range(0, self.rank_size): -# Mult_Vec.vec[i] = self.vec[i] * other -# else: -# raise ValueError("Ungeeigneter Datentyp für die Multiplikation mit einem Vektor.") -# return Mult_Vec -# -# def __truediv__(self, other): -# Div_Vec = Vector(np.arange(self.dim, dtype=np.double)) -# if isinstance(self, Vector) and isinstance(other, Vector): -# if self.vshape[0] == other.vshape[0] and self.vshape[1] == other.vshape[1]: -# for i in range(0, self.rank_size): -# if other.vec[i] == 0: -# raise ValueError("Ein Eintrag des Divisor-Vektors ist 0, Divion nicht möglich.") -# Div_Vec.vec[i] = self.vec[i] / other.vec[i] -# else: -# raise ValueError("Die Dimensionen der Vektoren stimmen nicht überein, Division nicht möglich.") -# elif isinstance(self, (int, float, complex)) and isinstance(other, Vector): -# for i in range(0, other.rank_size): -# if other.vec[i] == 0: -# raise ValueError("Ein Eintrag des Divisor-Vektors ist 0, Divion nicht möglich.") -# Div_Vec.vec[i] = self / other.vec[i] -# elif isinstance(self, Vector) and isinstance(other, (float, int, complex)): -# if other == 0: -# raise ValueError("Division durch Null ist nicht möglich.") -# else: -# for i in range(0, self.rank_size): -# Div_Vec.vec[i] = self.vec[i] / other -# else: -# raise ValueError("ERROR 11: Ungeeigneter Datentyp für die Division mit einem Vektor.") -# return Div_Vec -# -# def __rtruediv__(self, other): -# Div_Vec = Vector(np.arange(self.dim, dtype=np.double)) -# if isinstance(self, Vector) and isinstance(other, (float, int, complex)): -# if other == 0: -# raise ValueError("Division durch Null ist nicht möglich.") -# else: -# for i in range(0, self.rank_size): -# Div_Vec.vec[i] = self.vec[i] / other -# else: -# raise ValueError("ERROR 10: Uneignete Datentyp, um einen Vektor durch diesen zu dividieren") -# return Div_Vec -# -# def __neg__(self): -# Neg_Vec = -1 * self -# return Neg_Vec -# -# def shape(self): -# if self.rank == 0: -# return self.vshape -# -# def T(self): -# Transpose = self -# if self.kind == 'row': -# Transpose.kind = 'column' -# else: 
-# Transpose.kind = 'row' -# -# # Tauschen der Dimensionen -# var_shift = self.vshape[0] -# Transpose.vshape[0] = self.vshape[1] -# Transpose.vshape[1] = var_shift -# return Transpose -# -# def str(self): # Rückgabe des gesamten Vektors als string -# str_rep = '' -# -# if self.rank == 0: -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec)) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# -# elif self.rank == self.size - 1: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# -# else: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# self.comm.send(str_rep, dest=self.rank + 1) -# -# str_rep = self.comm.bcast(str_rep, root=self.size - 1) -# if self.rank == 0: -# return str_rep + ']' -# -# def string(self, limit_entry): # Gibt den Vektor als String zurück bis zum Eintrag limit_entry -# str_rep = '' -# -# if limit_entry > self.vec_size: -# raise ValueError("ERROR_4: Die eingegebene Zahl ist größer, als der größte Index des Vectors.") -# -# # Rank 0 -# if self.rank == 0 and limit_entry <= self.end_idx: # Limit_entry befindet sich im Rang 0 -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec[:limit_entry])) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec[:limit_entry])) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# if self.rank == 0 and limit_entry > self.end_idx: # Limit_entry befindet sich nicht im Rang 0 -# if self.kind == 'row': -# str_rep = '[' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = '[' + '\n'.join(map(str, self.vec)) -# if self.size > 1: -# self.comm.send(str_rep, dest=self.rank + 1) -# -# # Rank im Intervall [1,size-1] -# if ( -# 0 < self.rank < self.size - 1 and limit_entry <= self.start_idx): # wenn lim_ent == start_idx, dann wurden bereits alle relevanten Indizes im String gespeichert, da Vector nullinitialisiert ist -# str_rep = self.comm.recv(source=self.rank - 1) -# self.comm.send(str_rep, dest=self.rank + 1) -# if ( -# 0 < self.rank < self.size - 1 and self.start_idx < limit_entry <= self.end_idx): -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) + ']' -# self.comm.send(str_rep, dest=self.rank + 1) -# if 0 < self.rank < self.size - 1 and limit_entry > self.end_idx: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 1) + ',' + ','.join(map(str, self.vec)) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join(map(str, self.vec)) -# self.comm.send(str_rep, dest=self.rank + 1) -# -# # Rank size-1 -# if self.rank == self.size - 1 and limit_entry <= self.start_idx and self.rank > 1: -# str_rep = self.comm.recv(source=self.rank - 1) -# -# if self.rank == self.size - 1 and limit_entry >= self.start_idx and self.rank > 1: -# if self.kind == 'row': -# str_rep = self.comm.recv(source=self.rank - 
1) + ',' + ','.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# if self.kind == 'column': -# str_rep = self.comm.recv(source=self.rank - 1) + '\n' + '\n'.join( -# map(str, self.vec[:(limit_entry - self.start_idx)])) -# -# str_rep = self.comm.bcast(str_rep, root=self.size - 1) -# if self.rank == 0: -# return str_rep + ']' -# -# def norm(self): # Berechnung der 2-Norm / euklidischen Norm -# 0 -# sum_of_squares = 0 -# if self.rank == 0: -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# -# if self.size > 1: -# self.comm.send(sum_of_squares, dest=self.rank + 1) -# -# elif self.rank == self.size - 1: -# sum_of_squares = self.comm.recv(source=self.rank - 1) -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# -# else: -# sum_of_squares = self.comm.recv(source=self.rank - 1) -# for i in range(0, self.rank_size): -# sum_of_squares = sum_of_squares + self.vec[i] ** 2 -# self.comm.send(sum_of_squares, dest=self.rank + 1) -# -# sum_of_squares = self.comm.bcast(sum_of_squares, root=self.size - 1) -# norm = np.sqrt(sum_of_squares) -# -# return norm -# -# def normalize(self): # Normalisierung eines Vectors -# norm = self.norm() -# if norm == 0: -# return self -# normalized_vec = self / norm -# return normalized_vec -# -# -# # Main-Funktion -# x = Vector(np.arange(10)) -# print(x.str(), x.shape()) -# print(x.vshape[0], x.vshape[1]) -# minus = -1 * x -# _x = -x -# print(_x.str()) -# print(minus.str()) -# y = Vector(2 * np.arange(10)) -# print(y.str()) -# z = x - y -# print(z.str()) -# ae = x + 5 -# print(ae.str()) -# o = x * y -# print(o.str()) -# -# a = Vector(np.array([[1], [2]])) -# b = Vector(np.array([1, 2])) -# print(a.shape()) -# # c = a * b -# # print(c.vec) + # Calculate how much rows are delegated to the rank + total_amount_of_rows = self.__data__.shape()[0] + chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__) + self.__chunk__ = chunks[self.__mpi_rank__].tolist() + + # Store the delegated rows explicitly for calculations + self.__rank_subdata__ = Vector(self.__data__[self.__chunk__]) + + @staticmethod + def of(vector: Vector): + return VectorMPI(vector) + + def transpose(self): + """ + :return: the transpose of the vector + """ + return VectorMPI.of(self.__data__.transpose()) + + def T(self): + return self.transpose() + + def __str__(self): + return str(self.__data__) + + def __neg__(self): + return VectorMPI.of(-self.__data__) + + def __add__(self, other): + if isinstance(other, VectorMPI): + other = other.__rank_subdata__ + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other))) + + def __mul__(self, other): + if isinstance(other, VectorMPI): + other = other.__data__ + + if isinstance(other, int) or isinstance(other, float): + result = Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other)) + else: + result = self.__data__ * other + return VectorMPI.of(result) if isinstance(result, Vector) else result + + def __rmul__(self, other): + if isinstance(other, MatrixMPI): + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(other.get_rank_subdata() * self.get_data()))) + return self * other + + def __truediv__(self, other): + if isinstance(other, VectorMPI): + other = other.__rank_subdata__ + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other))) + + def norm(self, **kwargs): + """ + Computes the 2-norm of the vector which is the Frobenius-Norm of a nx1 
matrix. + + :param kwargs: ignored + :return: the 2-norm of the vector + """ + return math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares())) + + def normalize(self): + """ + A normalized vector has the length (norm) 1. + To achieve that the vector is divided by the norm of itself. + + :return: the normalized vector + """ + return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / self.norm()))) diff --git a/test/test_matrix.py b/test/test_matrix.py index 071e5d7..a6b22d8 100644 --- a/test/test_matrix.py +++ b/test/test_matrix.py @@ -230,6 +230,51 @@ class TestMatrix(TestCase): self.assertEqual(expected, actual) + def test_should_mul_matrices_4(self): + m1 = Matrix([1] * 6, (2, 3)) + m2 = Matrix([1] * 15, (3, 5)) + + actual = m1 * m2 + expected = Matrix([3] * 10, (2, 5)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_5(self): + m1 = Matrix([1] * 15, (3, 5)) + m2 = Matrix([1] * 20, (5, 4)) + + actual = m1 * m2 + expected = Matrix([5] * 12, (3, 4)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_6(self): + m1 = Matrix([1] * 15, (5, 3)) + m2 = Matrix([1] * 12, (3, 4)) + + actual = m1 * m2 + expected = Matrix([3] * 20, (5, 4)) + + self.assertEqual(expected, actual) + + def test_should_mul_matrices_7(self): + m1 = Matrix(list(range(1, 21)), (4, 5)) + m2 = Matrix(list(range(1, 16)), (5, 3)) + + actual = m1 * m2 + expected = Matrix([135, 150, 165, 310, 350, 390, 485, 550, 615, 660, 750, 840], (4, 3)) + + self.assertEqual(expected, actual) + + def test_should_mul_vector_matrix(self): + m1 = Matrix([1, 2, 3], (1, 3)) + m2 = Matrix([6, 5, 4, 3, 2, 1], (3, 2)) + + actual = m1 * m2 + expected = Matrix([20, 14], (1, 2)) + + self.assertEqual(expected, actual) + def test_should_raise_shape_missmatch_error_while_multiplying_matrices(self): m1 = Matrix([1, 2], (2, 1)) m2 = Matrix([3, 4], (2, 1)) @@ -378,3 +423,12 @@ class TestMatrix(TestCase): expected = Matrix([1, 2, 3, 4, 5, 60, 70, 8, 9, 100, 110, 12, 13, 14, 15, 16], (4, 4)) self.assertEqual(expected, actual) + + def test_should_flatten_list_of_matrices(self): + m1 = Matrix([1, 2, 3, 4, 5, 6, 7, 8, 9], (3, 3)) + m2 = Matrix([1, 2, 3, 4, 5, 6], (2, 3)) + + actual = m1.flatten([m1, m2]) + expected = Matrix([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6], (5, 3)) + + self.assertEqual(expected, actual) diff --git a/test/test_parallel.py b/test/test_parallel.py new file mode 100644 index 0000000..8a81792 --- /dev/null +++ b/test/test_parallel.py @@ -0,0 +1,381 @@ +import numpy as np +from mpi4py import MPI + +from matrix_mpi import MatrixMPI +from vector_mpi import VectorMPI + +np.random.seed(0) +############### +# Setting up MPI +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +size = comm.Get_size() +for i0 in range(size): + if i0 == rank: + print(f"Hello from rank {i0} of size {size}!") + comm.Barrier() +# Testing the vector class +comm.Barrier() +if rank == 0: + print("\n\nTesting the vector class --------------(output only from rank 0)---------------------\n\n") +############### + +### 1a Initialization +if rank == 0: + print("Start 1a initialization") +# from list +n_x1 = 10 +x1_list = [i0 for i0 in + range(n_x1)] # must work for 4 ranks, too, so even if entries are not evenly distributable between ranks +vec_x1 = VectorMPI(x1_list) +# from numpy array +n_x2 = 10000 +x2_list = np.random.uniform(0, 1, n_x2) +vec_x2 = VectorMPI(x2_list) +if rank == 0: + print("End 1a\n") +comm.Barrier() + +### 1b __str__ function, string representation +if rank == 0: 
+ print("Start 1b string representation") +vec_x1_str = str(vec_x1) +vec_x2_str = str(vec_x2) +if rank == 0: + print(f"vec_x1 values: {vec_x1_str}") + print(f"vec_x2 values: {vec_x2_str}") + print("End 1b\n") +comm.Barrier() + +### 1c shape and transpose +if rank == 0: + print("Start 1c shape and transpose") +# shape property of the vector +vec_x1_shape = vec_x1.shape() +vec_x2_T_shape = vec_x2.T().shape() +vec_x2_T_T_shape = vec_x2.T().T().shape() +if rank == 0: + print(f"vec_x1 has shape {vec_x1_shape} | must be ({n_x1},1)") # Hint: numpy.reshape + # transposition (property) of the vector, only "cosmetic" change + print(f"vec_x2.T() has shape {vec_x2_T_shape} | must be (1,{n_x2})") + print(f"vec_x2.T().T() has shape {vec_x2_T_T_shape} | must be ({n_x2},1)") + print("End 1c\n") +comm.Barrier() + +### 1d addition and subtraction +if rank == 0: + print("Start 1d addition and subtraction ") +# initialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +a = VectorMPI(a_list) +b = VectorMPI(b_list) +# computation +x = a + b +y = a - b +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) + np.array(b_list))).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) - np.array(b_list))).norm() +if rank == 0: + print(f"norm(a + b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a - b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") +try: + x_false = a + b.T() +except ValueError as e: + if rank == 0: + print("The correct result is this error: " + str(e)) +else: + if rank == 0: + print("ERROR: It is required to raise a system error, e. g., ValueError, since dimensions mismatch!") +try: + x_false = a - b.T() +except ValueError as e: + if rank == 0: + print("The correct result is this error: " + str(e)) +else: + if rank == 0: + print("ERROR:It is required to raise a system error, e. 
g., ValueError, since dimensions mismatch!") +if rank == 0: + print("End 1d\n") +comm.Barrier() + +### 1e multiplication +if rank == 0: + print("Start 1e multiplication") +# initialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +a = VectorMPI(a_list) +b = VectorMPI(b_list) +# computation with vectors +x = a.T() * b # scalar +y = a * b # vector +numpy_x_compare_norm = np.linalg.norm(x - np.sum(np.array(a_list) * np.array(b_list))) +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) * np.array(b_list))).norm() +# numpy_z_compare_norm = (z - MatrixMPI(comm,np.outer(np.array(a_list),np.array(b_list)))).norm() +if rank == 0: + print(f"norm(a.T() * b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a * b - numpy) = {numpy_y_compare_norm} | must be < 1e-8") +# print(f"norm(b * a.T() - numpy) = \n{numpy_z_compare_norm} | must be 1e-8") +# computation with scalars +x = a * 5 +y = 0.1 * b.T() +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) * 5)).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(b_list) * 0.1).T()).norm() +if rank == 0: + print(f"norm(a * 5 - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(0.1 * b.T() - numpy) = {numpy_y_compare_norm} | must be < 1e-8") + print("End 1e\n") +comm.Barrier() + +### 1f division +if rank == 0: + print("Start 1f division") +# initialization +n_a = 10 +n_b = 10 +a_list = [i0 for i0 in range(n_a)] +b_list = [1 / (i0 + 1) for i0 in range(n_b)] +a = VectorMPI(a_list) +b = VectorMPI(b_list) +# computation with vectors +x = a / b +y = a / 5 +numpy_x_compare_norm = (x - VectorMPI(np.array(a_list) / np.array(b_list))).norm() +numpy_y_compare_norm = (y - VectorMPI(np.array(a_list) / 5)).norm() +if rank == 0: + print(f"norm(a / b - numpy) = {numpy_x_compare_norm} | must be < 1e-8") + print(f"norm(a / 5 - numpy) = {numpy_y_compare_norm} | must be < 1e-8") + print("End 1f\n") +comm.Barrier() + +### 1g norm +if rank == 0: + print("Start 1g norm") +# initialization +a_list = [1 / (i0 + 1) for i0 in range(10)] +a = VectorMPI(a_list) +# computation +a_norm = a.norm() +a_normalized = a.normalize() +a_normalized_str = str(a_normalized) +numpy_comparison_norm = (a_normalized - VectorMPI(np.array(a_list) / np.linalg.norm(a_list))).norm() +if rank == 0: + print(f"a_norm = {a_norm} | must be {np.linalg.norm(a_list)}") + print(f"norm(a_normalize-np.a_normalize) = {numpy_comparison_norm} | must be < 1e-8") + print("End 1g\n") +comm.Barrier() + +### 1h negation +if rank == 0: + print("Start 1h negation") +# initialization +a_list = [1 / (i0 + 1) for i0 in range(10)] +a = VectorMPI(a_list) +# computation +x = -a +x_str = str(x) +if rank == 0: + print(f"-a = {x_str} | must be {-np.array(a_list)}") + print("End 1h\n") +comm.Barrier() + +### 1i manipulation +if rank == 0: + print("Start 1i manipulation") +# initialization +n_a = 10 +a_list = [1 / (i0 + 1) for i0 in range(n_a)] +a = VectorMPI(a_list) +a_idx = [1, 2, 9, 7, 8] +a_values = a[a_idx] +if rank == 0: + print( + f"a[{str(a_idx)}] = {str(a_values)} | must be {np.array(a_list).reshape(n_a, 1)[np.array(a_idx)].reshape(len(a_idx), )}") +a[a_idx] = [-1, -1, -1, -1, -1] +a_str = str(a) +np_a = np.array(a_list) +np_a[a_idx] = [-1, -1, -1, -1, -1] +if rank == 0: + print(f"a = {a_str} | must be {np_a}") + print("End 1i\n") +comm.Barrier() + +############### +# Testing the matrix class +comm.Barrier() +if rank == 0: + print("\n\nTesting the matrix class -----------------------------------\n\n") +############### + +### 2a 
Initialization +if rank == 0: + print("Start 2a initialization") +n_a1 = 10 +n_a2 = 5 +a_list = np.array([[(i0 + 1) * (i1 + 1) for i0 in range(n_a1)] for i1 in range(n_a2)]) +A = MatrixMPI(a_list) +B = MatrixMPI(structure="tridiagonal", data=[-1, 2, -1], n=12) +c_list = [i0 for i0 in range(n_a1 * n_a1)] +C = MatrixMPI(c_list, shape=(n_a1, n_a1)) +D = MatrixMPI(structure="tridiagonal", data=[-1, 2, -1], n=50) +# D = MatrixMPI(model="sheet1ex1", n=50) +if rank == 0: + print("End 2a\n") +comm.Barrier() + +### 2b __str__ function, string representation +if rank == 0: + print("Start 2b string representation") +# print(B.__str__(full = True)) +A_str = str(A) +B_str = str(B) +C_str = str(C) +D_str = str(D) +if rank == 0: + print(f"Matrix A (numbers):\n{A_str}") + print(f"Matrix B (tridiagonal):\n{B_str}") + print(f"Matrix C (list of numbers):\n{C_str}") + print(f"Matrix D (sheet1ex1):\n{D_str}") + print("End 2b\n") +comm.Barrier() + +### 2c shape and transpose +if rank == 0: + print("Start 2c shape and transpose") +# Initialization +A_shape = A.shape() +A_T_shape = A.T().shape() +A_T_T_shape = A.T().T().shape() +if rank == 0: + print(f"A has shape {A_shape} | must be {np.array(a_list).shape}") + print(f"A.T() has shape {A_T_shape} | must be {np.array(a_list).T.shape}") + print(f"A.T().T() has shape {A_T_T_shape} | must be {np.array(a_list).T.T.shape}") + print("End 2c\n") +comm.Barrier() + +# ### DEBUG norms ### +# mat = np.array([[(i0+1)*(i1+1) for i0 in range(10)] for i1 in range(5)]) +# local_max_each_row = np.amax(np.abs(mat),axis=1) +# if rank == 0: +# print(f"Matrix\n{str(mat)}\nlocal max each row {str(local_max_each_row)}") +# # frobenius norm +# A = MatrixMPI(comm,mat) +# a_norm_fro = A.norm("frobenius") +# if rank == 0: +# print(f"A.norm('frobenius') = {a_norm_fro} | must be {np.linalg.norm(np.array([[(i0+1)*(i1+1) for i0 in range(10)] for i1 in range(5)]),'fro')}") +# # row sum norm +# a_norm_row = A.norm("row sum") +# if rank == 0: +# print(f"A.norm('row sum') = {a_norm_row} | must be {np.max(local_max_each_row)}") +# # col sum norm + + +### 2d addition and subtraction +if rank == 0: + print("Start 2d addition and subtraction ") +# Initialization +n = 10 +A = MatrixMPI(structure="diagonal", data=[3], offset=0, n=n) +A21 = MatrixMPI(structure="diagonal", data=[-1], offset=-1, n=n) +A12 = MatrixMPI(structure="diagonal", data=[-1], offset=+1, n=n) +B = MatrixMPI(structure="diagonal", data=[1], offset=0, n=n) +# computation +C = A + A21 + A12 - B +D = C - MatrixMPI(structure='tridiagonal', data=[-1, 2, -1], n=n) +d_norm = D.norm() +A_str = str(5 + A - 3) +if rank == 0: + print(f"norm(A + A21 + A12 - B - tridiag) = {d_norm} | must be < 1e-8") + print(f"5+A-3 = \n{A_str}") + print("End 2d\n") +comm.Barrier() + +### 2e multiplication +if rank == 0: + print("Start 2e multiplication") +# initialization +n = 10 +a_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(8)] for i0 in range(n)]) +b_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(8)]) +c_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(n)] for i0 in range(n)]) +d_mat = np.array([[(i0 + 1) / (i1 + 1) for i1 in range(17)] for i0 in range(n)]) +A = MatrixMPI(a_mat) +B = MatrixMPI(b_mat) +C = MatrixMPI(c_mat) +D = MatrixMPI(d_mat) +x_vec = np.array([i0 + 1 for i0 in range(n)]) +y_vec = np.array([n * (i0 + 1) for i0 in range(n)]) +x = VectorMPI(x_vec) +y = VectorMPI(y_vec) +# computation matrix scalar +norm5 = (5 * A - MatrixMPI(5 * np.array(a_mat))).norm() +# computation matrix vector +norm6 = (C * x - 
VectorMPI(np.array(c_mat) @ np.array(x_vec))).norm() +norm7 = (D.T() * x - VectorMPI(np.array(d_mat).T @ np.array(x_vec))).norm() +y_shape = (D.T() * x).shape() +if rank == 0: + print(f"Norm of (5*A - np.(5*A)) is {norm5} | must be < 1e-8") + print(f"Norm of (C*x - np.(C*x)) is {norm6} | must be < 1e-8") + print(f"Norm of (D.T*x - np.(D.T*x)) is {norm7} | must be < 1e-8 | shape(D.T*x) is {y_shape} | must be (17,1)") +# computation matrix multiplication +A_str = str(A) +B_str = str(B) +if rank == 0: + print(f"DEBUG: A\n{A_str}\nDEBUG: B\n{B_str}") +norm1 = (A * B - MatrixMPI(np.array(a_mat) @ np.array(b_mat))).norm() +norm2 = (A.T() * A - MatrixMPI(np.array(a_mat).T @ np.array(a_mat))).norm() +norm3 = (A * A.T() - MatrixMPI(np.array(a_mat) @ np.array(a_mat).T)).norm() +norm4 = (B.T() * A.T() - MatrixMPI(np.array(b_mat).T @ np.array(a_mat).T)).norm() +if rank == 0: + print(f"Norm of (A*B - np.(A*B)) is {norm1} | must be < 1e-8") + print(f"Norm of (A.T()*A - np(A.T()*A)) is {norm2} | must be < 1e-8") + print(f"Norm of (A*A.T() - np(A*A.T())) is {norm3} | must be < 1e-8") + print(f"Norm of (B.T()*A.T() - np.(B.T()*A.T())) is {norm4} | must be < 1e-8") + print("End 2e\n") +comm.Barrier() + +### 2f division +if rank == 0: + print("Start 2f division") +# initialization +A = MatrixMPI(a_mat) +# computation +print(f"Norm of (A/5 - np.(A/5)) is {(A / 5 - MatrixMPI(np.array(a_mat) / 5)).norm()} | must be < 1e-8") +if rank == 0: + print("End 2f\n") +comm.Barrier() + +### 2g norm +if rank == 0: + print("Start 2g norm") +A = MatrixMPI(structure="tridiagonal", n=50, data=[-1, 2, -1]) +print(f"Frobenius norm of tridiagonal matrix: {A.norm('frobenius')} | must be 17.263") +print(f"Row sum norm of tridiagonal matrix: {A.norm('row sum')} | must be 4") +print(f"Col sum norm of tridiagonal matrix: {A.norm('col sum')} | must be 4") +if rank == 0: + print("End 2g\n") +comm.Barrier() + +### 2h negation +if rank == 0: + print("Start 2h negation") +A = MatrixMPI(structure="tridiagonal", n=50, data=[-1, 2, 1]) +print(f"Norm of (A + (-A)) is {(A + (-A)).norm('frobenius')} | must be < 1e-8") +if rank == 0: + print("End 2h\n") +comm.Barrier() + +### 2i manipulation +if rank == 0: + print("Start 2i manipulation") +A = MatrixMPI(structure="tridiagonal", n=10, data=[-1, 2, 1]) +A[1, 1] = 4 +A[[1, 2, 3], 2] = [-5, -10, 100] +print(str(A)) +if rank == 0: + print("End 2i\n") +comm.Barrier() diff --git a/test/test_vector.py b/test/test_vector.py index b31f9b0..1bb07b8 100644 --- a/test/test_vector.py +++ b/test/test_vector.py @@ -1,5 +1,6 @@ from unittest import TestCase +from matrix import Matrix from vector import Vector @@ -169,6 +170,24 @@ class TestVector(TestCase): self.assertEqual(expected, actual) + def test_should_mul_matrix_vector(self): + m = Matrix([1, 2, 3, 4, 5, 6], (2, 3)) + v = Vector([1, 2, 3]) + + expected = Vector([14, 32]) + actual = m * v + + self.assertEqual(expected, actual) + + def test_should_mul_matrix_vector_2(self): + m = Matrix([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4], (3, 4)) + v = Vector([1, 2, 3, 4]) + + expected = Vector([30, 30, 30]) + actual = m * v + + self.assertEqual(expected, actual) + def test_should_raise_value_missmatch_error_while_mul_vectors(self): v1 = Vector([1, 2]) v2 = '0'
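
Appendix (not part of the change set): a minimal serial sanity check for the new cg() routine. This sketch assumes the plain Matrix and Vector classes in src/matrix.py and src/vector.py accept the same constructor arguments used by src/main_cg.py (the commented-out imports in the new scripts suggest they are drop-in replacements); the file name and the tolerance value are illustrative choices, not part of this diff. It only uses operations that cg() itself relies on (matrix-vector product, subtraction, norm).

# sanity_check_cg.py -- illustrative sketch only; run serially with plain Python
import cg

from matrix import Matrix  # serial classes instead of the MPI wrappers (assumed compatible)
from vector import Vector

n = 50
h = 1 / (n - 1)

# Same model problem as src/main_cg.py, just smaller
A = Matrix([-1, 2, -1], structure="tridiagonal", n=n)
x0 = Vector([1] * n)
b = Vector([h**2 * 2] * n)

x = cg.cg(A, x0, b, tolerance=1e-8)

# The residual norm should drop below the tolerance unless max_iterations was reached
print(f"residual norm = {(b - A * x).norm()}")

The MPI variants are typically launched through the MPI job launcher, e.g. something like mpirun -n 4 python src/main_cg_timeit.py 1000; the exact command depends on the local MPI installation.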