More work on parallel stuff
		
							
								
								
									
src/cg.py (111 changed lines)
@@ -1,91 +1,58 @@
import matplotlib.pyplot as plt
import numpy as np
from mpi4py import MPI

from matrix_mpi import MatrixMPI as Matrix
from vector_mpi import VectorMPI as Vector

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()


# Function to compute the 2-norm
def norm(vec):
    sum_of_squares = 0
    for i in range(0, len(vec)):
        sum_of_squares = sum_of_squares + vec[i] ** 2

    return np.sqrt(sum_of_squares)


# Test whether the matrix M is positive definite, using a Cholesky decomposition
def ist_positiv_definit(m):
    try:
        np.linalg.cholesky(m)
        return True
    except np.linalg.LinAlgError:
        return False


n = 1000
h = 1 / (n - 1)

# Initialize the matrix A and the vector f for the linear system Au = f
A = np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1)
f = h ** 2 * 2 * np.ones(n)

# Test whether A is positive definite, using its eigenvalues
if np.all(np.linalg.eigvals(A) <= 0):  # check whether all eigenvalues are positive
    raise ValueError("A ist nicht positiv definit.")

# Test whether A is positive definite, using a Cholesky decomposition
if not (ist_positiv_definit(A)):
    raise ValueError("A ist nicht positiv definit.")

# Test whether A is symmetric
if (A != A.T).all():
    raise ValueError("A ist nicht symmetrisch.")

def cg(n: int, A: Matrix, f: Vector, tol: float):
    # Initialize the start vector x
x = np.ones(n)
    x = Vector([1] * n)

# Tolerance epsilon
eps = 0.001

count = 1
    # Number of steps
    count = 0

    # Compute initial values
r = f - A @ x  # initial residual
    r = f - A * x  # initial residual
    p = r  # initial descent direction

print("0. Iterationsschritt: \n")
print("Startvektor:", x)
print("Norm des Anfangsresiduums: ", norm(r))
    while r.norm() > tol and count < 1000:
        print(f"{count}. Iterationsschritt:\n")
        # print("Iterierte:", x)
        # print("Residuumsnorm: ", r.norm())

while eps < norm(r) < 1000:
    z = A @ p  # compute and store the matrix-vector product
        z = A * p  # compute and store the matrix-vector product

    alpha = (np.dot(r, p)) / (np.dot(p, z))
    print(alpha)
        # Minimize phi in direction p to find the new iterate x
        alpha = (r.T() * p) / (p.T() * z)  # (np.dot(r , p)) / (np.dot(p , z))
        # print(alpha)

        x = x + alpha * p  # new iterate x
        r = r - alpha * z  # new residual

        # Determine the new search direction
    beta = - (np.dot(r, z)) / (np.dot(p, z))
        beta = - (r.T() * z) / (p.T() * z)  # (np.dot(r , z)) / (np.dot(p , z))
        p = r + beta * p  # new conjugate descent direction

    print(count, ". Iterationsschritt: \n")
    print("Aktuelle Iterierte:", x)
    print("Norm des Residuums: ", norm(r))

        count = count + 1

# Compare with NumPy's built-in solution
u = np.linalg.solve(A, f)

print("Lösung mit CG-Verfahren:", x)
print("Numpy interne Lösung:", u)

if norm(u - x) > eps:
    print("Der CG-Algorithmus hat nicht richtig funktioniert!")
else:
    print("Der CG-Algorithmus war erfolgreich.")

plt.plot(x, linewidth=2)
plt.plot(u, linewidth=2)

plt.show()
    print(f"{rank} APFELSTRUDEL")
    # if rank == 0:
    #     # Compare with NumPy's built-in solution
    #     u = np.linalg.solve(np.array(A.get_data()), np.array(f.get_data()))
    #
    #     print("Lösung mit CG-Verfahren:", x)
    #     print("Numpy interne Lösung:", u)
    #
    #     if (Vector(u) - x).norm() > eps:
    #         print("Der CG-Algorithmus hat nicht richtig funktioniert!")
    #     else:
    #         print("Der CG-Algorithmus war erfolgreich.")
    #
    #     plt.plot(x.get_data(), linewidth=2)
    #     plt.plot(u, linewidth=2)
    #
    #     plt.show()
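The cg() function above maps directly onto the textbook conjugate gradient iteration. As a serial point of reference, here is a minimal NumPy sketch of the same loop; the name cg_reference, the problem size, and the stopping parameters are illustrative assumptions, not part of the commit.

import numpy as np


def cg_reference(A, f, tol=1e-3, max_iter=1000):
    # Same update formulas as in cg(): alpha = (r.p)/(p.Ap), beta = -(r.Ap)/(p.Ap)
    x = np.ones(len(f))              # start vector, as in the distributed version
    r = f - A @ x                    # initial residual
    p = r                            # initial descent direction
    count = 0
    while np.linalg.norm(r) > tol and count < max_iter:
        z = A @ p                    # matrix-vector product
        alpha = (r @ p) / (p @ z)    # step length minimizing phi along p
        x = x + alpha * p            # new iterate
        r = r - alpha * z            # new residual
        beta = -(r @ z) / (p @ z)    # coefficient enforcing A-conjugacy of the directions
        p = r + beta * p             # new conjugate descent direction
        count += 1
    return x, count


if __name__ == "__main__":
    n = 100                          # illustrative size; the driver chooses its own n
    h = 1 / (n - 1)
    A = np.diag(-np.ones(n - 1), 1) + np.diag(2 * np.ones(n)) + np.diag(-np.ones(n - 1), -1)
    f = h ** 2 * 2 * np.ones(n)
    x, steps = cg_reference(A, f)
    print(steps, np.linalg.norm(A @ x - f))

Replacing the NumPy operations with the MatrixMPI/VectorMPI operators (* instead of @, .norm(), .T()) recovers the distributed version shown in the diff.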
							
								
								
									
src/main.py (32 changed lines)
@@ -1,26 +1,18 @@
# m1 = MatrixMPI(numpy.random.uniform(0, 1, 1_000_000), (1000, 1000))
import numpy as np

from mpi4py import MPI
from matrix_mpi import MatrixMPI
from vector_mpi import VectorMPI
import cg

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
from matrix_mpi import MatrixMPI as Matrix
from vector_mpi import VectorMPI as Vector

m1 = MatrixMPI(list(range(1, 21)), (4, 5))
m2 = MatrixMPI(list(range(1, 16)), (5, 3))
n = 1_00
h = 1 / (n - 1)

m_mul = m1 * m2
# Initialize the matrix A and the vector f for the linear system Au = f
A = Matrix(np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1))
f = Vector([h ** 2 * 2] * n)

v1 = VectorMPI(list(range(1, 21)))
v2 = VectorMPI(list(reversed(list(range(1, 21)))))
# Tolerance epsilon
tol = 0.001

v_add = v1 + v2
v_mul = v1.T() * v2

if rank == 0:
    print(m_mul)
    print("---")
    print(v_add)
    print(v_mul)
cg.cg(n, A, f, tol)
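main.py now only assembles the 1D model problem and hands it to cg.cg; every rank runs the script, typically launched with something like mpiexec -n 4 python src/main.py (an assumption about how the project is run, not something stated in the commit). The commented-out block in cg.py hints at the intended rank-0 comparison against NumPy; a serial sketch of that check, assuming cg.cg is later changed to return its final iterate, could look like this. The hypothetical parts are kept as comments.

import numpy as np

n = 100                              # illustrative size matching 1_00 in main.py
h = 1 / (n - 1)
A = np.diag(-np.ones(n - 1), 1) + np.diag(2 * np.ones(n)) + np.diag(-np.ones(n - 1), -1)
f = h ** 2 * 2 * np.ones(n)

u = np.linalg.solve(A, f)            # NumPy reference solution

# x would be the CG result available on rank 0, e.g. x = cg.cg(n, A_mpi, f_mpi, tol)
# if np.linalg.norm(u - np.array(x.get_data())) > tol:
#     print("CG result deviates from the reference solution")
print(np.linalg.norm(A @ u - f))     # sanity check on the reference itself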
@@ -98,7 +98,7 @@ class Matrix:
        flattened_data = []
        rows = 0
        for matrix in matrices:
            flattened_data.extend(matrix.get_data())
            flattened_data.extend(matrix.get_matrix())
            rows += matrix.__shape__[0]
        cols = matrices[0].__shape__[1]
        return Matrix(flattened_data, (rows, cols))
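Matrix.flatten stacks blocks vertically by concatenating their contents and summing the row counts. The small NumPy check below illustrates why concatenating row-major data is equivalent to a vertical stack when all blocks share the same column count; NumPy arrays stand in for the repo's Matrix class here.

import numpy as np

top = np.arange(1, 7).reshape(2, 3)       # 2x3 block
bottom = np.arange(7, 13).reshape(2, 3)   # another 2x3 block

flattened = list(top.flatten()) + list(bottom.flatten())   # concatenated row-major data
stacked = np.array(flattened).reshape(4, 3)                # rows add up, columns stay fixed

print(np.array_equal(stacked, np.vstack([top, bottom])))   # True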
@@ -34,9 +34,20 @@ class MatrixMPI:
        cols = self.__data__.shape()[1]
        return Matrix(self.__data__[self.__chunk__], (rows, cols))

    def get_data(self):
    def get_matrix(self):
        """
        Returns the ``Matrix`` that is used internally
        :return: The ``Matrix`` that is used internally
        """
        return self.__data__

    def get_data(self):
        """
        Returns the raw data of the internal data structure
        :return: The raw data of the internal data structure
        """
        return self.__data__.get_data()

    def transpose(self):
        """
        :return: the transpose of the matrix
@@ -91,7 +102,7 @@ class MatrixMPI:

    def __mul__(self, other):
        if isinstance(other, MatrixMPI):
            other = other.get_data()
            other = other.get_matrix()
        gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() * other)
        data = self.__mpi_comm__.bcast(gathered_data)
        return MatrixMPI.of(Matrix.flatten(data))
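MatrixMPI.__mul__ follows a gather-then-broadcast pattern: each rank multiplies only its row chunk, the chunks are gathered (on rank 0 by default) and the assembled list is broadcast so every rank ends up with the full product. A minimal mpi4py sketch of that pattern with plain NumPy arrays is below; the chunking via array_split is an assumption made for illustration, not the repo's actual partitioning.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

n = 8
A = np.arange(n * n, dtype=float).reshape(n, n)   # every rank holds the same operands
B = np.eye(n)

rows = np.array_split(np.arange(n), size)[rank]   # this rank's row chunk
local_product = A[rows, :] @ B                    # local share of A @ B

gathered = comm.gather(local_product)             # list of chunks on rank 0, None elsewhere
gathered = comm.bcast(gathered)                   # now every rank has all chunks
full_product = np.vstack(gathered)                # reassemble, analogous to Matrix.flatten

if rank == 0:
    print(np.allclose(full_product, A @ B))

With a single process this degenerates to the plain serial product, which makes it easy to test without an MPI launcher.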
@@ -13,8 +13,19 @@ class VectorMPI(MatrixMPI):
        return VectorMPI(vector.get_data(), vector.shape())

    def get_vector(self):
        """
        Returns the ``Vector`` that is used internally
        :return: The ``Vector`` that is used internally
        """
        return self.__data__

    def get_data(self):
        """
        Returns the raw data of the internal data structure
        :return: The raw data of the internal data structure
        """
        return self.__data__.get_data()

    def shape(self):
        return self.__data__.shape()

@@ -55,7 +66,7 @@ class VectorMPI(MatrixMPI):

    def __rmul__(self, other):
        if isinstance(other, MatrixMPI):
            return VectorMPI.of(other.get_data() * self.get_vector())
            return VectorMPI.of(other.get_matrix() * self.get_vector())
        return self * other

    def __truediv__(self, other):
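The VectorMPI changes mirror the MatrixMPI ones: get_vector()/get_matrix() return the wrapped serial object, while get_data() now returns the raw data stored inside it. A tiny, hypothetical usage illustration follows; the constructor call is taken from the old main.py, and the printed types depend on the wrappers' internals.

from vector_mpi import VectorMPI as Vector

v = Vector(list(range(1, 6)))

inner = v.get_vector()   # the serial Vector object wrapped by VectorMPI
raw = v.get_data()       # the raw data held by that Vector

print(type(inner), type(raw), raw)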