Compare commits
No commits in common. "0fc5b61844e67e2b8afdeaf633ad55a075f4a1fa" and "23df9f37f2235d62dafcdcd5e946943df95c53bf" have entirely different histories.
0fc5b61844
...
23df9f37f2
30
README.md
30
README.md
@ -1,32 +1,6 @@
|
||||
# Project for PWR
|
||||
|
||||
Project in python for module "Praktikum Wissenschaftliches Rechnen" in "Applied Mathematics" at *TU Bergakademie Freiberg*.
|
||||
|
||||
# Task
|
||||
Implement MPI parallel Matrix and Vector classes in Python and apply them to a numerical problem / algorithm.
|
||||
1. Diagonalmatrix times vector
|
||||
2. Matrix from exercise 1 times vector
|
||||
3. Conjugated Gradient
|
||||
|
||||
# Structure of mains
|
||||
## Timings in Shell
|
||||
If you want to measure the runtime of the whole program with `time`, then use the `main_[name].py` files.
|
||||
Within the files you can set the size `n` of the matrix and vector.
|
||||
See pbs script [pwr_project.script](./pbs_scripts/pwr_project.script) for automatic use on the cluster.
|
||||
The following Python scripts are the entry points for the tasks:
|
||||
- [Diagonalmatrix times Vector](./src/main_diag_vec.py): [main_diag_vec.py](./src/main_diag_vec.py)
|
||||
- [Matrix from exercise 1 times Vector](./src/main_matrix_vec.py): [main_matrix_vec.py](./src/main_matrix_vec.py)
|
||||
- [CG](./src/main_cg.py): [main_cg.py](./src/main_cg.py)
|
||||
|
||||
## Specific timings
|
||||
If you want to measure only the time an operation needs (without initializing),
|
||||
then use the `main_[name]_timeit.py` files.
|
||||
The matrix/vector size `n` must be provided via command line,
|
||||
e.g. `python3 main_cg_timeit.py 100` for the CG with a matrix size of `100`.
|
||||
See pbs script [pwr_project_timeit.script](./pbs_scripts/pwr_project_timeit.script) for automatic use on the cluster.
|
||||
The following Python scripts are the entry points for the tasks:
|
||||
- [Diagonalmatrix times Vector](./src/main_diag_vec_timeit.py): [main_diag_vec.py](./src/main_diag_vec_timeit.py)
|
||||
- [Matrix from exercise 1 times Vector](./src/main_matrix_vec_timeit.py): [main_matrix_vec.py](./src/main_matrix_vec_timeit.py)
|
||||
- [CG](./src/main_cg_timeit.py): [main_cg.py](./src/main_cg_timeit.py)
|
||||
|
||||
## Weak scaling
|
||||
For the weak scaling measurements use pbs script [pwr_project_timeit_weak.script](./pbs_scripts/pwr_project_timeit_weak.script).
|
||||
Implement MPI parallel Matrix and Vector classes in Python and apply them to a numerical problem / algorithm.
|
@ -1,63 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
## parameters
|
||||
#PBS -N pwr_project
|
||||
#PBS -q entry_teachingq
|
||||
#PBS -l select=1:ncpus=32:mpiprocs=32:mem=512gb
|
||||
#PBS -o pwr_project_log.out
|
||||
#PBS -e pwr_project_log.err
|
||||
|
||||
module load python/gcc
|
||||
module load openmpi/gcc
|
||||
|
||||
## environment
|
||||
cd ~/pwr_project
|
||||
|
||||
## delete previous runs
|
||||
rm pwr_project_log.*
|
||||
|
||||
## execute Diag-Vec
|
||||
echo "---------------------" > times_diag_vec.txt
|
||||
echo "Durchlauf `date`:" >> times_diag_vec.txt
|
||||
|
||||
echo "Invoke with size of 8:" >> times_diag_vec.txt
|
||||
{ time mpiexec -n 8 python3 ./src/main_diag_vec.py ; } 2>> times_diag_vec.txt
|
||||
|
||||
echo "Invoke with size of 16:" >> times_diag_vec.txt
|
||||
{ time mpiexec -n 16 python3 ./src/main_diag_vec.py ; } 2>> times_diag_vec.txt
|
||||
|
||||
echo "Invoke with size of 32:" >> times_diag_vec.txt
|
||||
{ time mpiexec -n 32 python3 ./src/main_diag_vec.py ; } 2>> times_diag_vec.txt
|
||||
|
||||
echo "---------------------" >> times_diag_vec.txt
|
||||
|
||||
## execute Matrix-Vector
|
||||
echo "---------------------" > times_matrix_vec.txt
|
||||
echo "Durchlauf `date`:" >> times_matrix_vec.txt
|
||||
|
||||
echo "Invoke with size of 8:" >> times_matrix_vec.txt
|
||||
{ time mpiexec -n 8 python3 ./src/main_matrix_vec.py ; } 2>> times_matrix_vec.txt
|
||||
|
||||
echo "Invoke with size of 16:" >> times_matrix_vec.txt
|
||||
{ time mpiexec -n 16 python3 ./src/main_matrix_vec.py ; } 2>> times_matrix_vec.txt
|
||||
|
||||
echo "Invoke with size of 32:" >> times_matrix_vec.txt
|
||||
{ time mpiexec -n 32 python3 ./src/main_matrix_vec.py ; } 2>> times_matrix_vec.txt
|
||||
|
||||
echo "---------------------" >> times_matrix_vec.txt
|
||||
|
||||
## execute CG
|
||||
echo "---------------------" > times_cg.txt
|
||||
echo "Durchlauf `date`:" >> times_cg.txt
|
||||
|
||||
echo "Invoke with size of 8:" >> times_cg.txt
|
||||
{ time mpiexec -n 8 python3 ./src/main_cg.py ; } 2>> times_cg.txt
|
||||
|
||||
echo "Invoke with size of 16:" >> times_cg.txt
|
||||
{ time mpiexec -n 16 python3 ./src/main_cg.py ; } 2>> times_cg.txt
|
||||
|
||||
echo "Invoke with size of 32:" >> times_cg.txt
|
||||
{ time mpiexec -n 32 python3 ./src/main_cg.py ; } 2>> times_cg.txt
|
||||
|
||||
echo "---------------------" >> times_cg.txt
|
||||
echo "" >> times_cg.txt
|
@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
## parameters
|
||||
#PBS -N pwr_project_timeit
|
||||
#PBS -q entry_teachingq
|
||||
#PBS -l select=1:ncpus=32:mpiprocs=32:mem=512gb
|
||||
#PBS -o pwr_project_timeit_log.out
|
||||
#PBS -e pwr_project_timeit_log.err
|
||||
|
||||
module load python/gcc
|
||||
module load openmpi/gcc
|
||||
|
||||
## environment
|
||||
cd ~/pwr_project
|
||||
|
||||
## delete previous runs
|
||||
rm pwr_project_timeit_log.*
|
||||
|
||||
N=10000
|
||||
|
||||
## execute Diag-Vec
|
||||
echo "Diag-Vec"
|
||||
mpiexec -n 8 python3 ./src/main_diag_vec_timeit.py $N
|
||||
mpiexec -n 16 python3 ./src/main_diag_vec_timeit.py $N
|
||||
mpiexec -n 32 python3 ./src/main_diag_vec_timeit.py $N
|
||||
|
||||
## execute Matrix-Vector
|
||||
echo "Matrix-Vec"
|
||||
mpiexec -n 8 python3 ./src/main_matrix_vec_timeit.py $N
|
||||
mpiexec -n 16 python3 ./src/main_matrix_vec_timeit.py $N
|
||||
mpiexec -n 32 python3 ./src/main_matrix_vec_timeit.py $N
|
||||
|
||||
## execute CG
|
||||
echo "CG"
|
||||
mpiexec -n 8 python3 ./src/main_cg_timeit.py $(expr $N / 10)
|
||||
mpiexec -n 16 python3 ./src/main_cg_timeit.py $(expr $N / 10)
|
||||
mpiexec -n 32 python3 ./src/main_cg_timeit.py $(expr $N / 10)
|
@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
## parameters
|
||||
#PBS -N pwr_project_timeit_weak
|
||||
#PBS -q entry_teachingq
|
||||
#PBS -l select=1:ncpus=32:mpiprocs=32:mem=512gb
|
||||
#PBS -o pwr_project_timeit_weak_log.out
|
||||
#PBS -e pwr_project_timeit_weak_log.err
|
||||
|
||||
module load python/gcc
|
||||
module load openmpi/gcc
|
||||
|
||||
## environment
|
||||
cd ~/pwr_project
|
||||
|
||||
## delete previous runs
|
||||
rm pwr_project_timeit_weak_log.*
|
||||
|
||||
N=10000
|
||||
DN=$(expr 2 \* $N)
|
||||
|
||||
## execute Diag-Vec
|
||||
echo "Diag-Vec"
|
||||
mpiexec -n 8 python3 ./src/main_diag_vec_timeit.py $N
|
||||
mpiexec -n 32 python3 ./src/main_diag_vec_timeit.py $DN
|
||||
|
||||
## execute Matrix-Vector
|
||||
echo "Matrix-Vec"
|
||||
mpiexec -n 8 python3 ./src/main_matrix_vec_timeit.py $N
|
||||
mpiexec -n 32 python3 ./src/main_matrix_vec_timeit.py $DN
|
||||
|
||||
|
||||
## execute CG
|
||||
echo "CG"
|
||||
mpiexec -n 8 python3 ./src/main_cg_timeit.py $(expr $N / 10)
|
||||
mpiexec -n 32 python3 ./src/main_cg_timeit.py $(expr $DN / 10)
|
73
src/cg.py
73
src/cg.py
@ -1,35 +1,58 @@
|
||||
from mpi4py import MPI
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
|
||||
def cg(A: Matrix, x0: Vector, b: Vector, tolerance: float = 1e-3, max_iterations: int = 1_000):
|
||||
"""
|
||||
Solves a system of linear equations of the form Ax = b numerically.
|
||||
|
||||
:param A: The transformation matrix A
|
||||
:param x0: A vector to start the algorithm with
|
||||
:param b: The solution vector of the system of linear equations, the right hand side
|
||||
:param tolerance: The tolerance at which to stop the algorithm, default is 0.001
|
||||
:param max_iterations: Maximum number of iterations, default is 1000
|
||||
"""
|
||||
iterations = 0
|
||||
def cg(n: int, A: Matrix, f: Vector, tol: float):
|
||||
# Intialisierung des Startvektors x
|
||||
x = Vector([1] * n)
|
||||
|
||||
x = x0
|
||||
r = b - A * x
|
||||
d = r
|
||||
# Anzahl der Schritte
|
||||
count = 0
|
||||
|
||||
while r.norm() >= tolerance and iterations < max_iterations:
|
||||
z = A * d
|
||||
# Anfangswerte berechnen
|
||||
r = f - A * x # Anfangsresiduum
|
||||
p = r # Anfangsabstiegsrichtung
|
||||
|
||||
alpha = (r.T() * d) / (d.T() * z)
|
||||
x = x + alpha * d
|
||||
r = r - alpha * z
|
||||
while r.norm() > tol and count < 1000:
|
||||
print(f"{count}. Iterationsschritt:\n")
|
||||
# print("Iterierte:", x)
|
||||
# print("Residuumsnorm: ", r.norm())
|
||||
|
||||
beta = -(r.T() * z) / (d.T() * z)
|
||||
d = r + beta * d
|
||||
z = A * p # Matrix-Vektorprodukt berechnen und speichern
|
||||
|
||||
iterations = iterations + 1
|
||||
return x
|
||||
# Minimiere phi in Richung p um neue Iterierte x zu finden
|
||||
alpha = (r.T() * p) / (p.T() * z) # (np.dot(r , p)) / (np.dot(p , z))
|
||||
# print(alpha)
|
||||
|
||||
x = x + alpha * p # neue Itterierte x
|
||||
r = r - alpha * z # neues Residuum
|
||||
|
||||
# Bestimmung der neuen Suchrichtung
|
||||
beta = - (r.T() * z) / (p.T() * z) # (np.dot(r , z)) / (np.dot(p , z))
|
||||
p = r + beta * p # neue konjugierte Abstiegsrichtung
|
||||
|
||||
count = count + 1
|
||||
|
||||
print(f"{rank} APFELSTRUDEL")
|
||||
# if rank == 0:
|
||||
# # Vergleich mit numpy-interner Lsg
|
||||
# u = np.linalg.solve(np.array(A.get_data()), np.array(f.get_data()))
|
||||
#
|
||||
# print("Lösung mit CG-Verfahren:", x)
|
||||
# print("Numpy interne Lösung:", u)
|
||||
#
|
||||
# if (Vector(u) - x).norm() > eps:
|
||||
# print("Der CG-Algorithmus hat nicht richtig funktioniert!")
|
||||
# else:
|
||||
# print("Der CG-Algorithmus war erfolgreich.")
|
||||
#
|
||||
# plt.plot(x.get_data(), linewidth=2)
|
||||
# plt.plot(u, linewidth=2)
|
||||
#
|
||||
# plt.show()
|
||||
|
18
src/main.py
Normal file
18
src/main.py
Normal file
@ -0,0 +1,18 @@
|
||||
import numpy as np
|
||||
|
||||
import cg
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
n = 1_00
|
||||
h = 1 / (n - 1)
|
||||
|
||||
# Initialisierung der Matrix A und des Vektor f für LGS Au = f
|
||||
A = Matrix(np.diag(-1 * np.ones(n - 1), k=1) + np.diag(2 * np.ones(n), k=0) + np.diag(-1 * np.ones(n - 1), k=-1))
|
||||
f = Vector([h ** 2 * 2] * n)
|
||||
|
||||
# Toleranz epsilon
|
||||
tol = 0.001
|
||||
|
||||
cg.cg(n, A, f, tol)
|
@ -1,25 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
|
||||
import cg
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = 1_000
|
||||
h = 1 / (n - 1)
|
||||
|
||||
A = Matrix([-1, 2, -1], structure="tridiagonal", n=n)
|
||||
x0 = Vector([1] * n)
|
||||
b = Vector([h**2 * 2] * n)
|
||||
|
||||
x = cg.cg(A, x0, b)
|
||||
|
||||
# if rank == 0:
|
||||
# print(f"ranks = {size}: x = {x}")
|
@ -1,27 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
import sys
|
||||
import timeit
|
||||
|
||||
import cg
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = int(sys.argv[1])
|
||||
h = 1 / (n - 1)
|
||||
|
||||
A = Matrix([-1, 2, -1], structure="tridiagonal", n=n)
|
||||
x0 = Vector([1] * n)
|
||||
b = Vector([h**2 * 2] * n)
|
||||
|
||||
time = timeit.timeit(lambda: cg.cg(A, x0, b), number=1)
|
||||
|
||||
if rank == 0:
|
||||
print(f"ranks = {size}: time = {time}")
|
@ -1,21 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = 10_000
|
||||
|
||||
A = Matrix([3], structure="diagonal", offset=0, n=n)
|
||||
v = Vector([7] * n)
|
||||
|
||||
x = A * v
|
||||
|
||||
# if rank == 0:
|
||||
# print(f"ranks = {size}: x = {x}")
|
@ -1,23 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
import sys
|
||||
import timeit
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = int(sys.argv[1])
|
||||
|
||||
A = Matrix([3], structure="diagonal", offset=0, n=n)
|
||||
v = Vector([7] * n)
|
||||
|
||||
time = timeit.timeit(lambda: A * v, number=1)
|
||||
|
||||
if rank == 0:
|
||||
print(f"ranks = {size}: time = {time}s")
|
@ -1,22 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = 10_000
|
||||
|
||||
m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)]
|
||||
A = Matrix(m_data, (n, n))
|
||||
v = Vector(list(range(1, n+1)))
|
||||
|
||||
x = A * v
|
||||
|
||||
# if rank == 0:
|
||||
# print(f"ranks = {size}: x = {x}")
|
@ -1,24 +0,0 @@
|
||||
from mpi4py import MPI
|
||||
import sys
|
||||
import timeit
|
||||
|
||||
from matrix_mpi import MatrixMPI as Matrix
|
||||
from vector_mpi import VectorMPI as Vector
|
||||
|
||||
# from matrix import Matrix
|
||||
# from vector import Vector
|
||||
|
||||
comm = MPI.COMM_WORLD
|
||||
size = comm.Get_size()
|
||||
rank = comm.Get_rank()
|
||||
|
||||
n = int(sys.argv[1])
|
||||
|
||||
m_data = [(i / k) for i in range(1, n+1) for k in range(1, n+1)]
|
||||
A = Matrix(m_data, (n, n))
|
||||
v = Vector(list(range(1, n+1)))
|
||||
|
||||
time = timeit.timeit(lambda: A * v, number=1)
|
||||
|
||||
if rank == 0:
|
||||
print(f"ranks = {size}: time = {time}s")
|
109
src/matrix.py
109
src/matrix.py
@ -22,6 +22,7 @@ class Matrix:
|
||||
- ``Matrix(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\"
|
||||
- ``Matrix(str, int)``: will create a new square matrix of given size and TODO
|
||||
|
||||
|
||||
:param data: Either a list or an numpy ndarray
|
||||
:param shape: A tuple containing the amount of rows and columns
|
||||
:param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\"
|
||||
@ -83,13 +84,6 @@ class Matrix:
|
||||
"""
|
||||
return self.__data__
|
||||
|
||||
@staticmethod
|
||||
def flatten_internal(matrices):
|
||||
flattened_data = [element for matrix in matrices for row in matrix.get_data() for element in row]
|
||||
rows = sum(matrix.__shape__[0] for matrix in matrices)
|
||||
cols = matrices[0].__shape__[1]
|
||||
return flattened_data, (rows, cols)
|
||||
|
||||
@staticmethod
|
||||
def flatten(matrices: list):
|
||||
"""
|
||||
@ -101,8 +95,13 @@ class Matrix:
|
||||
:return: A ``Matrix`` extended by all matrices in the list.
|
||||
:rtype: ``Matrix``
|
||||
"""
|
||||
flattened_data, shape = Matrix.flatten_internal(matrices)
|
||||
return Matrix(flattened_data, shape)
|
||||
flattened_data = []
|
||||
rows = 0
|
||||
for matrix in matrices:
|
||||
flattened_data.extend(matrix.get_matrix())
|
||||
rows += matrix.__shape__[0]
|
||||
cols = matrices[0].__shape__[1]
|
||||
return Matrix(flattened_data, (rows, cols))
|
||||
|
||||
def shape(self):
|
||||
"""
|
||||
@ -111,8 +110,13 @@ class Matrix:
|
||||
return self.__shape__
|
||||
|
||||
def __transpose_internal__(self):
|
||||
rows, cols = self.__shape__
|
||||
return [[self.__data__[i][j] for i in range(rows)] for j in range(cols)], (cols, rows)
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
transposed_data = [([0] * rows) for _ in range(cols)]
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
transposed_data[j][i] = self.__data__[i][j]
|
||||
return transposed_data, (cols, rows)
|
||||
|
||||
def transpose(self):
|
||||
"""
|
||||
@ -136,9 +140,6 @@ class Matrix:
|
||||
:param other: The object to compare to; must be either a ``Matrix``, a ``list`` or a ``numpy.ndarray``
|
||||
:return: True if data in the matrix are equal to the given data in other for each component, otherwise False
|
||||
"""
|
||||
if not isinstance(other, (Matrix, list, numpy.ndarray)):
|
||||
raise ValueError("Matrix type is not comparable to type of given ``other``")
|
||||
data_to_compare = other
|
||||
if isinstance(other, Matrix):
|
||||
if self.__shape__ != other.__shape__:
|
||||
return False
|
||||
@ -149,33 +150,45 @@ class Matrix:
|
||||
return False
|
||||
elif isinstance(other, numpy.ndarray):
|
||||
data_to_compare = other.tolist()
|
||||
return all(value == other_value
|
||||
for row, other_row in zip(self.__data__, data_to_compare)
|
||||
for value, other_value in zip(row, other_row))
|
||||
else:
|
||||
raise ValueError("Matrix type is not comparable to type of given ``other``")
|
||||
|
||||
for i in range(len(self.__data__)):
|
||||
for j in range(len(self.__data__[i])):
|
||||
if self.__data__[i][j] != data_to_compare[i][j]:
|
||||
return False
|
||||
return True
|
||||
|
||||
def __str__(self):
|
||||
return str(numpy.array(self.__data__))
|
||||
|
||||
def __neg_internal__(self):
|
||||
return list(map(lambda row: [-value for value in row], self.__data__))
|
||||
rows = range(self.__shape__[0])
|
||||
cols = range(self.__shape__[1])
|
||||
return [[-(self.__data__[i][j]) for j in cols] for i in rows]
|
||||
|
||||
def __neg__(self):
|
||||
return Matrix(self.__neg_internal__(), self.__shape__)
|
||||
|
||||
def __add_matrix_internal__(self, other):
|
||||
return [list(map(sum, zip(*rows))) for rows in zip(self.__data__, other.__data__)]
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
return [[(self.__data__[i][j] + other.__data__[i][j]) for j in range(cols)] for i in range(rows)]
|
||||
|
||||
def __add_scalar_internal__(self, other):
|
||||
return [[value + other for value in row] for row in self.__data__]
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
return [[(self.__data__[i][j] + other) for j in range(cols)] for i in range(rows)]
|
||||
|
||||
def __add__(self, other):
|
||||
if not isinstance(other, (Matrix, int, float)):
|
||||
raise ValueError("Only a number or another ``Matrix`` can be added to a ``Matrix``")
|
||||
if isinstance(other, Matrix):
|
||||
if self.__shape__ != other.__shape__:
|
||||
raise ValueError("The shape of the operands must be the same")
|
||||
return Matrix(self.__add_matrix_internal__(other), self.__shape__)
|
||||
return Matrix(self.__add_scalar_internal__(other), self.__shape__)
|
||||
elif isinstance(other, int) or isinstance(other, float):
|
||||
return Matrix(self.__add_scalar_internal__(other), self.__shape__)
|
||||
else:
|
||||
raise ValueError("Only a number or another ``Matrix`` can be added to a ``Matrix``")
|
||||
|
||||
def __radd__(self, other):
|
||||
return self + other
|
||||
@ -187,21 +200,28 @@ class Matrix:
|
||||
return -self + other
|
||||
|
||||
def __truediv_scalar_internal__(self, other):
|
||||
return [list(map(lambda value: value / other, row)) for row in self.__data__]
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
return [[(self.__data__[i][j] / other) for j in range(cols)] for i in range(rows)]
|
||||
|
||||
def __truediv__(self, other):
|
||||
if not isinstance(other, (int, float)):
|
||||
if isinstance(other, int) or isinstance(other, float):
|
||||
return Matrix(self.__truediv_scalar_internal__(other), self.__shape__)
|
||||
else:
|
||||
raise ValueError("A ``Matrix`` can only be divided ba a number")
|
||||
return Matrix(self.__truediv_scalar_internal__(other), self.__shape__)
|
||||
|
||||
def __mul_rowmatrix_matrix__internal__(self, other):
|
||||
rows, cols = self.__shape__[1], other.__shape__[1]
|
||||
return [sum(self.__data__[0][j] * other.__data__[j][i] for j in range(rows)) for i in range(cols)]
|
||||
cols = other.__shape__[1]
|
||||
new_data = [0] * cols
|
||||
for i in range(cols):
|
||||
new_data[i] = sum([self.__data__[0][j] * other.__data__[j][i] for j in range(self.__shape__[1])])
|
||||
return new_data
|
||||
|
||||
def __mul_matrix_internal__(self, other):
|
||||
if self.__shape__[0] == 1:
|
||||
return self.__mul_rowmatrix_matrix__internal__(other)
|
||||
rows, cols = self.__shape__[0], other.__shape__[1]
|
||||
rows = self.__shape__[0]
|
||||
cols = other.__shape__[1]
|
||||
new_data = [([0] * cols) for _ in range(rows)]
|
||||
for i in range(rows):
|
||||
for k in range(cols):
|
||||
@ -209,7 +229,9 @@ class Matrix:
|
||||
return new_data
|
||||
|
||||
def __mul_scalar_internal__(self, other):
|
||||
return [list(map(lambda value: value * other, row)) for row in self.__data__]
|
||||
rows = range(self.__shape__[0])
|
||||
cols = range(self.__shape__[1])
|
||||
return [[(self.__data__[i][j] * other) for j in cols] for i in rows]
|
||||
|
||||
def __mul__(self, other):
|
||||
if isinstance(other, Matrix):
|
||||
@ -225,17 +247,32 @@ class Matrix:
|
||||
def __rmul__(self, other):
|
||||
return self * other
|
||||
|
||||
def get_abs_sum_of_squares(self):
|
||||
return self.__abs_sum_of_squares__()
|
||||
|
||||
def __abs_sum_of_squares__(self):
|
||||
return sum(abs(element) ** 2 for row in self.__data__ for element in row)
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
abs_sum = 0
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
abs_sum += abs(self.__data__[i][j]) ** 2
|
||||
return abs_sum
|
||||
|
||||
def __col_sums__(self):
|
||||
return [sum(abs(row[j]) for row in self.__data__) for j in range(self.__shape__[1])]
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
col_sums = [0] * cols
|
||||
for j in range(cols):
|
||||
for i in range(rows):
|
||||
col_sums[j] += abs(self.__data__[i][j])
|
||||
return col_sums
|
||||
|
||||
def __row_sums__(self):
|
||||
return [sum(abs(value) for value in row) for row in self.__data__]
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
row_sums = [0] * rows
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
row_sums[i] += abs(self.__data__[i][j])
|
||||
return row_sums
|
||||
|
||||
def norm(self, f: str = "frobenius"):
|
||||
"""
|
||||
|
@ -1,5 +1,3 @@
|
||||
import math
|
||||
|
||||
import numpy
|
||||
from mpi4py import MPI
|
||||
|
||||
@ -11,75 +9,39 @@ class MatrixMPI:
|
||||
__mpi_size__ = __mpi_comm__.Get_size()
|
||||
__mpi_rank__ = __mpi_comm__.Get_rank()
|
||||
|
||||
__data__ = None
|
||||
__rank_subdata__ = None
|
||||
__data__: Matrix = None
|
||||
__chunk__: list = None
|
||||
|
||||
def __init__(self, data=None, shape=None, structure=None, model=None, offset=None, n=None):
|
||||
"""
|
||||
Creates a new matrix.
|
||||
The type of the matrix depends on the signature and arguments.
|
||||
self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n)
|
||||
|
||||
- ``MatrixMPI(list)``: will create a new matrix with the given data in the list and its shape.
|
||||
- ``MatrixMPI(numpy.ndarray)``: will create a new matrix with the given data in ndarray and its shape.
|
||||
- ``MatrixMPI(list, (int,int))``: will create a new nxm matrix with the given rows and columns and data in list.
|
||||
- ``MatrixMPI(list, str, int, int)``: will create a new square matrix of given size and structure of \"diagonal\"
|
||||
- ``MatrixMPI(list, str, int)``: will create a new square matrix of given size and structure of either \"unity\", \"diagonal\" or \"tridiagonal\"
|
||||
- ``MatrixMPI(str, int)``: will create a new square matrix of given size and TODO
|
||||
|
||||
:param data: Either a list or an numpy ndarray
|
||||
:param shape: A tuple containing the amount of rows and columns
|
||||
:param structure: Either \"unity\", \"diagonal\" or \"tridiagonal\"
|
||||
:param model: TODO
|
||||
:param offset: Offset to diagonal axis
|
||||
:param n: Amount of rows of a square matrix or offset in case of diagonal structure
|
||||
|
||||
:type data: Matrix | list | numpy.ndarray
|
||||
:type shape: (int, int)
|
||||
:type structure: str
|
||||
:type model: str
|
||||
:type offset: int
|
||||
:type n: int
|
||||
|
||||
:rtype: MatrixMPI
|
||||
"""
|
||||
if isinstance(data, Matrix):
|
||||
self.__data__ = data
|
||||
else:
|
||||
self.__data__ = Matrix(data=data, shape=shape, structure=structure, model=model, offset=offset, n=n)
|
||||
|
||||
# Calculate how much rows are delegated to the rank
|
||||
total_amount_of_rows = self.__data__.shape()[0]
|
||||
chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__)
|
||||
self.__chunk__ = chunks[self.__mpi_rank__].tolist()
|
||||
|
||||
# Store the delegated rows explicitly for calculations
|
||||
rows = len(self.__chunk__)
|
||||
cols = self.__data__.shape()[1]
|
||||
self.__rank_subdata__ = Matrix(self.__data__[self.__chunk__], (rows, cols))
|
||||
|
||||
@staticmethod
|
||||
def of(matrix: Matrix):
|
||||
return MatrixMPI(matrix)
|
||||
return MatrixMPI(matrix.get_data(), matrix.shape())
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__data__)
|
||||
|
||||
def shape(self):
|
||||
return self.__data__.shape()
|
||||
|
||||
def get_rank_subdata(self):
|
||||
"""
|
||||
Returns only the delegated rows of the rank as ``Matrix``
|
||||
:return: The delegated rows as ``Matrix``
|
||||
"""
|
||||
return self.__rank_subdata__
|
||||
def get_rank_submatrix(self):
|
||||
rows = len(self.__chunk__)
|
||||
cols = self.__data__.shape()[1]
|
||||
return Matrix(self.__data__[self.__chunk__], (rows, cols))
|
||||
|
||||
def get_data(self):
|
||||
def get_matrix(self):
|
||||
"""
|
||||
Returns the whole ``Matrix`` that is used internally
|
||||
Returns the ``Matrix`` that is used internally
|
||||
:return: The ``Matrix`` that is used internally
|
||||
"""
|
||||
return self.__data__
|
||||
|
||||
def get_internal_data(self):
|
||||
def get_data(self):
|
||||
"""
|
||||
Returns the raw data of the internal data structure
|
||||
:return: The raw data of the internal data structure
|
||||
@ -100,9 +62,6 @@ class MatrixMPI:
|
||||
"""
|
||||
return self.transpose()
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__data__)
|
||||
|
||||
def __eq__(self, other):
|
||||
"""
|
||||
Return ``self==value``
|
||||
@ -111,17 +70,21 @@ class MatrixMPI:
|
||||
:return: True if data in the matrix are equal to the given data in other for each component, otherwise False
|
||||
"""
|
||||
if isinstance(other, MatrixMPI):
|
||||
return all(self.__mpi_comm__.allgather(self.__rank_subdata__ == other.__rank_subdata__))
|
||||
return self.__data__ == other.__data__
|
||||
else:
|
||||
return self.__data__ == other
|
||||
|
||||
def __neg__(self):
|
||||
return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(-self.__rank_subdata__)))
|
||||
gathered_data = self.__mpi_comm__.gather(-self.get_rank_submatrix())
|
||||
data = self.__mpi_comm__.bcast(gathered_data)
|
||||
return MatrixMPI.of(Matrix.flatten(data))
|
||||
|
||||
def __add__(self, other):
|
||||
if isinstance(other, MatrixMPI):
|
||||
other = other.__rank_subdata__
|
||||
return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other)))
|
||||
other = other.get_rank_submatrix()
|
||||
gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() + other)
|
||||
data = self.__mpi_comm__.bcast(gathered_data)
|
||||
return MatrixMPI.of(Matrix.flatten(data))
|
||||
|
||||
def __radd__(self, other):
|
||||
return self + other
|
||||
@ -133,12 +96,16 @@ class MatrixMPI:
|
||||
return -self + other
|
||||
|
||||
def __truediv__(self, other):
|
||||
return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other)))
|
||||
gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() / other)
|
||||
data = self.__mpi_comm__.bcast(gathered_data)
|
||||
return MatrixMPI.of(Matrix.flatten(data))
|
||||
|
||||
def __mul__(self, other):
|
||||
if isinstance(other, MatrixMPI):
|
||||
other = other.get_data()
|
||||
return MatrixMPI.of(Matrix.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other)))
|
||||
other = other.get_matrix()
|
||||
gathered_data = self.__mpi_comm__.gather(self.get_rank_submatrix() * other)
|
||||
data = self.__mpi_comm__.bcast(gathered_data)
|
||||
return MatrixMPI.of(Matrix.flatten(data))
|
||||
|
||||
def __rmul__(self, other):
|
||||
return self * other
|
||||
@ -154,10 +121,6 @@ class MatrixMPI:
|
||||
|
||||
:return: the norm as a number
|
||||
"""
|
||||
if f == "frobenius":
|
||||
return math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares()))
|
||||
elif f == "row sum":
|
||||
return max(self.__mpi_comm__.allgather(self.__rank_subdata__.norm(f)))
|
||||
return self.__data__.norm(f)
|
||||
|
||||
def __getitem__(self, key):
|
||||
|
@ -24,19 +24,6 @@ class Vector(Matrix):
|
||||
else:
|
||||
raise ValueError("data must be a ``list``, a ``numpy.ndarray`` or an integer for dimension")
|
||||
|
||||
@staticmethod
|
||||
def flatten(vectors: list):
|
||||
"""
|
||||
Flattens a list of matrices into one bigger matrix.
|
||||
The columns must match the first ``Matrix`` in the list and the rows can be arbitrarily.
|
||||
|
||||
:param vectors: A list of vectors.
|
||||
:type vectors: list
|
||||
:return: A ``Vector`` extended by all matrices in the list.
|
||||
"""
|
||||
flattened_data, shape = Matrix.flatten_internal(vectors)
|
||||
return Vector(flattened_data, shape)
|
||||
|
||||
def __eq__(self, other):
|
||||
"""
|
||||
Return ``self==value``
|
||||
@ -80,7 +67,8 @@ class Vector(Matrix):
|
||||
raise ValueError("Only a number or another ``Vector`` can be added to a ``Vector``")
|
||||
|
||||
def __mul_vector_same_shape_internal__(self, other):
|
||||
rows, cols = self.__shape__
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
if rows >= cols:
|
||||
new_data = [(self.__data__[i][0] * other.__data__[i][0]) for i in range(rows)]
|
||||
else:
|
||||
@ -88,7 +76,8 @@ class Vector(Matrix):
|
||||
return new_data
|
||||
|
||||
def __mul_tensor_internal__(self, other):
|
||||
rows, cols = self.__shape__[0], other.__shape__[1]
|
||||
rows = self.__shape__[0]
|
||||
cols = other.__shape__[1]
|
||||
return [[self.__data__[i][0] * other.__data__[0][j] for j in range(cols)] for i in range(rows)], (rows, cols)
|
||||
|
||||
def __mul__(self, other):
|
||||
@ -110,24 +99,31 @@ class Vector(Matrix):
|
||||
"a compatible ``Matrix`` or a scalar")
|
||||
|
||||
def __mul_matrix_vector_internal__(self, other):
|
||||
rows, vector_rows = other.__shape__[0], self.__shape__[0]
|
||||
return [sum([other.__data__[i][j] * self.__data__[j][0] for j in range(vector_rows)]) for i in range(rows)]
|
||||
rows = other.__shape__[0]
|
||||
new_data = [0] * rows
|
||||
for i in range(rows):
|
||||
new_data[i] = sum([other.__data__[i][j] * self.__data__[j][0] for j in range(self.__shape__[0])])
|
||||
return new_data
|
||||
|
||||
def __rmul__(self, other):
|
||||
return Vector(self.__mul_matrix_vector_internal__(other)) if isinstance(other, Matrix) else self * other
|
||||
if isinstance(other, Matrix):
|
||||
return Vector(self.__mul_matrix_vector_internal__(other))
|
||||
return self * other
|
||||
|
||||
def __truediv_vector_internal__(self, other):
|
||||
rows, cols = self.__shape__
|
||||
rows = self.__shape__[0]
|
||||
cols = self.__shape__[1]
|
||||
return [[(self.__data__[i][j] / other.__data__[i][j]) for j in range(cols)] for i in range(rows)]
|
||||
|
||||
def __truediv__(self, other):
|
||||
if not isinstance(other, (Vector, int, float)):
|
||||
raise ValueError("A ``Vector`` can only be divided ba a number or another same-shaped ``Vector``")
|
||||
if isinstance(other, Vector):
|
||||
if self.__shape__ != other.__shape__:
|
||||
raise ValueError("The ``Vector``s to be divided must have the same shape")
|
||||
return Vector(self.__truediv_vector_internal__(other))
|
||||
return Vector(super().__truediv_scalar_internal__(other))
|
||||
elif isinstance(other, int) or isinstance(other, float):
|
||||
return Vector(super().__truediv_scalar_internal__(other))
|
||||
else:
|
||||
raise ValueError("A ``Vector`` can only be divided ba a number or another same-shaped ``Vector``")
|
||||
|
||||
def norm(self, **kwargs):
|
||||
"""
|
||||
|
@ -1,29 +1,45 @@
|
||||
import math
|
||||
|
||||
import numpy
|
||||
|
||||
from matrix_mpi import MatrixMPI
|
||||
from vector import Vector
|
||||
|
||||
|
||||
class VectorMPI(MatrixMPI):
|
||||
__data__: Vector = None
|
||||
|
||||
def __init__(self, data=None, shape=None):
|
||||
if isinstance(data, Vector):
|
||||
self.__data__ = data
|
||||
else:
|
||||
self.__data__ = Vector(data=data, shape=shape)
|
||||
|
||||
# Calculate how much rows are delegated to the rank
|
||||
total_amount_of_rows = self.__data__.shape()[0]
|
||||
chunks = numpy.array_split(list(range(total_amount_of_rows)), self.__mpi_size__)
|
||||
self.__chunk__ = chunks[self.__mpi_rank__].tolist()
|
||||
|
||||
# Store the delegated rows explicitly for calculations
|
||||
self.__rank_subdata__ = Vector(self.__data__[self.__chunk__])
|
||||
self.__data__ = Vector(data=data, shape=shape)
|
||||
|
||||
@staticmethod
|
||||
def of(vector: Vector):
|
||||
return VectorMPI(vector)
|
||||
return VectorMPI(vector.get_data(), vector.shape())
|
||||
|
||||
def get_vector(self):
|
||||
"""
|
||||
Returns the ``Vector`` that is used internally
|
||||
:return: The ``Vector`` that is used internally
|
||||
"""
|
||||
return self.__data__
|
||||
|
||||
def get_data(self):
|
||||
"""
|
||||
Returns the raw data of the internal data structure
|
||||
:return: The raw data of the internal data structure
|
||||
"""
|
||||
return self.__data__.get_data()
|
||||
|
||||
def shape(self):
|
||||
return self.__data__.shape()
|
||||
|
||||
def __eq__(self, other):
|
||||
"""
|
||||
Return ``self==value``
|
||||
|
||||
:param other: The object to compare to; must be either a ``Vector``, a ``list`` or a ``numpy.ndarray``
|
||||
:return: True if data in the same-shaped vectors are equal to the given data in other for each component otherwise False
|
||||
"""
|
||||
if isinstance(other, VectorMPI):
|
||||
return self.__data__ == other.__data__
|
||||
else:
|
||||
return self.__data__ == other
|
||||
|
||||
def transpose(self):
|
||||
"""
|
||||
@ -34,36 +50,29 @@ class VectorMPI(MatrixMPI):
|
||||
def T(self):
|
||||
return self.transpose()
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__data__)
|
||||
|
||||
def __neg__(self):
|
||||
return VectorMPI.of(-self.__data__)
|
||||
|
||||
def __add__(self, other):
|
||||
if isinstance(other, VectorMPI):
|
||||
other = other.__rank_subdata__
|
||||
return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ + other)))
|
||||
other = other.__data__
|
||||
return VectorMPI.of(self.__data__ + other)
|
||||
|
||||
def __mul__(self, other):
|
||||
if isinstance(other, VectorMPI):
|
||||
other = other.__data__
|
||||
|
||||
if isinstance(other, int) or isinstance(other, float):
|
||||
result = Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ * other))
|
||||
else:
|
||||
result = self.__data__ * other
|
||||
result = self.__data__ * other
|
||||
return VectorMPI.of(result) if isinstance(result, Vector) else result
|
||||
|
||||
def __rmul__(self, other):
|
||||
if isinstance(other, MatrixMPI):
|
||||
return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(other.get_rank_subdata() * self.get_data())))
|
||||
return VectorMPI.of(other.get_matrix() * self.get_vector())
|
||||
return self * other
|
||||
|
||||
def __truediv__(self, other):
|
||||
if isinstance(other, VectorMPI):
|
||||
other = other.__rank_subdata__
|
||||
return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / other)))
|
||||
other = other.__data__
|
||||
return VectorMPI.of(self.__data__ / other)
|
||||
|
||||
def norm(self, **kwargs):
|
||||
"""
|
||||
@ -72,7 +81,7 @@ class VectorMPI(MatrixMPI):
|
||||
:param kwargs: ignored
|
||||
:return: the 2-norm of the vector
|
||||
"""
|
||||
return math.sqrt(self.__mpi_comm__.allreduce(self.__rank_subdata__.get_abs_sum_of_squares()))
|
||||
return self.__data__.norm()
|
||||
|
||||
def normalize(self):
|
||||
"""
|
||||
@ -81,4 +90,10 @@ class VectorMPI(MatrixMPI):
|
||||
|
||||
:return: the normalized vector
|
||||
"""
|
||||
return VectorMPI.of(Vector.flatten(self.__mpi_comm__.allgather(self.__rank_subdata__ / self.norm())))
|
||||
return VectorMPI.of(self.__data__ / self.norm())
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self.__data__[key]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self.__data__[key] = value
|
||||
|
Loading…
x
Reference in New Issue
Block a user