From ad2c546632621ca813a39b351af6716a276c5869 Mon Sep 17 00:00:00 2001 From: lijialin03 Date: Wed, 8 Nov 2023 06:53:59 +0000 Subject: [PATCH] update1 --- .../graphGalerkin/LinearElasticity.py | 375 +++++++++ .../graphGalerkin/ModelSource.txt | 8 - jointContribution/graphGalerkin/NsChebnet.py | 291 +++++++ jointContribution/graphGalerkin/Possion.py | 227 ++++++ .../graphGalerkin/demo0/DiskPossionHard.py | 147 ---- .../graphGalerkin/demo0/circle.py | 121 --- .../graphGalerkin/demo1/HardWay_LINELA.py | 160 ---- .../graphGalerkin/demo1/main_square.py | 137 ---- .../graphGalerkin/demo2/main_ste.py | 241 ------ .../graphGalerkin/graphGalerkin.md | 19 - .../{demo1 => }/msh/cylshk0a-simp-nref0p1.mat | Bin .../graphGalerkin/source/GCNNModel.py | 396 +++++----- .../graphGalerkin/source/TensorFEMCore.py | 713 +++++++++++------- 13 files changed, 1536 insertions(+), 1299 deletions(-) create mode 100644 jointContribution/graphGalerkin/LinearElasticity.py delete mode 100644 jointContribution/graphGalerkin/ModelSource.txt create mode 100644 jointContribution/graphGalerkin/NsChebnet.py create mode 100644 jointContribution/graphGalerkin/Possion.py delete mode 100644 jointContribution/graphGalerkin/demo0/DiskPossionHard.py delete mode 100644 jointContribution/graphGalerkin/demo0/circle.py delete mode 100644 jointContribution/graphGalerkin/demo1/HardWay_LINELA.py delete mode 100644 jointContribution/graphGalerkin/demo1/main_square.py delete mode 100644 jointContribution/graphGalerkin/demo2/main_ste.py delete mode 100644 jointContribution/graphGalerkin/graphGalerkin.md rename jointContribution/graphGalerkin/{demo1 => }/msh/cylshk0a-simp-nref0p1.mat (100%) diff --git a/jointContribution/graphGalerkin/LinearElasticity.py b/jointContribution/graphGalerkin/LinearElasticity.py new file mode 100644 index 0000000000..333cb5f454 --- /dev/null +++ b/jointContribution/graphGalerkin/LinearElasticity.py @@ -0,0 +1,375 @@ +import sys + +import matplotlib.pyplot as plt +import numpy as np +import paddle +from scipy.io import loadmat + +sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_fem_resjac import create_fem_resjac +from pyCaMOtk.create_femsp_cg import create_femsp_cg +from pyCaMOtk.create_mesh_hcube import mesh_hcube +from pyCaMOtk.geom_mltdim import Hypercube +from pyCaMOtk.geom_mltdim import Simplex +from pyCaMOtk.LinearElasticityHandCode import * +from pyCaMOtk.mesh import Mesh +from pyCaMOtk.mesh import get_gdof_from_bndtag +from pyCaMOtk.solve_fem import solve_fem +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +import TensorFEMCore +from GCNNModel import LinearElasticityNet2D +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import ReshapeFix +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + +paddle.seed(0) + + +class LinearElasticity: + def __init__(self) -> None: + # GCNN model + self.model = LinearElasticityNet2D() + + def train( + self, + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ): + ii = 0 + Graph = [] + Ue = Double(Ufem.flatten().reshape(ndof, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + xcg_gcnn = np.zeros((2, 2 * xcg.shape[1])) + for i in range(xcg.shape[1]): + xcg_gcnn[:, 2 * i] = xcg[:, i] + xcg_gcnn[:, 2 * i + 1] = xcg[:, i] + Uin = Double(xcg_gcnn.T) + graph = 
Data(x=Uin, y=Ue_aug, edge_index=connectivity) + Graph.append(graph) + DataList = [[Graph[0]]] + TrainDataloader = DataList + [self.model, info] = solve_fem_GCNN( + TrainDataloader, LossF, self.model, tol, maxit + ) + np.save("modelCircleDet.npy", info) + solution = self.model(Graph[0].to("cuda")) + solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().cpu().numpy() + xcg_defGCNN = xcg + np.reshape(solution, [ndim, nnode], order="F") + msh_defGCNN = Mesh(etype, xcg_defGCNN, e2vcg, e2bnd, ndim) + uabsGCNN = np.sqrt( + solution[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + solution[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + return msh_defGCNN, uabsGCNN + + def plot_hard_way(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem( + ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 1}, [] + ) + ax1.set_title("GCNN solution") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) + ax2.set_title("FEM solution") + fig.tight_layout(pad=3.0) + plt.savefig("GCNN.pdf", bbox_inches="tight") + + def plot_square(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): + plt.figure() + ax1 = plt.subplot(1, 1, 1) + _, cbar1 = visualize_fem( + ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 4}, [] + ) + ax1.axis("off") + cbar1.remove() + plt.margins(0, 0) + plt.savefig( + "gcnn_2dlinearelasticity_square.png", + bbox_inches="tight", + pad_inches=0, + dpi=800, + ) + + plt.figure() + ax2 = plt.subplot(1, 1, 1) + _, cbar2 = visualize_fem( + ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 4}, [] + ) + ax2.axis("off") + cbar2.remove() + plt.margins(0, 0) + plt.savefig( + "fem_2dlinearelasticity_square.png", + bbox_inches="tight", + pad_inches=0, + dpi=800, + ) + + def hard_way(self): + # FEM + etype = "simplex" + ndim = 2 + dat = loadmat("./msh/cylshk0a-simp-nref0p1.mat") + xcg = dat["xcg"] / 10 + e2vcg = dat["e2vcg"] - 1 + e2bnd = dat["e2bnd"] - 1 + msh = Mesh(etype, xcg, e2vcg, e2bnd, ndim) + xcg = msh.xcg + e2vcg = msh.e2vcg + e2bnd = msh.e2bnd + porder = msh.porder + [ndim, nnode] = xcg.shape + nvar = ndim + ndof = nnode * nvar + + lam = lambda x, el: 1 + mu = lambda x, el: 1 + f = lambda x, el: np.zeros([ndim, 1]) + bnd2nbc = [0.0, 1.0, 2.0, 3.0, 4.0] + tb = lambda x, n, bnd, el, fc: np.asarray([[2], [0]]) * ( + bnd == 2 or bnd == 2.0 or (bnd - 2) ** 2 < 1e-8 + ) + np.asarray([[0], [0]]) + prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) + # Create finite element space + femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + geo = Simplex(ndim, porder) + f2v = geo.f2n + dbc_idx = get_gdof_from_bndtag( + [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v + ) + dbc_idx.sort() + dbc_idx = np.asarray(dbc_idx) + dbc_val = 0 * dbc_idx + dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) + femsp.dbc = dbc + tol = 1.0e-8 + maxit = 100000 + [Ufem, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + None, + tol, + maxit, + ) + + xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") + msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) + uabs = np.sqrt( + Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + 
Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + fig = plt.figure() + ax1 = plt.subplot(1, 1, 1) + visualize_fem(ax1, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) + ax1.set_title("FEM solution") + fig.tight_layout(pad=3.0) + + idx_xcg = [ + i + for i in range(xcg.shape[1]) + if 2 * i not in dbc_idx and 2 * i + 1 not in dbc_idx + ] + + obsidx = np.asarray([5, 11, 26, 32, 38]) # max is 9 + + idx_whole = [] + for i in obsidx: + idx_whole.append(2 * i) + idx_whole.append(2 * i + 1) + obsxcg = msh_def.xcg[:, obsidx] + ax1.plot(obsxcg[0, :], obsxcg[1, :], "o") + + dbc_idx_new = np.hstack((dbc_idx, idx_whole)) + dbc_val_new = Ufem[dbc_idx_new] + dbc = create_dbc_strct(msh.xcg.shape[1] * nvar, dbc_idx_new, dbc_val_new) + + Src_new = self.model.source + K_new = paddle.to_tensor([[0], [0]], dtype="float32").reshape((2,)) + parsfuncI = lambda x: paddle.concat((Src_new[0:1], Src_new[1:2], K_new), axis=0) + # GCNN + connectivity = e2vcg2connectivity(e2vcg, "ele") + prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( + ndim, lam, mu, f, tb, bnd2nbc + ) + femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) + LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + [i for i in range(ndof) if i not in dbc_idx], + parsfuncI, + None, + ) + LossF.append(fcn) + msh_defGCNN, uabsGCNN = self.train( + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ) + self.plot_hard_way(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) + + def main_square(self): + # FEM + nvar = 2 + etype = "hcube" + lims = np.asarray([[0, 1], [0, 1]]) + nel = [2, 2] + porder = 2 + nf = 4 + msh = mesh_hcube(etype, lims, nel, porder).getmsh() + xcg = msh.xcg + e2vcg = msh.e2vcg + e2bnd = msh.e2bnd + porder = msh.porder + [ndim, nnode] = xcg.shape + nvar = ndim + ndof = nnode * nvar + + lam = lambda x, el: 1 + mu = lambda x, el: 1 + f = lambda x, el: np.zeros([ndim, 1]) + bnd2nbc = np.asarray([0, 1, 2, 3]) + tb = lambda x, n, bnd, el, fc: np.asarray([[0.5], [0]]) * ( + (bnd - 2) ** 2 < 1e-8 + ) + np.asarray([[0], [0]]) + prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) + # Create finite element space + femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + geo = Hypercube(ndim, porder) + f2v = geo.f2n + dbc_idx = get_gdof_from_bndtag( + [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v + ) + dbc_idx.sort() + dbc_idx = np.asarray(dbc_idx) + dbc_val = 0 * dbc_idx + dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) + femsp.dbc = dbc + tol = 1.0e-8 + maxit = 4500 + + [Ufem, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + None, + tol, + maxit, + ) + + xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") + msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) + uabs = np.sqrt( + Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + # GCNN + connectivity = e2vcg2connectivity(e2vcg, "ele") + prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( + ndim, lam, mu, f, tb, bnd2nbc + ) + femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) + 
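        # Loss setup (informal sketch; see TensorFEMCore for the actual details):
        # each entry of LossF is a callable u -> FEM residual assembled by
        # create_fem_resjac, and solve_fem_GCNN trains the GCNN by minimizing a
        # norm of that residual, roughly loss ~ ||R(u_theta)||^2 on the free
        # (non-Dirichlet) degrees of freedom, so the boundary values in `dbc`
        # are imposed directly on the network output rather than penalized.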
LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + ) + fcn_fem = lambda u_: create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + ) + LossF.append(fcn) + msh_defGCNN, uabsGCNN = self.train( + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ) + self.plot_square(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) + + +if __name__ == "__main__": + le_obj = LinearElasticity() + le_obj.hard_way() + le_obj.main_square() diff --git a/jointContribution/graphGalerkin/ModelSource.txt b/jointContribution/graphGalerkin/ModelSource.txt deleted file mode 100644 index 0e88532a46..0000000000 --- a/jointContribution/graphGalerkin/ModelSource.txt +++ /dev/null @@ -1,8 +0,0 @@ -0.25 -0.25 -0.25 -0.25 -0.1 -0.1 -0.1 -0.1 diff --git a/jointContribution/graphGalerkin/NsChebnet.py b/jointContribution/graphGalerkin/NsChebnet.py new file mode 100644 index 0000000000..4b2d4ecf3c --- /dev/null +++ b/jointContribution/graphGalerkin/NsChebnet.py @@ -0,0 +1,291 @@ +import pdb +import sys + +import matplotlib.pyplot as plt +import numpy as np +import paddle + +sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_femsp_cg import create_femsp_cg_mixed2 +from pyCaMOtk.create_mesh_hcube import mesh_hcube +from pyCaMOtk.mesh import Mesh +from pyCaMOtk.setup_ins_base_handcode import setup_ins_base_handcode +from pyCaMOtk.solve_fem import solve_fem +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +import TensorFEMCore +from GCNNModel import Ns_Chebnet +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import ReshapeFix +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + + +def train(): + """ + Solve GCNN + """ + connectivity_uv = e2vcg2connectivity(e2vcg, "ele") + connectivity_p = e2vcg2connectivity(e2vcg2, "ele") + connectivity = paddle.concat( + [connectivity_uv, connectivity_uv, connectivity_p], axis=1 + ) + prob = setup_prob_eqn_handcode.setup_ins_base_handcode( + ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc + ) + + femsp_gcnn = create_femsp_cg_mixed2( + prob, + msh, + neqn1, + nvar1, + porder, + porder, + e2vcg, + e2vcg, + neqn2, + nvar2, + porder - 1, + porder - 1, + e2vcg2, + e2vcg2, + ) + LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + ) + LossF.append(fcn) + ii = 0 + Graph = [] + Ue = Double(U.flatten().reshape(-1, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + xcg_gcnn = np.zeros((2, 2 * xcg.shape[1] + msh_.xcg.shape[1])) + for i in range(xcg.shape[1]): + xcg_gcnn[:, 2 * i] = xcg[:, i] + xcg_gcnn[:, 2 * i + 1] = xcg[:, i] + for i in range(msh_.xcg.shape[1]): + xcg_gcnn[:, 2 * xcg.shape[1] + i] = msh_.xcg[:, i] + Uin = Double(xcg_gcnn.T) + graph = Data(x=Uin, y=Ue_aug, edge_index=connectivity) + Graph.append(graph) + DataList = [[Graph[0]]] 
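    # Graph layout (descriptive comment): every graph node carries its physical
    # coordinates as the input feature. The velocity nodes are duplicated
    # (columns 2*i and 2*i+1 of xcg_gcnn both hold xcg[:, i], one copy per u/v
    # degree of freedom) and the pressure nodes of the coarser mesh msh_ are
    # appended after them, matching the interleaved-velocity-then-pressure
    # ordering of the mixed FEM solution stored in graph.y (whose first entry
    # is the index of the corresponding loss function in LossF).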
+ TrainDataloader = DataList + split = [xcg.shape[1], msh_.xcg.shape[1], connectivity_uv.shape[1]] + model = Ns_Chebnet(split) + [model, info] = solve_fem_GCNN(TrainDataloader, LossF, model, tol, maxit) + paddle.save(model, "./Model.pth") + np.save("modelTrain.npy", info) + solution = model(Graph[0]) + solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().numpy() + + uv_GCNN = np.reshape(solution[0 : ndim * nnode], [ndim, nnode], order="F") + uabs_GCNN = np.sqrt(uv_GCNN[0, :] ** 2 + uv_GCNN[1, :] ** 2) + pGCNN = solution[ndim * nnode :] + + return uabs_GCNN, pGCNN + + +def plot(uabs_GCNN, pGCNN): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem(ax1, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax1.set_title("FEM-Mixed2 Velocity Magnitude") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh, uabs_GCNN[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax2.set_title("GCNN Velocity Magnitude") + fig.tight_layout(pad=2) + plt.savefig("StenosisNSU.pdf", bbox_inches="tight") + + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem(ax1, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax1.set_title("FEM-Mixed2 Pressure") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh_, pGCNN[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax2.set_title("GCNN Pressure Magnitude") + fig.tight_layout(pad=2) + plt.savefig("StenosisNSP.pdf", bbox_inches="tight") + + +if __name__ == "__main__": + # Basic setting of the case + ReList = np.linspace(10, 100, 2) + U0 = None + for Re in ReList: + print("Re=", Re) + rho = 1 + nu = 1 / Re + L = 1 + etype = "hcube" + nelem = [10, 10] + porder = 2 + pltit = True + ndim = 2 + nvar = 2 + inletVelocity = 1 + s = 0.4 + + # Create finite element mesh + msh = mesh_hcube(etype, np.asarray([[0, L], [0, L]]), nelem, porder).getmsh() + msh_ = mesh_hcube( + etype, np.asarray([[0, L], [0, L]]), nelem, porder - 1 + ).getmsh() + e2vcg2 = msh_.e2vcg + xcg = msh.xcg + e2vcg = msh.e2vcg + nnode = xcg.shape[1] + nnode_p = msh_.xcg.shape[1] + + # Setup equation parameters and natural boundary conditions + tb = lambda x, n, bnd, el, fc: np.zeros([ndim + 1, 1]) + bnd2nbc = [1, 1, 1, 1] + prob = setup_ins_base_handcode( + ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc + ) + + # start to impose BC + ndofU = ndim * nnode + ndofUP = ndofU + msh_.xcg.shape[1] + dbc_idx1 = [] + for i in range(nnode): + if i in dbc_idx1: + continue + if xcg[0, i] < 1e-12 or xcg[0, i] > ( + L - 1e-12 + ): # xcg[0,i]<1e-12 or xcg[1,i]<1e-12 or xcg[0,i]>(L-1e-12): + dbc_idx1.append(i) + dbc_idx2 = [i for i in range(nnode) if xcg[1, i] < 1e-12 and i not in dbc_idx1] + dbc_idx3 = [ + i + for i in range(nnode_p) + if msh_.xcg[1, i] > L - 1e-12 and i not in dbc_idx1 and i not in dbc_idx2 + ] + + dbc_val1 = [0 for i in dbc_idx1] + dbc_val2 = [0 for i in dbc_idx2] + dbc_val3 = [0 for i in dbc_idx3] + + dbc_idx = [2 * i for i in dbc_idx1] + dbc_val = [i for i in dbc_val1] + for i in range(len(dbc_val1)): + dbc_idx.append(2 * dbc_idx1[i] + 1) + dbc_val.append(dbc_val1[i]) + + for i in range(len(dbc_idx2)): + dbc_idx.append(2 * dbc_idx2[i]) + dbc_val.append(dbc_val2[i]) + for i in range(len(dbc_idx2)): + dbc_idx.append(2 * dbc_idx2[i] + 1) + dbc_val.append(inletVelocity) + + for i in range(len(dbc_idx3)): + dbc_idx.append(ndofU + dbc_idx3[i]) + dbc_val.append(dbc_val3[i]) + + dbc_idx, I = np.unique(np.asarray(dbc_idx), return_index=True) + 
dbc_idx = [i for i in dbc_idx] + dbc_val = np.asarray(dbc_val) + dbc_val = dbc_val[I] + dbc_val = [i for i in dbc_val] + + dbc_idx = np.asarray(dbc_idx) + dbc_val = np.asarray(dbc_val) + dbc = create_dbc_strct(ndofUP, dbc_idx, dbc_val) + + # ReDefine Mesh + xcg_ = msh_.xcg + shrinkScalar = lambda y: (1 - s * np.cos(np.pi * (y - L / 2))) + + for i in range(xcg.shape[1]): + xcg[0, i] = (xcg[0, i] - L / 2) * shrinkScalar(xcg[1, i]) + L / 2 + for i in range(xcg_.shape[1]): + xcg_[0, i] = (xcg_[0, i] - L / 2) * shrinkScalar(xcg_[1, i]) + L / 2 + + msh = Mesh(etype, xcg, e2vcg, msh.e2bnd, 2) + msh_ = Mesh(etype, xcg_, e2vcg2, msh_.e2bnd, 2) + e2vcg2 = msh_.e2vcg + xcg = msh.xcg + e2vcg = msh.e2vcg + nnode = xcg.shape[1] + + # Create finite element space + neqn1 = ndim + neqn2 = 1 + nvar1 = ndim + nvar2 = 1 + femsp = create_femsp_cg_mixed2( + prob, + msh, + neqn1, + nvar1, + porder, + porder, + e2vcg, + e2vcg, + neqn2, + nvar2, + porder - 1, + porder - 1, + e2vcg2, + e2vcg2, + ) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + femsp.dbc = dbc + + tol = 1.0e-8 + maxit = 10000 + [U, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + U0, + tol, + maxit, + ) + + idx_free = [i for i in range(len(U)) if i not in dbc_idx] + U0 = U[idx_free].reshape([-1, 1]) + uv = np.reshape(U[0 : ndim * nnode], [ndim, nnode], order="F") + p = U[ndim * nnode :] + uabs = np.sqrt(uv[0, :] ** 2 + uv[1, :] ** 2) + if Re == ReList[-1]: + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + visualize_fem(ax, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax.set_title("FEM-Mixed2 Velocity Magnitude") + fig.tight_layout(pad=2) + plt.savefig("FE-Mixed2V.pdf", bbox_inches="tight") + + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + visualize_fem(ax, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax.set_title("FEM-Mixed2 Pressure") + fig.tight_layout(pad=2) + plt.savefig("FE-Mixed2P.pdf", bbox_inches="tight") + + uabs_GCNN, pGCNN = train() + plot(uabs_GCNN, pGCNN) diff --git a/jointContribution/graphGalerkin/Possion.py b/jointContribution/graphGalerkin/Possion.py new file mode 100644 index 0000000000..54a149529a --- /dev/null +++ b/jointContribution/graphGalerkin/Possion.py @@ -0,0 +1,227 @@ +import sys + +import matplotlib.pyplot as plt +import numpy as np +import paddle + +paddle.seed(1334) + +sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_femsp_cg import create_femsp_cg +from pyCaMOtk.create_mesh_hsphere import mesh_hsphere +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +from FEM_ForwardModel import analyticalPossion +from GCNNModel import PossionNet +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import create_fem_resjac +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + + +class Possion: + def __init__(self) -> None: + # GCNN model + self.model = PossionNet() + + def params_possion(self): + """ + e2vcg is a 2D array (NNODE PER ELEM, NELEM): The connectivity of the + mesh. The (:, e) entries are the global node numbers of the nodes + that comprise element e. The local node numbers of each element are + defined by the columns of this matrix, e.g., e2vcg(i, e) is the + global node number of the ith local node of element e. 
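
        Illustrative example (assumed layout, not the mesh built below): two
        triangles that share an edge could be stored as

            e2vcg = np.array([[0, 1],
                              [1, 2],
                              [2, 3]])

        so element 0 uses nodes (0, 1, 2) and element 1 uses nodes (1, 2, 3).
        e2vcg2connectivity(e2vcg, "ele") turns this into a (2, n_edge) integer
        tensor of directed graph edges linking nodes that appear in the same
        element, which is what the GCNN uses as edge_index.
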
+ The flux constant Flux=[du/dx, du/dy]^T=K dot [dphi/dx,dphi/dy] + where phi is the solution polynomial function + """ + + # Set up GCNN-FEM Possion problem + self.nin = 1 # Number of input variable + self.nvar = 1 # Number of primanry variable + etype = "hcube" # Mesh type + c = [0, 0] # Domain center + r = 1 # Radius + self.porder = 2 # Polynomial order for solution and geometry basis + nel = [2, 2] # Number of element in x and y axis + self.msh = mesh_hsphere( + etype, c, r, nel, self.porder + ).getmsh() # Create mesh object + self.xcg = self.msh.xcg # Extract node coordinates + self.ndof = self.xcg.shape[1] + self.e2vcg = self.msh.e2vcg # Extract element connectivity + self.connectivity = e2vcg2connectivity(self.msh.e2vcg, "ele") + + self.bnd2nbc = np.asarray([0]) # Define the boundary tag! + self.K = lambda x, el: np.asarray([[1], [0], [0], [1]]) + self.Qb = ( + lambda x, n, bnd, el, fc: 0 + ) # The primary variable value on the boundary + dbc_idx = [ + i + for i in range(self.xcg.shape[1]) + if np.sum(self.xcg[:, i] ** 2) > 1 - 1e-12 + ] # The boundary node id + self.dbc_idx = np.asarray(dbc_idx) + self.dbc_val = dbc_idx * 0 # The boundary node primary variable value + + def train(self): + paddle.device.set_device("gpu:0") + dbc = create_dbc_strct( + self.xcg.shape[1] * self.nvar, self.dbc_idx, self.dbc_val + ) # Create the class of boundary condition + + Src_new = self.model.source + K_new = paddle.to_tensor([[1], [0], [0], [1]], dtype="float32").reshape((4,)) + parsfuncI = lambda x: paddle.concat((K_new, Src_new), axis=0) + S = [2] # Parametrize the source value in the pde -F_ij,j=S_i + LossF = [] + # Define the Training Data + Graph = [] + ii = 0 + + for i in S: + f = lambda x, el: i + prob = setup_prob_eqn_handcode.setup_linelptc_sclr_base_handcode( + 2, self.K, f, self.Qb, self.bnd2nbc + ) + + femsp = create_femsp_cg( + prob, self.msh, self.porder, self.e2vcg, self.porder, self.e2vcg, dbc + ) + fcn = lambda u_: create_fem_resjac( + "cg", + u_, + self.msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + self.msh.e2e, + femsp.spmat, + dbc, + [i for i in range(self.ndof) if i not in self.dbc_idx], + parsfuncI, + None, + self.model, + ) + LossF.append(fcn) + + Ue = Double(analyticalPossion(self.xcg, i).flatten().reshape(self.ndof, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + Uin = Double(self.xcg.T) + graph = Data(x=Uin, y=Ue_aug, edge_index=self.connectivity) + Graph.append(graph) + ii = ii + 1 + DataList = [[Graph[i]] for i in range(len(S))] + TrainDataloader = DataList + + # Training Data + [model, info] = solve_fem_GCNN( + TrainDataloader, LossF, self.model, self.tol, self.maxit + ) + print("K=", self.K) + print("Min Error=", info["Er"].min()) + print("Mean Error Last 10 iterations=", np.mean(info["Er"][-10:])) + print("Var Error Last 10 iterations=", np.var(info["Er"][-10:])) + + np.savetxt("demo0\ErFinal.txt", info["Er"]) + np.savetxt("demo0\Loss.txt", info["Loss"]) + + solution = model(Graph[0]) + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().cpu().numpy() + Ue = Ue.detach().cpu().numpy() + return solution, Ue + + def plot_disk(self, solution, Ue): + ax1 = plt.subplot(1, 1, 1) + _, cbar1 = visualize_fem( + ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax1.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + top=False, + labelbottom=False, + 
labelleft=False, + ) + ax1.axis("off") + cbar1.remove() + plt.margins(0, 0) + plt.savefig( + "gcnn_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 + ) + plt.close() + + ax2 = plt.subplot(1, 1, 1) + _, cbar2 = visualize_fem( + ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax2.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + top=False, + labelbottom=False, + labelleft=False, + ) + ax2.axis("off") + cbar2.remove() + plt.margins(0, 0) + plt.savefig( + "exact_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 + ) + + def plot_circle(self, solution, Ue): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem( + ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax1.set_title("GCNN solution") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, []) + ax2.set_title("Exact solution") + fig.tight_layout(pad=3.0) + plt.savefig("demo0\Demo.pdf", bbox_inches="tight") + + def disk_possion_hard(self): + # Hyper prameters + self.tol = 1.0e-16 + self.maxit = 3000 + self.params_possion() + Ufem = analyticalPossion(self.xcg, 2).flatten().reshape(self.ndof, 1) + + obsidx = np.asarray([8]) + self.dbc_idx = np.hstack((np.asarray(self.dbc_idx), obsidx)) + self.dbc_val = Ufem[self.dbc_idx] + + solution, Ue = self.train() + self.plot_disk(solution, Ue) + + def circle(self): + # Hyper prameters + self.tol = 1.0e-16 + self.maxit = 500 + self.params_possion() + self.dbc_idx = np.asarray(self.dbc_idx) + self.dbc_val = self.dbc_idx * 0 # The boundary node primary variable value + + solution, Ue = self.train() + self.plot_circle(solution, Ue) + + +if __name__ == "__main__": + possion_obj = Possion() + possion_obj.disk_possion_hard() + possion_obj.circle() diff --git a/jointContribution/graphGalerkin/demo0/DiskPossionHard.py b/jointContribution/graphGalerkin/demo0/DiskPossionHard.py deleted file mode 100644 index 932f8a4b91..0000000000 --- a/jointContribution/graphGalerkin/demo0/DiskPossionHard.py +++ /dev/null @@ -1,147 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import pdb -import sys - -import paddle -paddle.seed(1334) -from random import sample - -sys.path.insert(0, 'pycamotk') -from pyCaMOtk.create_mesh_hsphere import mesh_hsphere -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.visualize_fem import visualize_fem - -sys.path.insert(0, 'source') -from FEM_ForwardModel import analyticalPossion -from GCNNModel import e2vcg2connectivity,PossionNet -from TensorFEMCore import Double,solve_fem_GCNN,create_fem_resjac -import setup_prob_eqn_handcode - -sys.path.insert(0, 'utils') -from utils import Data - -from circle import train - -def train(S): - # Define the Training Data - Graph=[] - ii=0 - for i in S: - Ue=Double(analyticalPossion(xcg,i).flatten().reshape(ndof,1)) - fcn_id=Double(np.asarray([ii])) - Ue_aug=paddle.concat((fcn_id,Ue),axis=0) - Uin=Double(xcg.T) - graph=Data(x=Uin,y=Ue_aug,edge_index=connectivity) - Graph.append(graph) - ii=ii+1 - DataList=[[Graph[i]] for i in range(len(S))] - TrainDataloader=DataList - # GCNN model - device=paddle.device.set_device('gpu:0') - model=PossionNet() - - # Training Data - [model,info]=solve_fem_GCNN(TrainDataloader,LossF,model,tol,maxit) - print('K=',K) - print('Min Error=',info['Er'].min()) - print('Mean Error Last 10 iterations=',np.mean(info['Er'][-10:])) - print('Var Error Last 10 
iterations=',np.var(info['Er'][-10:])) - - np.savetxt('demo0\ErFinal.txt', info['Er']) - np.savetxt('demo0\Loss.txt', info['Loss']) - - solution=model(Graph[0]) - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - solution=solution.detach().cpu().numpy() - Ue=Ue.detach().cpu().numpy() - return solution, Ue - -def plot(solution, Ue): - ax1=plt.subplot(1,1,1) - _,cbar1=visualize_fem(ax1,msh,solution[e2vcg],{"plot_elem":True,"nref":6},[]) - ax1.tick_params(axis='both',which='both',bottom=False,left=False,top=False,labelbottom=False,labelleft=False) - ax1.axis('off') - cbar1.remove() - plt.margins(0,0) - plt.savefig('gcnn_possion_circle.png',bbox_inches='tight',pad_inches=-0.11,dpi=800) - plt.close() - - ax2=plt.subplot(1,1,1) - _,cbar2=visualize_fem(ax2,msh,Ue[e2vcg],{"plot_elem":True,"nref":6},[]) - ax2.tick_params(axis='both',which='both',bottom=False,left=False,top=False,labelbottom=False,labelleft=False) - ax2.axis('off') - cbar2.remove() - plt.margins(0,0) - plt.savefig('exact_possion_circle.png',bbox_inches='tight',pad_inches=-0.11,dpi=800) - -if __name__=='__main__': - """ - Hyper prameters - """ - tol=1.0e-16 - maxit=3000 - - # GCNN model - model=PossionNet() - """ - Set up GCNN-FEM Possion problem - """ - nin=1 # Number of input variable - nvar=1 # Number of primanry variable - etype='hcube' # Mesh type - c=[0,0] # Domain center - r=1 # Radius - porder=2 # Polynomial order for solution and geometry basis - nel=[2,2] # Number of element in x and y axis - msh=mesh_hsphere(etype,c,r,nel,porder).getmsh() # Create mesh object - xcg=msh.xcg # Extract node coordinates - ndof=xcg.shape[1] - e2vcg=msh.e2vcg # Extract element connectivity - connectivity=e2vcg2connectivity(msh.e2vcg,'ele') - """ - e2vcg is a 2D array (NNODE PER ELEM, NELEM): The connectivity of the - mesh. The (:, e) entries are the global node numbers of the nodes - that comprise element e. The local node numbers of each element are - defined by the columns of this matrix, e.g., e2vcg(i, e) is the - global node number of the ith local node of element e. - """ - bnd2nbc=np.asarray([0]) # Define the boundary tag! 
- K=lambda x,el: np.asarray([[1],[0],[0],[1]]) - """ - # The flux constant Flux=[du/dx, du/dy]^T=K dot [dphi/dx,dphi/dy] - where phi is the solution polynomial function - """ - Qb=lambda x,n,bnd,el,fc: 0 # The primary variable value on the boundary - dbc_idx=[i for i in range(xcg.shape[1]) if np.sum(xcg[:,i]**2)>1-1e-12] # The boundary node id - dbc_idx=np.asarray(dbc_idx) - dbc_val=dbc_idx*0 # The boundary node primary variable value - Ufem=analyticalPossion(xcg,2).flatten().reshape(ndof,1) - - idx_whole=[i for i in range(ndof) if i not in dbc_idx] - obsidx=np.asarray([8]) - obsxcg=msh.xcg[:,obsidx] - - dbc_idx_new=np.hstack((np.asarray(dbc_idx),obsidx)) - dbc_val_new=Ufem[dbc_idx_new] - dbc=create_dbc_strct(xcg.shape[1]*nvar,dbc_idx_new,dbc_val_new) # Create the class of boundary condition - - Src_new=model.source - K_new=paddle.to_tensor([[1],[0],[0],[1]], dtype='float32').reshape((4,)) - parsfuncI=lambda x: paddle.concat((K_new,Src_new),axis=0) - S=[2] # Parametrize the source value in the pde -F_ij,j=S_i - LossF=[] - for i in S: - f=lambda x,el: i - prob=setup_prob_eqn_handcode.setup_linelptc_sclr_base_handcode(2,K,f,Qb,bnd2nbc) - femsp=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg,dbc) - fcn=lambda u_:create_fem_resjac('cg',u_,msh.transfdatacontiguous, - femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp.spmat,dbc,[i for i in range(ndof) if i not in dbc_idx],parsfuncI,None,model) - LossF.append(fcn) - - solution, Ue = train(S) - plot(solution, Ue) \ No newline at end of file diff --git a/jointContribution/graphGalerkin/demo0/circle.py b/jointContribution/graphGalerkin/demo0/circle.py deleted file mode 100644 index d272ad8c5d..0000000000 --- a/jointContribution/graphGalerkin/demo0/circle.py +++ /dev/null @@ -1,121 +0,0 @@ -import numpy as np -import pdb -import sys -import paddle -import matplotlib.pyplot as plt - -sys.path.insert(0, 'pycamotk') -from pyCaMOtk.create_mesh_hsphere import mesh_hsphere -from pyCaMOtk.setup_linelptc_sclr_base_handcode import setup_linelptc_sclr_base_handcode -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem - -paddle.seed(2268) - -sys.path.insert(0, 'source') -from FEM_ForwardModel import analyticalPossion -from GCNNModel import e2vcg2connectivity, PossionNet -from TensorFEMCore import Double, create_fem_resjac, solve_fem_GCNN -from setup_prob_eqn_handcode import setup_linelptc_sclr_base_handcode - -sys.path.insert(0, 'utils') -from utils import Data - -paddle.set_default_dtype("float32") - -def train(S): - # Define the Training Data - Graph=[] - ii=0 - for i in S: - Ue=Double(analyticalPossion(xcg,i).flatten().reshape(ndof,1)) - fcn_id=Double(np.asarray([ii])) - Ue_aug=paddle.concat((fcn_id,Ue),axis=0) - Uin=Double(xcg.T) - graph=Data(x=Uin,y=Ue_aug,edge_index=connectivity) - Graph.append(graph) - ii=ii+1 - DataList=[[Graph[i]] for i in range(len(S))] - TrainDataloader=DataList - # GCNN model - device=paddle.device.set_device('gpu:0') - model=PossionNet() - - # Training Data - [model,info]=solve_fem_GCNN(TrainDataloader,LossF,model,tol,maxit) - print('K=',K) - print('Min Error=',info['Er'].min()) - print('Mean Error Last 10 iterations=',np.mean(info['Er'][-10:])) - print('Var Error Last 10 iterations=',np.var(info['Er'][-10:])) - - np.savetxt('demo0\ErFinal.txt', info['Er']) - np.savetxt('demo0\Loss.txt', info['Loss']) - - 
solution=model(Graph[0]) - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - solution=solution.detach().cpu().numpy() - Ue=Ue.detach().cpu().numpy() - return solution, Ue - -def plot(solution, Ue): - fig=plt.figure() - ax1=plt.subplot(1,2,1) - visualize_fem(ax1,msh,solution[e2vcg],{"plot_elem":True,"nref":6},[]) - ax1.set_title('GCNN solution') - ax2=plt.subplot(1,2,2) - visualize_fem(ax2,msh,Ue[e2vcg],{"plot_elem":True,"nref":6},[]) - ax2.set_title('Exact solution') - fig.tight_layout(pad=3.0) - plt.savefig('demo0\Demo.pdf',bbox_inches='tight') - -if __name__=='__main__': - """ - Hyper prameters - """ - tol=1.0e-16 - maxit=500 - """ - Set up GCNN-FEM Possion problem - """ - nin=1 # Number of input variable - nvar=1 # Number of primanry variable - etype='hcube' # Mesh type - c=[0,0] # Domain center - r=1 # Radius - porder=2 # Polynomial order for solution and geometry basis - nel=[2,2] # Number of element in x and y axis - msh=mesh_hsphere(etype,c,r,nel,porder).getmsh() # Create mesh object - xcg=msh.xcg # Extract node coordinates - ndof=xcg.shape[1] - e2vcg=msh.e2vcg # Extract element connectivity - connectivity=e2vcg2connectivity(msh.e2vcg,'ele') - - bnd2nbc=np.asarray([0]) # Define the boundary tag! - K=lambda x,el: np.asarray([[1],[0],[0],[1]]) - """ - # The flux constant Flux=[du/dx, du/dy]^T=K dot [dphi/dx,dphi/dy] - where phi is the solution polynomial function - """ - Qb=lambda x,n,bnd,el,fc: 0 # The primary variable value on the boundary - dbc_idx=[i for i in range(xcg.shape[1]) if np.sum(xcg[:,i]**2)>1-1e-12] # The boundary node id - dbc_idx=np.asarray(dbc_idx) - dbc_val=dbc_idx*0 # The boundary node primary variable value - dbc=create_dbc_strct(xcg.shape[1]*nvar,dbc_idx,dbc_val) # Create the class of boundary condition - - S=[1] # Parametrize the source value in the pde -F_ij,j=S_i - LossF=[] - for i in S: - f=lambda x,el: i - prob=setup_linelptc_sclr_base_handcode(2,K,f,Qb,bnd2nbc) - femsp=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg,dbc) - fcn=lambda u_:create_fem_resjac('cg',u_,msh.transfdatacontiguous, - femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp.spmat,dbc) - LossF.append(fcn) - - solution, Ue = train(S) - plot(solution, Ue) diff --git a/jointContribution/graphGalerkin/demo1/HardWay_LINELA.py b/jointContribution/graphGalerkin/demo1/HardWay_LINELA.py deleted file mode 100644 index 0724614321..0000000000 --- a/jointContribution/graphGalerkin/demo1/HardWay_LINELA.py +++ /dev/null @@ -1,160 +0,0 @@ -import numpy as np -import pdb -import matplotlib.pyplot as plt -import sys -from scipy.io import loadmat - -import paddle -import pgl -sys.path.insert(0, 'pycamotk') -from pyCaMOtk.mesh import Mesh, get_gdof_from_bndtag -from pyCaMOtk.LinearElasticityHandCode import * -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem -from pyCaMOtk.geom_mltdim import Simplex -from random import sample - -sys.path.insert(0, 'source') -import TensorFEMCore -from GCNNModel import e2vcg2connectivity,LinearElasticityNet2D -from TensorFEMCore import Double,solve_fem_GCNN, ReshapeFix -import setup_prob_eqn_handcode - -sys.path.insert(0, 'utils') -from utils import Data - -from main_square import train - -paddle.seed(0) - -def plot(msh_defGCNN,uabsGCNN): - fig=plt.figure() - ax1=plt.subplot(1,2,1) - 
visualize_fem(ax1,msh_defGCNN,uabsGCNN[e2vcg],{"plot_elem":False,"nref":1},[]) - ax1.set_title('GCNN solution') - ax2=plt.subplot(1,2,2) - visualize_fem(ax2,msh_def,uabs[e2vcg],{"plot_elem":False,"nref":1},[]) - ax2.set_title('FEM solution') - fig.tight_layout(pad=3.0) - plt.savefig('GCNN.pdf',bbox_inches='tight') - -def train(): - ii=0 - Graph=[] - Ue=Double(Ufem.flatten().reshape(ndof,1)) - fcn_id=Double(np.asarray([ii])) - Ue_aug=paddle.concat((fcn_id,Ue),axis=0) - xcg_gcnn=np.zeros((2,2*xcg.shape[1])) - for i in range(xcg.shape[1]): - xcg_gcnn[:,2*i]=xcg[:,i] - xcg_gcnn[:,2*i+1]=xcg[:,i] - Uin=Double(xcg_gcnn.T) - graph=Data(x=Uin,y=Ue_aug,edge_index=connectivity) - Graph.append(graph) - DataList=[[Graph[0]]] - TrainDataloader=DataList - model=LinearElasticityNet2D() - [model,info]=solve_fem_GCNN(TrainDataloader,LossF,model,tol,maxit) - np.save('modelCircleDet.npy',info) - solution=model(Graph[0].to('cuda')) - solution=ReshapeFix(paddle.clone(solution),[len(solution.flatten()),1],'C') - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - solution=solution.detach().cpu().numpy() - xcg_defGCNN=xcg+np.reshape(solution,[ndim,nnode],order='F') - msh_defGCNN=Mesh(etype,xcg_defGCNN,e2vcg,e2bnd,ndim) - uabsGCNN=np.sqrt(solution[[i for i in range(ndof) if i%2==0]]**2+\ - solution[[i for i in range(ndof) if i%2!=0]]**2) - return msh_defGCNN, uabsGCNN - -if __name__=='__main__': - - model=LinearElasticityNet2D() - ############################################################ - # FEM - etype='simplex'; ndim=2 - dat=loadmat('./demo1/msh/cylshk0a-simp-nref0p1.mat') - xcg=dat['xcg']/10; e2vcg=dat['e2vcg']-1; e2bnd=dat['e2bnd']-1 - msh=Mesh(etype,xcg,e2vcg,e2bnd,ndim) - xcg=msh.xcg - e2vcg=msh.e2vcg - e2bnd=msh.e2bnd - porder=msh.porder - [ndim,nnode]=xcg.shape - nvar=ndim - ndof=nnode*nvar - - maxit_gcnn=1000 - lam=lambda x,el:1 - mu=lambda x,el:1 - f=lambda x,el:np.zeros([ndim,1]) - bnd2nbc=[0.,1.,2.,3.,4.] 
- tb=lambda x,n,bnd,el,fc:np.asarray([[2],[0]])*(bnd==2 or bnd==2.0 or (bnd-2)**2<1e-8)+np.asarray([[0],[0]]) - prob=setup_linelast_base_handcode(ndim,lam,mu,f,tb,bnd2nbc) - # Create finite element space - femsp=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg) - ldof2gdof=femsp.ldof2gdof_var.ldof2gdof - geo=Simplex(ndim,porder) - f2v=geo.f2n - dbc_idx=get_gdof_from_bndtag([i for i in range(ndim)],[0],nvar,ldof2gdof,e2bnd,f2v) - dbc_idx.sort() - dbc_idx=np.asarray(dbc_idx) - dbc_val=0*dbc_idx - dbc=create_dbc_strct(ndof,dbc_idx,dbc_val) - femsp.dbc=dbc - tol = 1.0e-8; maxit=100000 - [Ufem,info]=solve_fem('cg',msh.transfdatacontiguous,femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof,msh.e2e, - femsp.spmat,dbc,None,tol,maxit) - - xcg_def=xcg+np.reshape(Ufem,[ndim,nnode],order='F') - msh_def=Mesh(etype,xcg_def,e2vcg,e2bnd,ndim) - uabs=np.sqrt(Ufem[[i for i in range(ndof) if i%2==0]]**2+\ - Ufem[[i for i in range(ndof) if i%2!=0]]**2) - fig=plt.figure() - ax1=plt.subplot(1,1,1) - visualize_fem(ax1,msh_def,uabs[e2vcg],{"plot_elem":False,"nref":1},[]) - ax1.set_title('FEM solution') - fig.tight_layout(pad=3.0) - - idx_xcg=[i for i in range(xcg.shape[1]) if 2*i not in dbc_idx and 2*i+1 not in dbc_idx] - - ######## - obsidx=np.asarray([5,11,26,32,38]) # max is 9 - ######## - - idx_whole=[] - for i in obsidx: - idx_whole.append(2*i) - idx_whole.append(2*i+1) - obsxcg=msh_def.xcg[:,obsidx] - ax1.plot(obsxcg[0,:],obsxcg[1,:],'o') - - dbc_idx_new=np.hstack((dbc_idx,idx_whole)) - dbc_val_new=Ufem[dbc_idx_new] - dbc=create_dbc_strct(msh.xcg.shape[1]*nvar,dbc_idx_new,dbc_val_new) - - Src_new=(model.source) - K_new=paddle.to_tensor([[0],[0]], dtype='float32').reshape((2,)) - lamdafix=paddle.to_tensor([1]).reshape([1]) - mufix=paddle.to_tensor([1]).reshape([1]) - parsfuncI=lambda x: paddle.concat((Src_new[0:1],Src_new[1:2],K_new),axis=0) - #GCNN - ##### - connectivity=e2vcg2connectivity(e2vcg,'ele') - ##### - prob=setup_prob_eqn_handcode.setup_linelast_base_handcode\ - (ndim,lam,mu,f,tb,bnd2nbc) - femsp_gcnn=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg,dbc) - LossF=[] - fcn=lambda u_:TensorFEMCore.create_fem_resjac('cg', - u_,msh.transfdatacontiguous, - femsp_gcnn.elem,femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp_gcnn.spmat,dbc,[i for i in range(ndof) if i not in dbc_idx],parsfuncI,None) - LossF.append(fcn) - msh_defGCNN, uabsGCNN = train() - plot(msh_defGCNN, uabsGCNN) \ No newline at end of file diff --git a/jointContribution/graphGalerkin/demo1/main_square.py b/jointContribution/graphGalerkin/demo1/main_square.py deleted file mode 100644 index 82e03985bd..0000000000 --- a/jointContribution/graphGalerkin/demo1/main_square.py +++ /dev/null @@ -1,137 +0,0 @@ -import numpy as np -import pdb -import matplotlib.pyplot as plt -import sys -from scipy.io import loadmat - -import paddle -sys.path.insert(0, 'pycamotk') -from pyCaMOtk.create_mesh_hcube import mesh_hcube -from pyCaMOtk.mesh import Mesh, get_gdof_from_bndtag -from pyCaMOtk.LinearElasticityHandCode import * -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem -from pyCaMOtk.geom_mltdim import Hypercube -from pyCaMOtk.create_fem_resjac import create_fem_resjac - -sys.path.insert(0, 'source') -import TensorFEMCore -from GCNNModel import e2vcg2connectivity,LinearElasticityNet2D -from 
TensorFEMCore import Double,solve_fem_GCNN, ReshapeFix -import setup_prob_eqn_handcode - -sys.path.insert(0, 'utils') -from utils import Data -def train(): - ii=0 - Graph=[] - Ue=Double(Ufem.flatten().reshape(ndof,1)) - fcn_id=Double(np.asarray([ii])) - Ue_aug=paddle.concat((fcn_id,Ue),axis=0) - xcg_gcnn=np.zeros((2,2*xcg.shape[1])) - for i in range(xcg.shape[1]): - xcg_gcnn[:,2*i]=xcg[:,i] - xcg_gcnn[:,2*i+1]=xcg[:,i] - Uin=Double(xcg_gcnn.T) - graph=Data(x=Uin,y=Ue_aug,edge_index=connectivity) - Graph.append(graph) - DataList=[[Graph[0]]] - TrainDataloader=DataList - model=LinearElasticityNet2D() - [model,info]=solve_fem_GCNN(TrainDataloader,LossF,model,tol,maxit) - np.save('modelCircleDet.npy',info) - solution=model(Graph[0].to('cuda')) - solution=ReshapeFix(paddle.clone(solution),[len(solution.flatten()),1],'C') - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - solution=solution.detach().cpu().numpy() - xcg_defGCNN=xcg+np.reshape(solution,[ndim,nnode],order='F') - msh_defGCNN=Mesh(etype,xcg_defGCNN,e2vcg,e2bnd,ndim) - uabsGCNN=np.sqrt(solution[[i for i in range(ndof) if i%2==0]]**2+\ - solution[[i for i in range(ndof) if i%2!=0]]**2) - return msh_defGCNN, uabsGCNN - -def plot(msh_defGCNN, uabsGCNN): - plt.figure() - ax1=plt.subplot(1,1,1) - _,cbar1=visualize_fem(ax1,msh_defGCNN,uabsGCNN[e2vcg],{"plot_elem":False,"nref":4},[]) - ax1.axis('off') - cbar1.remove() - plt.margins(0,0) - plt.savefig('gcnn_2dlinearelasticity_square.png', - bbox_inches='tight',pad_inches=0,dpi=800) - - plt.figure() - ax2=plt.subplot(1,1,1) - _,cbar2=visualize_fem(ax2,msh_def,uabs[e2vcg],{"plot_elem":False,"nref":4},[]) - ax2.axis('off') - cbar2.remove() - plt.margins(0,0) - plt.savefig('fem_2dlinearelasticity_square.png', - bbox_inches='tight',pad_inches=0,dpi=800) -if __name__=='__main__': - - ############################################################ - # FEM - nvar=2; etype='hcube'; lims=np.asarray([[0,1],[0,1]]) - nel=[2,2]; porder=2; nf=4 - msh=mesh_hcube(etype,lims,nel,porder).getmsh() - xcg=msh.xcg - e2vcg=msh.e2vcg - e2bnd=msh.e2bnd - porder=msh.porder - [ndim,nnode]=xcg.shape - nvar=ndim - ndof=nnode*nvar - - lam=lambda x,el:1 - mu=lambda x,el:1 - f=lambda x,el:np.zeros([ndim,1]) - bnd2nbc=np.asarray([0,1,2,3]) - tb=lambda x,n,bnd,el,fc:np.asarray([[0.5],[0]])*((bnd-2)**2<1e-8)+np.asarray([[0],[0]]) - prob=setup_linelast_base_handcode(ndim,lam,mu,f,tb,bnd2nbc) - # Create finite element space - femsp=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg) - ldof2gdof=femsp.ldof2gdof_var.ldof2gdof - geo=Hypercube(ndim,porder) - f2v=geo.f2n - dbc_idx=get_gdof_from_bndtag([i for i in range(ndim)],[0],nvar,ldof2gdof,e2bnd,f2v) - dbc_idx.sort() - dbc_idx=np.asarray(dbc_idx) - dbc_val=0*dbc_idx - dbc=create_dbc_strct(ndof,dbc_idx,dbc_val) - femsp.dbc=dbc - tol = 1.0e-8; maxit=4500 - - [Ufem,info]=solve_fem('cg',msh.transfdatacontiguous,femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof,msh.e2e, - femsp.spmat,dbc,None,tol,maxit) - - xcg_def=xcg+np.reshape(Ufem,[ndim,nnode],order='F') - msh_def=Mesh(etype,xcg_def,e2vcg,e2bnd,ndim) - uabs=np.sqrt(Ufem[[i for i in range(ndof) if i%2==0]]**2+\ - Ufem[[i for i in range(ndof) if i%2!=0]]**2) - #GCNN - connectivity=e2vcg2connectivity(e2vcg,'ele') - prob=setup_prob_eqn_handcode.setup_linelast_base_handcode\ - (ndim,lam,mu,f,tb,bnd2nbc) - femsp_gcnn=create_femsp_cg(prob,msh,porder,e2vcg,porder,e2vcg,dbc) - LossF=[] - fcn=lambda u_:TensorFEMCore.create_fem_resjac('cg', - u_,msh.transfdatacontiguous, - 
femsp_gcnn.elem,femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp_gcnn.spmat,dbc) - fcn_fem=lambda u_:create_fem_resjac('cg', - u_,msh.transfdatacontiguous, - femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp.spmat,dbc) - LossF.append(fcn) - msh_defGCNN, uabsGCNN = train() - plot(msh_defGCNN, uabsGCNN) - diff --git a/jointContribution/graphGalerkin/demo2/main_ste.py b/jointContribution/graphGalerkin/demo2/main_ste.py deleted file mode 100644 index 5f21d13095..0000000000 --- a/jointContribution/graphGalerkin/demo2/main_ste.py +++ /dev/null @@ -1,241 +0,0 @@ -import numpy as np -import pdb -import sys -import matplotlib.pyplot as plt - -import paddle -sys.path.insert(0, 'pycamotk') -from pyCaMOtk.create_mesh_hcube import mesh_hcube -from pyCaMOtk.setup_ins_base_handcode import \ - setup_ins_base_handcode -from pyCaMOtk.create_femsp_cg import create_femsp_cg_mixed2 -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem -from pyCaMOtk.mesh import Mesh - -sys.path.insert(0, 'source') -import TensorFEMCore -from GCNNModel import e2vcg2connectivity,Ns_Chebnet -from TensorFEMCore import Double,solve_fem_GCNN, ReshapeFix -import setup_prob_eqn_handcode - -sys.path.insert(0, 'utils') -from utils import Data -def train(): - ''' - Solve GCNN - ''' - connectivity_uv=e2vcg2connectivity(e2vcg,'ele') - connectivity_p=e2vcg2connectivity(e2vcg2,'ele') - connectivity=paddle.concat([connectivity_uv,connectivity_uv, - connectivity_p],axis=1) - prob=setup_prob_eqn_handcode.setup_ins_base_handcode\ - (ndim,lambda x,el:rho,lambda x,el:nu,tb,bnd2nbc) - - femsp_gcnn=create_femsp_cg_mixed2(prob,msh, - neqn1,nvar1, - porder,porder, - e2vcg,e2vcg, - neqn2,nvar2, - porder-1,porder-1, - e2vcg2,e2vcg2) - LossF=[] - fcn=lambda u_:TensorFEMCore.create_fem_resjac('cg', - u_,msh.transfdatacontiguous, - femsp_gcnn.elem,femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp_gcnn.spmat,dbc) - LossF.append(fcn) - ii=0 - Graph=[] - Ue=Double(U.flatten().reshape(-1,1)) - fcn_id=Double(np.asarray([ii])) - Ue_aug=paddle.concat((fcn_id,Ue),axis=0) - xcg_gcnn=np.zeros((2,2*xcg.shape[1]+msh_.xcg.shape[1])) - for i in range(xcg.shape[1]): - xcg_gcnn[:,2*i]=xcg[:,i] - xcg_gcnn[:,2*i+1]=xcg[:,i] - for i in range(msh_.xcg.shape[1]): - xcg_gcnn[:,2*xcg.shape[1]+i]=msh_.xcg[:,i] - Uin=Double(xcg_gcnn.T) - graph=Data(x=Uin,y=Ue_aug,edge_index=connectivity) - Graph.append(graph) - DataList=[[Graph[0]]] - TrainDataloader=DataList - split=[xcg.shape[1],msh_.xcg.shape[1],connectivity_uv.shape[1]] - model=Ns_Chebnet(split) - [model,info]=solve_fem_GCNN(TrainDataloader,LossF,model,tol,maxit) - paddle.save(model, './Model.pth') - np.save('modelTrain.npy',info) - solution=model(Graph[0]) - solution=ReshapeFix(paddle.clone(solution),[len(solution.flatten()),1],'C') - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - solution=solution.detach().numpy() - - uv_GCNN=np.reshape(solution[0:ndim*nnode],[ndim,nnode],order='F') - uabs_GCNN=np.sqrt(uv_GCNN[0,:]**2+uv_GCNN[1,:]**2) - pGCNN=solution[ndim*nnode:] - - return uabs_GCNN, pGCNN - -def plot(uabs_GCNN, pGCNN): - fig=plt.figure() - ax1=plt.subplot(1,2,1) - visualize_fem(ax1,msh,uabs[e2vcg],{"plot_elem":False,"nref":4},[]) - ax1.set_title('FEM-Mixed2 Velocity Magnitude') - ax2=plt.subplot(1,2,2) - 
visualize_fem(ax2,msh,uabs_GCNN[e2vcg],{"plot_elem":False,"nref":4},[]) - ax2.set_title('GCNN Velocity Magnitude') - fig.tight_layout(pad=2) - plt.savefig('StenosisNSU.pdf',bbox_inches='tight') - - fig=plt.figure() - ax1=plt.subplot(1,2,1) - visualize_fem(ax1,msh_,p[e2vcg2],{"plot_elem":False,"nref":4},[]) - ax1.set_title('FEM-Mixed2 Pressure') - ax2=plt.subplot(1,2,2) - visualize_fem(ax2,msh_,pGCNN[e2vcg2],{"plot_elem":False,"nref":4},[]) - ax2.set_title('GCNN Pressure Magnitude') - fig.tight_layout(pad=2) - plt.savefig('StenosisNSP.pdf',bbox_inches='tight') -if __name__=='__main__': - # Basic setting of the case - ReList=np.linspace(10,100,2) - U0=None - for Re in ReList: - print('Re=',Re) - rho=1 - nu=1/Re - L=1 - etype='hcube' - nelem=[10,10] - porder=2 - pltit=True - ndim=2 - nvar=2 - inletVelocity=1 - s=0.4 - - # Create finite element mesh - msh=mesh_hcube(etype,np.asarray([[0,L],[0,L]]),nelem,porder).getmsh() - msh_=mesh_hcube(etype,np.asarray([[0,L],[0,L]]),nelem,porder-1).getmsh() - e2vcg2=msh_.e2vcg - xcg=msh.xcg - e2vcg=msh.e2vcg - nnode=xcg.shape[1] - nnode_p=msh_.xcg.shape[1] - - # Setup equation parameters and natural boundary conditions - tb=lambda x,n,bnd,el,fc:np.zeros([ndim+1,1]) - bnd2nbc=[1,1,1,1] - prob=setup_ins_base_handcode(ndim,lambda x,el:rho, - lambda x,el:nu, - tb,bnd2nbc) - - # start to impose BC - ndofU=ndim*nnode; - ndofUP=ndofU+msh_.xcg.shape[1] - dbc_idx1=[] - for i in range(nnode): - if i in dbc_idx1: - continue - if xcg[0,i]<1e-12 or xcg[0,i]>(L-1e-12):#xcg[0,i]<1e-12 or xcg[1,i]<1e-12 or xcg[0,i]>(L-1e-12): - dbc_idx1.append(i) - dbc_idx2=[i for i in range(nnode) if xcg[1,i]<1e-12 and i not in dbc_idx1] - dbc_idx3=[i for i in range(nnode_p) if msh_.xcg[1,i]>L-1e-12 and i not in dbc_idx1 and i not in dbc_idx2] - - dbc_val1=[0 for i in dbc_idx1] - dbc_val2=[0 for i in dbc_idx2] - dbc_val3=[0 for i in dbc_idx3] - - dbc_idx=[2*i for i in dbc_idx1] - dbc_val=[i for i in dbc_val1] - for i in range(len(dbc_val1)): - dbc_idx.append(2*dbc_idx1[i]+1) - dbc_val.append(dbc_val1[i]) - - for i in range(len(dbc_idx2)): - dbc_idx.append(2*dbc_idx2[i]) - dbc_val.append(dbc_val2[i]) - for i in range(len(dbc_idx2)): - dbc_idx.append(2*dbc_idx2[i]+1) - dbc_val.append(inletVelocity) - - for i in range(len(dbc_idx3)): - dbc_idx.append(ndofU+dbc_idx3[i]) - dbc_val.append(dbc_val3[i]) - - dbc_idx,I=np.unique(np.asarray(dbc_idx),return_index=True) - dbc_idx=[i for i in dbc_idx] - dbc_val=np.asarray(dbc_val) - dbc_val=dbc_val[I] - dbc_val=[i for i in dbc_val] - - dbc_idx=np.asarray(dbc_idx) - dbc_val=np.asarray(dbc_val) - dbc=create_dbc_strct(ndofUP,dbc_idx,dbc_val) - - # ReDefine Mesh - xcg_=msh_.xcg - shrinkScalar=lambda y :(1-s*np.cos(np.pi*(y-L/2))) - - for i in range(xcg.shape[1]): - xcg[0,i]=(xcg[0,i]-L/2)*shrinkScalar(xcg[1,i])+L/2 - for i in range(xcg_.shape[1]): - xcg_[0,i]=(xcg_[0,i]-L/2)*shrinkScalar(xcg_[1,i])+L/2 - - msh=Mesh(etype,xcg,e2vcg,msh.e2bnd,2) - msh_=Mesh(etype,xcg_,e2vcg2,msh_.e2bnd,2) - e2vcg2=msh_.e2vcg - xcg=msh.xcg - e2vcg=msh.e2vcg - nnode=xcg.shape[1] - - # Create finite element space - neqn1=ndim; neqn2=1 - nvar1=ndim; nvar2=1 - femsp=create_femsp_cg_mixed2(prob,msh, - neqn1,nvar1, - porder,porder, - e2vcg,e2vcg, - neqn2,nvar2, - porder-1,porder-1, - e2vcg2,e2vcg2) - ldof2gdof = femsp.ldof2gdof_var.ldof2gdof - femsp.dbc=dbc - - tol=1.0e-8 - maxit=10000 - [U,info]=solve_fem('cg',msh.transfdatacontiguous, - femsp.elem,femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e,femsp.spmat,dbc,U0, - tol,maxit) - - 
idx_free=[i for i in range(len(U)) if i not in dbc_idx] - U0=U[idx_free].reshape([-1,1]) - uv=np.reshape(U[0:ndim*nnode],[ndim,nnode],order='F') - p=U[ndim*nnode:] - uabs=np.sqrt(uv[0,:]**2+uv[1,:]**2) - if Re==ReList[-1]: - fig=plt.figure() - ax=plt.subplot(1,1,1) - visualize_fem(ax,msh,uabs[e2vcg],{"plot_elem":False,"nref":4},[]) - ax.set_title('FEM-Mixed2 Velocity Magnitude') - fig.tight_layout(pad=2) - plt.savefig('FE-Mixed2V.pdf',bbox_inches='tight') - - fig=plt.figure() - ax=plt.subplot(1,1,1) - visualize_fem(ax,msh_,p[e2vcg2],{"plot_elem":False,"nref":4},[]) - ax.set_title('FEM-Mixed2 Pressure') - fig.tight_layout(pad=2) - plt.savefig('FE-Mixed2P.pdf',bbox_inches='tight') - - uabs_GCNN, pGCNN = train() - plot(uabs_GCNN, pGCNN) - - diff --git a/jointContribution/graphGalerkin/graphGalerkin.md b/jointContribution/graphGalerkin/graphGalerkin.md deleted file mode 100644 index 588842627e..0000000000 --- a/jointContribution/graphGalerkin/graphGalerkin.md +++ /dev/null @@ -1,19 +0,0 @@ -# Physics-informed graph neural galerkin networks -## 1. 背景简介 -尽管物理信息神经网络(PINNs)在解决前向和反向问题方面具有巨大潜力,但存在一些技术挑战,限制了其在更复杂和现实应用中的应用。首先,大多数现有的PINNs都基于逐点的公式,使用全连接网络来学习连续函数,这导致可扩展性差和难以执行硬边界条件。其次,无限的搜索空间使非凸优化网络训练变得过于复杂。第三,虽然基于卷积神经网络(CNN)的离散学习可以显著提高训练效率,但CNN在处理不规则几何形状和无结构网格时存在困难。为了解决这些挑战,作者提出了一种基于图卷积网络(GCN)和PDE的变分结构的新型离散PINN框架,以统一方式解决前向和反向偏微分方程(PDEs)。分段多项式基的使用可以减少搜索空间的维度,有助于训练和收敛。在经典PINNs中不需要调整参数的情况下,所提出的方法可以严格执行边界条件并在前向和反向问题的训练中加入稀疏数据。GCNs的灵活性被用于处理不规则几何形状和无结构网格。作者所提出方法的有效性和优点在各种受线性和非线性PDEs控制的前向和反向力学问题上进行了验证。由于多项式减少了搜索空间,因此训练效率更高。由于边界条件和稀疏数据的强制性满足,所提出的方法不需要调整损失函数权重,具有更好的鲁棒性。 -## 2. 案例1:2D homogeneous Poisson equation in circular disks -### 2.1 问题定义 -泊松方程可以写为: -$$ -f+\nabla u=0 \ in \: \Omega -$$ -边界条件为: -$$ -u=0 \ on \ \partial \Omega -$$ -该算例的理论解为: -$$ -u(x,y)={1-x^2-y^2}/4 -$$ -### 2.2 问题求解 -#### 2.2.1 模型构建 diff --git a/jointContribution/graphGalerkin/demo1/msh/cylshk0a-simp-nref0p1.mat b/jointContribution/graphGalerkin/msh/cylshk0a-simp-nref0p1.mat similarity index 100% rename from jointContribution/graphGalerkin/demo1/msh/cylshk0a-simp-nref0p1.mat rename to jointContribution/graphGalerkin/msh/cylshk0a-simp-nref0p1.mat diff --git a/jointContribution/graphGalerkin/source/GCNNModel.py b/jointContribution/graphGalerkin/source/GCNNModel.py index 7cc05604dd..f7b8c54fd5 100644 --- a/jointContribution/graphGalerkin/source/GCNNModel.py +++ b/jointContribution/graphGalerkin/source/GCNNModel.py @@ -3,194 +3,244 @@ import paddle import paddle.nn.initializer as Initializer import sys -sys.path.insert(0, 'utils') + +sys.path.insert(0, "utils") from ChebConv import ChebConv from paddle.nn.functional import relu from paddle.nn import Layer, Linear place = paddle.CUDAPlace(0) -def e2vcg2connectivity(e2vcg,type='iso'): - """ - e2vcg should be in np.array - """ - NnG=np.max(e2vcg)+1 - NnE=e2vcg.shape[1] - if type=='ele': - connectivity=[] - for i in range(NnG): - positions=np.argwhere(e2vcg==i)[:,0] - #pdb.set_trace() - for j in positions: - for k in range(NnE): - if e2vcg[j,k]!=i: - connectivity.append(np.asarray([i,e2vcg[j,k]])) - return paddle.to_tensor(paddle.floor(paddle.to_tensor(np.asarray(connectivity).T, place=place, dtype=paddle.float32)), dtype=paddle.int64) - elif type=='iso': - connectivity=[[i for i in range(NnG)],[i for i in range(NnG)]] - return paddle.to_tensor(paddle.floor(paddle.to_tensor(np.asarray(connectivity), place=place, dtype=paddle.float32)), dtype=paddle.int64) - elif type=='eletruncate': - connectivity=[] - for i in range(NnG): - positions=np.argwhere(e2vcg==i)[:,0] - for j in positions: - for k in 
range(NnE): - if e2vcg[j,k]!=i: - connectivity.append(np.asarray([i,e2vcg[j,k]])) - return paddle.to_tensor(paddle.floor(paddle.to_tensor(np.asarray(connectivity).T, place=place, dtype=paddle.float32)), dtype=paddle.int64) - - ############################################## + +def e2vcg2connectivity(e2vcg, type="iso"): + """ + e2vcg should be in np.array + """ + NnG = np.max(e2vcg) + 1 + NnE = e2vcg.shape[1] + if type == "ele": + connectivity = [] + for i in range(NnG): + positions = np.argwhere(e2vcg == i)[:, 0] + # pdb.set_trace() + for j in positions: + for k in range(NnE): + if e2vcg[j, k] != i: + connectivity.append(np.asarray([i, e2vcg[j, k]])) + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity).T, place=place, dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + elif type == "iso": + connectivity = [[i for i in range(NnG)], [i for i in range(NnG)]] + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity), place=place, dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + elif type == "eletruncate": + connectivity = [] + for i in range(NnG): + positions = np.argwhere(e2vcg == i)[:, 0] + for j in positions: + for k in range(NnE): + if e2vcg[j, k] != i: + connectivity.append(np.asarray([i, e2vcg[j, k]])) + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity).T, place=place, dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + + ############################################## + + ############################################## + def last_chance0(maru): - f91 = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) - f91 = paddle.create_parameter(f91.shape, paddle.float32, attr=Initializer.Orthogonal(Initializer.calculate_gain('relu'))) - for i in range(len(maru.lins)): - w_ = paddle.create_parameter(f91[i,:,:].T.shape, paddle.float32, attr=Initializer.Assign(f91[i,:,:].T)) - maru.lins[i].weight = w_ - return maru + f91 = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) + f91 = paddle.create_parameter( + f91.shape, + paddle.float32, + attr=Initializer.Orthogonal(Initializer.calculate_gain("relu")), + ) + for i in range(len(maru.lins)): + w_ = paddle.create_parameter( + f91[i, :, :].T.shape, + paddle.float32, + attr=Initializer.Assign(f91[i, :, :].T), + ) + maru.lins[i].weight = w_ + return maru + def last_chance1(maru): - weights=paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins],axis=0) - weights = paddle.create_parameter(weights.shape, weights.dtype, attr=Initializer.Orthogonal()) - for i in range(len(maru.lins)): - w_ = paddle.create_parameter(maru.lins[i].weight.T.shape, paddle.float32, attr=Initializer.Assign(weights[i,:,:].T)) - maru.lins[i].weight=w_ - return maru + weights = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) + weights = paddle.create_parameter( + weights.shape, weights.dtype, attr=Initializer.Orthogonal() + ) + for i in range(len(maru.lins)): + w_ = paddle.create_parameter( + maru.lins[i].weight.T.shape, + paddle.float32, + attr=Initializer.Assign(weights[i, :, :].T), + ) + maru.lins[i].weight = w_ + return maru + class PossionNet(Layer): - def __init__(self,nci=2,nco=1,kk=10): - super(PossionNet, self).__init__() - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - 
self.conv_layers.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers): - setattr(self, f'conv{i+1}', conv) - setattr(self, f'last_chance{i}', last_chance) - self.source=paddle.to_tensor([0.25]) - self.source=paddle.create_parameter(self.source.shape, dtype=paddle.float32, attr=Initializer.Assign(self.source)) - - def forward(self, data): - x, edge_index = data.x, data.edge_index - for i, (conv, last_chance) in enumerate(self.conv_layers): - x = conv(x, edge_index) - if i < len(self.conv_layers) - 2: - x = relu(x) - return x + def __init__(self, nci=2, nco=1, kk=10): + super(PossionNet, self).__init__() + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + self.source = paddle.to_tensor([0.25]) + self.source = paddle.create_parameter( + self.source.shape, + dtype=paddle.float32, + attr=Initializer.Assign(self.source), + ) + + def forward(self, data): + x, edge_index = data.x, data.edge_index + for i, (conv, last_chance) in enumerate(self.conv_layers): + x = conv(x, edge_index) + if i < len(self.conv_layers) - 2: + x = relu(x) + return x + ######### class LinearElasticityNet2D(Layer): - def __init__(self): - super(LinearElasticityNet2D, self).__init__() - nci=2;nco=1 - kk=10 - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers_1 = [] - self.conv_layers_2 = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_1.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_2.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - setattr(self, f'conv{i+1}', conv) - setattr(self, f'last_chance{i}', last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - setattr(self, f'conv{i+9}', conv) - setattr(self, f'last_chance{i}', last_chance) - - self.source=paddle.to_tensor([0.1,0.1]) - self.source.stop_gradient= False - - def forward(self, data): - x, edge_index = data.x, data.edge_index - n1=int(max(x.shape)/2) - idx1=[2*i for i in range(n1)] - idx2=[2*i+1 for i in range(n1)] - x1=x[idx1] - x2=x[idx2] - edge_index1=edge_index - edge_index2=edge_index - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - x1 = conv(x1, edge_index1) - if i < len(self.conv_layers_1) - 2: - x1 = relu(x1) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - x2 = conv(x2, edge_index2) - if i < len(self.conv_layers_2) - 2: - x2 = relu(x2) - - uv=[] - for i in range(n1): - uv.append(paddle.concat([x1[i:i+1,0:],x2[i:i+1,0:]],axis=0)) - uv_=paddle.concat(uv,axis=0) - return uv_ + def __init__(self): + super(LinearElasticityNet2D, self).__init__() + nci = 2 + nco = 1 + kk = 10 + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers_1 = [] + self.conv_layers_2 = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_1.append((conv, last_chance)) + conv = 
ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_2.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + setattr(self, f"conv{i+9}", conv) + setattr(self, f"last_chance{i}", last_chance) + + self.source = paddle.to_tensor([0.1, 0.1]) + self.source.stop_gradient = False + + def forward(self, data): + x, edge_index = data.x, data.edge_index + n1 = int(max(x.shape) / 2) + idx1 = [2 * i for i in range(n1)] + idx2 = [2 * i + 1 for i in range(n1)] + x1 = x[idx1] + x2 = x[idx2] + edge_index1 = edge_index + edge_index2 = edge_index + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + x1 = conv(x1, edge_index1) + if i < len(self.conv_layers_1) - 2: + x1 = relu(x1) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + x2 = conv(x2, edge_index2) + if i < len(self.conv_layers_2) - 2: + x2 = relu(x2) + + uv = [] + for i in range(n1): + uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) + uv_ = paddle.concat(uv, axis=0) + return uv_ + class Ns_Chebnet(Layer): - def __init__(self,split): - super(Ns_Chebnet, self).__init__() - nci=2;nco=1 - kk=10 - self.split=split - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers_1 = [] - self.conv_layers_2 = [] - self.conv_layers_3 = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_1.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_2.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i+1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_3.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - setattr(self, f'conv{i+1}', conv) - setattr(self, f'last_chance{i}', last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - setattr(self, f'conv{i+9}', conv) - setattr(self, f'last_chance{i}', last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_3): - setattr(self, f'conv{i+17}', conv) - setattr(self, f'last_chance{i}', last_chance) - - def forward(self, data): - x, edge_index = data.x, data.edge_index - n1=self.split[0] - n2=self.split[1] - n3=self.split[2] - idx1=[2*i for i in range(n1)] - idx2=[2*i+1 for i in range(n1)] - idx3=[i+n1*2 for i in range(n2)] - x1=x[idx1] - x2=x[idx2] - x3=x[idx3] - edge_index1=edge_index[:,0:n3] - edge_index2=edge_index[:,n3:2*n3] - edge_index3=edge_index[:,2*n3:] - - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - x1 = conv(x1, edge_index1) - if i < len(self.conv_layers_1) - 2: - x1 = relu(x1) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - x2 = conv(x2, edge_index2) - if i < len(self.conv_layers_2) - 2: - x2 = relu(x2) - for i, (conv, last_chance) in enumerate(self.conv_layers_3): - x2 = conv(x3, edge_index3) - if i < len(self.conv_layers_3) - 2: - x2 = relu(x2) - - uv=[] - for i in range(n1): - uv.append(paddle.concat([x1[i:i+1,0:],x2[i:i+1,0:]],axis=0)) - uv_=paddle.concat(uv,axis=0) - return paddle.concat([uv_,x3],axis=0) \ No newline at end of file + def __init__(self, split): + super(Ns_Chebnet, 
self).__init__() + nci = 2 + nco = 1 + kk = 10 + self.split = split + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers_1 = [] + self.conv_layers_2 = [] + self.conv_layers_3 = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_1.append((conv, last_chance)) + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_2.append((conv, last_chance)) + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_3.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + setattr(self, f"conv{i+9}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_3): + setattr(self, f"conv{i+17}", conv) + setattr(self, f"last_chance{i}", last_chance) + + def forward(self, data): + x, edge_index = data.x, data.edge_index + n1 = self.split[0] + n2 = self.split[1] + n3 = self.split[2] + idx1 = [2 * i for i in range(n1)] + idx2 = [2 * i + 1 for i in range(n1)] + idx3 = [i + n1 * 2 for i in range(n2)] + x1 = x[idx1] + x2 = x[idx2] + x3 = x[idx3] + edge_index1 = edge_index[:, 0:n3] + edge_index2 = edge_index[:, n3 : 2 * n3] + edge_index3 = edge_index[:, 2 * n3 :] + + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + x1 = conv(x1, edge_index1) + if i < len(self.conv_layers_1) - 2: + x1 = relu(x1) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + x2 = conv(x2, edge_index2) + if i < len(self.conv_layers_2) - 2: + x2 = relu(x2) + for i, (conv, last_chance) in enumerate(self.conv_layers_3): + x3 = conv(x3, edge_index3) + if i < len(self.conv_layers_3) - 2: + x3 = relu(x3) + + uv = [] + for i in range(n1): + uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) + uv_ = paddle.concat(uv, axis=0) + return paddle.concat([uv_, x3], axis=0) diff --git a/jointContribution/graphGalerkin/source/TensorFEMCore.py b/jointContribution/graphGalerkin/source/TensorFEMCore.py index 8d87e03d8b..9815510621 100644 --- a/jointContribution/graphGalerkin/source/TensorFEMCore.py +++ b/jointContribution/graphGalerkin/source/TensorFEMCore.py @@ -1,309 +1,436 @@ -import numpy as np -import matplotlib.pyplot as plt +import os import pdb -from scipy import sparse +import time + +import matplotlib.pyplot as plt +import numpy as np import paddle -from paddle.optimizer import Adam from paddle.nn import MSELoss -import time -import os +from paddle.optimizer import Adam +from scipy import sparse place = paddle.CUDAPlace(0) -def eval_unassembled_resjac_claw_cg(U,transf_data,elem, - elem_data,ldof2gdof_var,parsfuncI,parsfuncB,model=None): - """Evaluate elementwise residual and jacobian of conservation law in CG""" - nelem=elem_data.nelem - neqn_per_elem=elem.Tv_eqn_ref.shape[0] - nvar_per_elem=elem.Tv_var_ref.shape[0] - Re=np.zeros([neqn_per_elem,nelem]) - dRe=np.zeros([neqn_per_elem,nvar_per_elem,nelem]) - Re=Double(Re) - dRe=Double(dRe) - Re.stop_gradient=False - dRe.stop_gradient=False - Re_=[] - dRe_=[] - for e in range(nelem): - Ue=U[ldof2gdof_var[:,e]] - Re0_,dRe0_=intg_elem_claw_vol(Ue,transf_data,elem,elem_data,e,parsfuncI,model) - 
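# The element loop above collects one residual column per element; the global
# residual is then accumulated through the local-to-global dof map, which is the
# scatter-add that assemble_nobc_vec performs with paddle.scatter_nd_add. A tiny
# self-contained illustration of that pattern (the 2-element, 3-dof sizes are made up):
import numpy as np
import paddle

ldof2gdof = np.array([[0, 1], [1, 2]])  # column e lists the global dofs of element e
Fe = paddle.to_tensor([[1.0, 10.0], [2.0, 20.0]])  # per-element residual columns
F = paddle.zeros([3, 1])  # global residual
F = paddle.scatter_nd_add(
    F, paddle.to_tensor(ldof2gdof.reshape([-1, 1])), Fe.reshape([-1, 1])
)
print(F.numpy().ravel())  # [ 1. 12. 20.]: shared dof 1 sums contributions from both elements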
Re1_,dRe1_=intg_elem_claw_extface(Ue,transf_data,elem,elem_data,e,parsfuncB) - Re_.append(ReshapeFix((Re0_+Re1_),[neqn_per_elem,1],order='F')) - dRe_.append(ReshapeFix((dRe0_+dRe1_),[neqn_per_elem,nvar_per_elem,1],order='C')) - Re=paddle.concat(Re_,axis=1) - dRe=paddle.concat(dRe_,axis=2) - return Re,dRe -def create_fem_resjac(fespc,Uf,transf_data,elem,elem_data,ldof2gdof_eqn,ldof2gdof_var,e2e,spmat,dbc,enforce_idx=None,parsfuncI=None,parsfuncB=None,model=None): - """Create global residual(loss) and jacobian of conservation law in CG""" - ndof_var=np.max(ldof2gdof_var[:])+1 - dbc_idx=paddle.to_tensor(dbc.dbc_idx) - dbc_val=dbc.dbc_val - free_idx=dbc.free_idx - Uf=ReshapeFix(Uf,[ndof_var,1],'C') - U_temp=paddle.to_tensor(paddle.zeros([ndof_var,1]), dtype='float32', place=place, stop_gradient=False) - src = paddle.to_tensor(dbc_val, dtype='float32', place=place, stop_gradient=False).reshape([len(dbc_val),1])-paddle.index_select(Uf,dbc_idx) - U_temp = paddle.scatter_nd_add(U_temp, dbc_idx.reshape([-1, 1]), src.reshape([-1, 1])) - U_temp[dbc_idx]=paddle.to_tensor(dbc_val, dtype='float32', place=place, stop_gradient=False).reshape([len(dbc_val),1])-Uf[dbc_idx] - U=U_temp+Uf - # U is the GCNN output hardimpose BC but can backPP - if fespc=='cg' or fespc=='CG': - Re,dRe=eval_unassembled_resjac_claw_cg(U,transf_data,elem,elem_data, - ldof2gdof_var,parsfuncI,parsfuncB,model) - dR=assemble_nobc_mat(dRe,spmat.cooidx,spmat.lmat2gmat) - else: - raise ValueError('FE space only support cg!') - R=assemble_nobc_vec(Re,ldof2gdof_eqn) - if enforce_idx==None: - Rf=R[free_idx] - else: - Rf=R[enforce_idx] - dRf=dR.tocsr()[free_idx,:] - dRf=dRf.tocsr()[:,free_idx].T - print('Max Rf ===============================',paddle.max(paddle.abs(Rf))) - return Rf,dRf,dbc +def eval_unassembled_resjac_claw_cg( + U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model=None +): + """Evaluate elementwise residual and jacobian of conservation law in CG""" + nelem = elem_data.nelem + neqn_per_elem = elem.Tv_eqn_ref.shape[0] + nvar_per_elem = elem.Tv_var_ref.shape[0] + Re = np.zeros([neqn_per_elem, nelem]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem, nelem]) + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + Re_ = [] + dRe_ = [] + for e in range(nelem): + Ue = U[ldof2gdof_var[:, e]] + Re0_, dRe0_ = intg_elem_claw_vol( + Ue, transf_data, elem, elem_data, e, parsfuncI, model + ) + Re1_, dRe1_ = intg_elem_claw_extface( + Ue, transf_data, elem, elem_data, e, parsfuncB + ) + Re_.append(ReshapeFix((Re0_ + Re1_), [neqn_per_elem, 1], order="F")) + dRe_.append( + ReshapeFix((dRe0_ + dRe1_), [neqn_per_elem, nvar_per_elem, 1], order="C") + ) + Re = paddle.concat(Re_, axis=1) + dRe = paddle.concat(dRe_, axis=2) + return Re, dRe + + +def create_fem_resjac( + fespc, + Uf, + transf_data, + elem, + elem_data, + ldof2gdof_eqn, + ldof2gdof_var, + e2e, + spmat, + dbc, + enforce_idx=None, + parsfuncI=None, + parsfuncB=None, + model=None, +): + """Create global residual(loss) and jacobian of conservation law in CG""" + ndof_var = np.max(ldof2gdof_var[:]) + 1 + dbc_idx = paddle.to_tensor(dbc.dbc_idx) + dbc_val = dbc.dbc_val + free_idx = dbc.free_idx + Uf = ReshapeFix(Uf, [ndof_var, 1], "C") + U_temp = paddle.to_tensor( + paddle.zeros([ndof_var, 1]), dtype="float32", place=place, stop_gradient=False + ) + src = paddle.to_tensor( + dbc_val, dtype="float32", place=place, stop_gradient=False + ).reshape([len(dbc_val), 1]) - paddle.index_select(Uf, dbc_idx) + U_temp = paddle.scatter_nd_add( 
+ U_temp, dbc_idx.reshape([-1, 1]), src.reshape([-1, 1]) + ) + U_temp[dbc_idx] = ( + paddle.to_tensor( + dbc_val, dtype="float32", place=place, stop_gradient=False + ).reshape([len(dbc_val), 1]) + - Uf[dbc_idx] + ) + U = U_temp + Uf + # U is the GCNN output hardimpose BC but can backPP + if fespc == "cg" or fespc == "CG": + Re, dRe = eval_unassembled_resjac_claw_cg( + U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model + ) + dR = assemble_nobc_mat(dRe, spmat.cooidx, spmat.lmat2gmat) + else: + raise ValueError("FE space only support cg!") + R = assemble_nobc_vec(Re, ldof2gdof_eqn) + if enforce_idx == None: + Rf = R[free_idx] + else: + Rf = R[enforce_idx] + dRf = dR.tocsr()[free_idx, :] + dRf = dRf.tocsr()[:, free_idx].T + print("Max Rf ===============================", paddle.max(paddle.abs(Rf))) + return Rf, dRf, dbc + + +def intg_elem_claw_vol(Ue, transf_data, elem, elem_data, e, parsfuncI=None, model=None): + """Intergrate elementwise internal volume of element residual and jacobian of conservation law""" + [neqn_per_elem, neqn, ndimP1, nq] = elem.Tv_eqn_ref.shape + [nvar_per_elem, nvar, _, _] = elem.Tv_var_ref.shape + ndim = ndimP1 - 1 + wq = elem.wq + detG = transf_data.detG[:, e] + Tvar = elem_data.Tv_var_phys[:, :, :, :, e].reshape( + [nvar_per_elem, nvar * (ndim + 1) * nq], order="F" + ) + Re = np.zeros([neqn_per_elem, 1]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem]) + Tvar_tensor = paddle.to_tensor(Tvar, place=place, dtype=paddle.float32) + UQq = ReshapeFix(paddle.matmul(Tvar_tensor.T, Ue), [nvar, ndim + 1, nq], "F") + w = wq * detG + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + for k in range(nq): + Teqn = elem_data.Tv_eqn_phys[:, :, :, k, e].reshape( + [neqn_per_elem, neqn * (ndim + 1)], order="F" + ) + Tvar = elem_data.Tv_var_phys[:, :, :, k, e].reshape( + [nvar_per_elem, nvar * (ndim + 1)], order="F" + ) + x = transf_data.xq[:, k, e] + if parsfuncI == None: + pars = elem_data.vol_pars[:, k, e] + else: + pars = parsfuncI(x) + SF, dSFdU = elem.eqn.srcflux(UQq[:, :, k], pars, x) + dSFdU = ReshapeFix(dSFdU, [neqn * (ndim + 1), nvar * (ndim + 1)], order="F") + Teqn = Double(Teqn) + Tvar = Double(Tvar) + SF = ReshapeFix(SF, [len(SF.flatten()), 1]) + Re = Re - w[k] * ReshapeFix(paddle.matmul(Teqn, SF), Re.shape, order="F") + dRe = dRe - w[k] * paddle.matmul(Teqn, paddle.matmul(dSFdU, Tvar.T)) + return Re, dRe + + +def intg_elem_claw_extface(Ue, transf_data, elem, elem_data, e, parsfuncB=None): + """Intergrate elementwise the boundary face of element residual and jacobian of conservation law""" + [neqn_per_elem, neqn, ndimP1, nqf, nf] = elem.Tvf_eqn_ref.shape + [nvar_per_elem, nvar, _, _, _] = elem.Tvf_var_ref.shape + ndim = ndimP1 - 1 + wqf = elem.wqf + sigf = transf_data.sigf[:, :, e] + nbcnbr = elem_data.nbcnbr[:, e] + Re = np.zeros([neqn_per_elem, 1]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem]) + wf = wqf[:].reshape([len(wqf), 1]) * sigf + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + for f in range(nf): + if np.isnan(nbcnbr[f]): + continue + Tvar = np.reshape( + elem_data.Tvf_var_phys[:, :, :, :, f, e], + [nvar_per_elem, nvar * (ndim + 1) * nqf], + order="F", + ) + Tvar = Double(Tvar) + UQqf = ReshapeFix(paddle.matmul(Tvar.T, Ue), [nvar, ndim + 1, nqf], order="F") + for k in range(nqf): + x = transf_data.xqf[:, k, f, e] + n = transf_data.n[:, k, f, e] + Teqn = elem_data.Tvf_eqn_phys[:, :, 0, k, f, e] + Tvar = np.reshape( + elem_data.Tvf_var_phys[:, :, :, 
k, f, e], + [nvar_per_elem, nvar * (ndim + 1)], + order="F", + ) + Teqn = Double(Teqn) + Tvar = Double(Tvar) + if parsfuncB == None: + pars = elem_data.bnd_pars[:, k, f, e] + else: + pars = parsfuncB(x) + _, _, Fb, dFbdU = elem.eqn.bndstvcflux(nbcnbr[f], UQqf[:, :, k], pars, x, n) + dFbdU = ReshapeFix(dFbdU, [neqn, nvar * (ndim + 1)], order="F") + Re = Re + wf[k, f] * paddle.matmul(Teqn, Fb) + dRe = dRe + wf[k, f] * paddle.matmul(Teqn, paddle.matmul(dFbdU, Tvar.T)) + return Re, dRe + + +def assemble_nobc_mat(Me, cooidx, lmat2gmat): + """Assembly global jacobian of conservation law (currently no use)""" + Me = Me.detach().cpu().numpy() + nnz = cooidx.shape[0] + cooidx = cooidx.astype("int") + Mval = np.zeros(shape=[nnz, 1]) + Mval = Double(Mval) + Mval.stop_gradient = False + idx = paddle.to_tensor(lmat2gmat.reshape([-1, 1])) + src = paddle.to_tensor(Me.reshape([-1, 1])) + Mval = paddle.scatter_nd_add(Mval, idx, src).squeeze(-1) + M = sparse.coo_matrix((Mval, (cooidx[:, 0], cooidx[:, 1]))) + return M + + +def assemble_nobc_vec(Fe, ldof2gdof_eqn): + """Assembly global residual of conservation law (!!very useful!!)""" + ndof = np.max(ldof2gdof_eqn[:]) + 1 + nelem = Fe.shape[1] + F = np.zeros(shape=[ndof, 1]) + F = Double(F) + F.stop_gradient = False + idx = paddle.to_tensor(ldof2gdof_eqn.reshape([-1, 1])) + src = Fe.reshape([-1, 1]) + F = paddle.scatter_nd_add(F, idx, src) + return F + + +def solve_fem_GCNN( + DataLoader, + LossF, + model, + tol=1e-3, + maxit=2000, + qoiidx=None, + softidx=None, + penaltyConstant=None, +): + """Wrapper""" + startime = time.time() + model, info = solve_SGD( + DataLoader, LossF, model, tol, maxit, qoiidx, softidx, penaltyConstant + ) + print("wallclock time of all epochs = ", time.time() - startime) + return model, info + + +def solve_SGD( + DataLoader, + LossF, + model, + tol, + maxit, + qoiidx, + softidx, + penaltyConstant, + plotFlag="True", +): + """ + DataLoader: training data + fcn: loss function + model: GCNN model to be trained + tol: the trauncation of loss function + maxit: the maximum number of epoch + """ + optimizer = Adam(parameters=model.parameters(), learning_rate=0.001) + criterion = MSELoss() + Er = [] + Loss = [] + tol_e = [ + 1, + 0.1, + 0.09, + 0.08, + 0.07, + 0.06, + 0.05, + 0.04, + 0.03, + 0.02, + 0.01, + 0.005, + 0.001, + 0.0009, + 0.0008, + 0.0007, + 0.0006, + 0.0005, + 0.0004, + 0.0003, + 0.0002, + 0.0001, + 0.00001, + ] + idx_tol_e = 0 + for epoch in range(maxit): + print("epoch = ", epoch) + startime = time.time() + er, loss, model = trainmodel( + DataLoader, + LossF, + model, + optimizer, + criterion, + qoiidx, + softidx, + penaltyConstant, + ) + print("Solution er = ", er) + print("wallclock time of this epoch= ", time.time() - startime) + Er.append(er) + Loss.append(loss) + if loss < tol or er < tol_e[idx_tol_e]: + idx_tol_e = idx_tol_e + 1 + print("The training reaches the expected loss!") + pass + np.savetxt("./Er_" + str(er) + "Epoch_" + str(epoch) + ".txt", np.asarray(Er)) + np.savetxt("./Loss_" + str(loss) + "Epoch_" + str(epoch) + ".txt", np.asarray(Loss)) + if plotFlag: + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + ax.plot(Er, label="Relative Error") + ax.plot(Loss, label="|Residual|") + ax.legend() + ax.set_xlabel("Epoch") + ax.set_yscale("log") + fig.savefig("./LossResidual.png", bbox_inches="tight") + plt.show() + + return model, {"Er": np.asarray(Er), "Loss": np.asarray(Loss)} -def intg_elem_claw_vol(Ue,transf_data,elem,elem_data,e,parsfuncI=None, model=None): - """Intergrate elementwise internal volume of element 
residual and jacobian of conservation law""" - [neqn_per_elem,neqn,ndimP1,nq]=elem.Tv_eqn_ref.shape - [nvar_per_elem,nvar,_,_]=elem.Tv_var_ref.shape - ndim=ndimP1-1 - wq=elem.wq - detG=transf_data.detG[:,e] - Tvar=elem_data.Tv_var_phys[:,:,:,:,e].reshape([nvar_per_elem,nvar*(ndim+1)*nq], - order='F') - Re=np.zeros([neqn_per_elem,1]) - dRe=np.zeros([neqn_per_elem,nvar_per_elem]) - Tvar_tensor=paddle.to_tensor(Tvar, place=place, dtype=paddle.float32) - UQq=ReshapeFix(paddle.matmul(Tvar_tensor.T,Ue),[nvar,ndim+1,nq],'F') - w=wq*detG - Re=Double(Re) - dRe=Double(dRe) - Re.stop_gradient=False - dRe.stop_gradient=False - for k in range(nq): - Teqn=elem_data.Tv_eqn_phys[:,:,:,k,e].reshape([neqn_per_elem,neqn*(ndim+1)], - order='F') - Tvar=elem_data.Tv_var_phys[:,:,:,k,e].reshape([nvar_per_elem,nvar*(ndim+1)], - order='F') - x=transf_data.xq[:,k,e] - if parsfuncI==None: - pars=elem_data.vol_pars[:,k,e] - else: - pars=parsfuncI(x) - SF,dSFdU=elem.eqn.srcflux(UQq[:,:,k],pars,x) - dSFdU=ReshapeFix(dSFdU,[neqn*(ndim+1),nvar*(ndim+1)],order='F') - Teqn=Double(Teqn) - Tvar=Double(Tvar) - SF=ReshapeFix(SF,[len(SF.flatten()),1]) - Re=Re-w[k]*ReshapeFix(paddle.matmul(Teqn,SF),Re.shape,order='F') - dRe=dRe-w[k]*paddle.matmul(Teqn,paddle.matmul(dSFdU,Tvar.T)) - return Re, dRe - -def intg_elem_claw_extface(Ue,transf_data,elem,elem_data,e,parsfuncB=None): - """Intergrate elementwise the boundary face of element residual and jacobian of conservation law """ - [neqn_per_elem,neqn,ndimP1,nqf,nf]=elem.Tvf_eqn_ref.shape - [nvar_per_elem,nvar,_,_,_]=elem.Tvf_var_ref.shape - ndim=ndimP1-1 - wqf=elem.wqf - sigf=transf_data.sigf[:,:,e] - nbcnbr=elem_data.nbcnbr[:,e] - Re=np.zeros([neqn_per_elem,1]) - dRe=np.zeros([neqn_per_elem,nvar_per_elem]) - wf=wqf[:].reshape([len(wqf),1])*sigf - Re=Double(Re) - dRe=Double(dRe) - Re.stop_gradient=False - dRe.stop_gradient=False - for f in range(nf): - if np.isnan(nbcnbr[f]): - continue - Tvar=np.reshape(elem_data.Tvf_var_phys[:,:,:,:,f,e], - [nvar_per_elem,nvar*(ndim+1)*nqf],order='F') - Tvar=Double(Tvar) - UQqf=ReshapeFix(paddle.matmul(Tvar.T,Ue),[nvar,ndim+1,nqf],order='F') - for k in range(nqf): - x=transf_data.xqf[:,k,f,e] - n=transf_data.n[:,k,f,e] - Teqn=elem_data.Tvf_eqn_phys[:,:,0,k,f,e] - Tvar=np.reshape(elem_data.Tvf_var_phys[:,:,:,k,f,e], - [nvar_per_elem,nvar*(ndim+1)],order='F') - Teqn=Double(Teqn) - Tvar=Double(Tvar) - if parsfuncB==None: - pars=elem_data.bnd_pars[:,k,f,e] - else: - pars=parsfuncB(x) - _,_,Fb,dFbdU=elem.eqn.bndstvcflux(nbcnbr[f],UQqf[:,:,k],pars,x,n) - dFbdU=ReshapeFix(dFbdU,[neqn,nvar*(ndim+1)],order='F') - Re=Re+wf[k,f]*paddle.matmul(Teqn,Fb) - dRe=dRe+wf[k,f]*paddle.matmul(Teqn,paddle.matmul(dFbdU,Tvar.T)) - return Re,dRe - -def assemble_nobc_mat(Me,cooidx,lmat2gmat): - """Assembly global jacobian of conservation law (currently no use)""" - Me=Me.detach().cpu().numpy() - nnz=cooidx.shape[0] - cooidx=cooidx.astype('int') - Mval=np.zeros(shape=[nnz, 1]) - Mval = Double(Mval) - Mval.stop_gradient = False - idx = paddle.to_tensor(lmat2gmat.reshape([-1, 1])) - src = paddle.to_tensor(Me.reshape([-1, 1])) - Mval = paddle.scatter_nd_add(Mval, idx, src).squeeze(-1) - M=sparse.coo_matrix((Mval,(cooidx[:,0],cooidx[:,1]))) - return M -def assemble_nobc_vec(Fe,ldof2gdof_eqn): - """Assembly global residual of conservation law (!!very useful!!)""" - ndof=np.max(ldof2gdof_eqn[:])+1 - nelem=Fe.shape[1] - F=np.zeros(shape=[ndof, 1]) - F=Double(F) - F.stop_gradient=False - idx=paddle.to_tensor(ldof2gdof_eqn.reshape([-1, 1])) - src = Fe.reshape([-1, 1]) - F = 
paddle.scatter_nd_add(F, idx, src) - return F +def trainmodel( + DataLoader, LossF, model, optimizer, criterion, qoiidx, softidx, penaltyConstant +): + model.train() + er_0 = 0 + loss_0 = 0 + erlist = [] + ReList = [] + optimizer.clear_grad() + for data in DataLoader: + input = data[0] + fcn_id = data[0].y[0, 0] + truth = data[0].y[1:, 0:] + fcn = LossF[int(fcn_id)] + assert ( + int(fcn_id) - fcn_id + ) ** 2 < 1e-12, "The loss function is selected right!" + tic = time.time() + output = model(input) + Re, dRe, dbc = fcn(output) + print("wallclock time of evl Res= ", time.time() - tic) + ReList.append(paddle.abs(Re)) + solution = ReshapeFix(paddle.clone(output), [len(output.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + er_0 = ( + er_0 + + paddle.sqrt( + criterion(solution, truth) / criterion(truth, truth * 0) + ).item() + ) + erlist.append( + paddle.sqrt(criterion(solution, truth) / criterion(truth, truth * 0)).item() + ) + loss = ReList[0] * 0 + for i in range(len(ReList)): + loss = loss + ReList[i] + print("max Res=", loss.abs().max().item()) + loss = paddle.norm(loss) + if softidx is not None and penaltyConstant is not None: + print( + "DataLoss = ", + criterion(solution[softidx], truth[softidx]) * penaltyConstant, + ) + loss = criterion(solution[softidx], truth[softidx]) * penaltyConstant + loss + else: + pass + if qoiidx is not None: + QOI_ER = paddle.sqrt( + criterion(solution[qoiidx], truth[qoiidx]) + / criterion(truth[qoiidx], truth[qoiidx] * 0) + ).item() + print("QOI Error=", QOI_ER) + os.system("touch QOIError.txt") + os.system("touch QOIValue.txt") + file1 = open("QOIError.txt", "a") + file1.writelines(str(QOI_ER) + "\n") + file2 = open("QOIValue.txt", "a") + file2.writelines( + str(solution[qoiidx].detach().cpu().numpy().reshape([1, -1])[:]) + "\n" + ) + file1.close() + file2.close() + else: + pass + tic = time.time() + loss.backward() + print("wallclock time of this BP= ", time.time() - tic) + optimizer.step() + print(">>>>>>>max error<<<<<<< ====================================", max(erlist)) + try: + print(">>>>>>>model source<<<<<<< =======================", model.source) + os.system("touch ModelSource.txt") + os.system("echo ModelSource.txt") + file3 = open("ModelSource.txt", "a") + object2write = model.source.detach().cpu().numpy().reshape([1, -1]) + for ifer in range(2): + try: + file3.writelines(str(object2write[0, ifer]) + "\n") + except: + pass + file3.close() + except: + pass + return er_0 / len(DataLoader), loss.norm().item() / len(DataLoader), model -def solve_fem_GCNN(DataLoader,LossF,model,tol=1e-3,maxit=2000,qoiidx=None,softidx=None,penaltyConstant=None): - """Wrapper""" - startime=time.time() - model,info=solve_SGD(DataLoader,LossF,model,tol,maxit,qoiidx,softidx,penaltyConstant) - print('wallclock time of all epochs = ',time.time()-startime) - return model, info -def solve_SGD(DataLoader,LossF,model,tol,maxit,qoiidx,softidx,penaltyConstant,plotFlag='True'): - """ - DataLoader: training data - fcn: loss function - model: GCNN model to be trained - tol: the trauncation of loss function - maxit: the maximum number of epoch - """ - optimizer=Adam(parameters=model.parameters(), learning_rate=0.001) - criterion=MSELoss() - Er=[]; Loss=[] - tol_e=[1,0.1,0.09,0.08,0.07,0.06,0.05,0.04,0.03,0.02,0.01, - 0.005,0.001,0.0009,0.0008,0.0007, - 0.0006,0.0005,0.0004,0.0003,0.0002, - 0.0001,0.00001] - idx_tol_e=0 - for epoch in range(maxit): - print('epoch = ',epoch) - startime=time.time() - 
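# trainmodel above selects fcn = LossF[int(fcn_id)] and expects it to map the raw
# GCNN output to (Rf, dRf, dbc). A sketch of how such an entry can be built by
# closing over create_fem_resjac; msh, femsp and dbc stand for the mesh, finite
# element space and Dirichlet structure prepared by a driver script, so this is
# an assumed usage pattern rather than code taken verbatim from the repository.
def make_lossF(msh, femsp, dbc):
    def lossF(output):
        return create_fem_resjac(
            "cg",
            output,
            msh.transfdatacontiguous,
            femsp.elem,
            femsp.elem_data,
            femsp.ldof2gdof_eqn.ldof2gdof,
            femsp.ldof2gdof_var.ldof2gdof,
            msh.e2e,
            femsp.spmat,
            dbc,
        )

    return lossF

# LossF = [make_lossF(msh, femsp, dbc)]  # fcn_id 0 stored in data.y selects this entry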
er,loss,model=trainmodel(DataLoader,LossF,model, - optimizer,criterion,qoiidx,softidx,penaltyConstant) - print('Solution er = ',er) - print('wallclock time of this epoch= ',time.time()-startime) - Er.append(er);Loss.append(loss) - if loss<tol or er<tol_e[idx_tol_e]: - idx_tol_e=idx_tol_e+1 - print('The training reaches the expected loss!') - pass - np.savetxt('./Er_'+str(er)+'Epoch_'+str(epoch)+'.txt',np.asarray(Er)) - np.savetxt('./Loss_'+str(loss)+'Epoch_'+str(epoch)+'.txt',np.asarray(Loss)) - if plotFlag: - fig=plt.figure() - ax=plt.subplot(1,1,1) - ax.plot(Er,label='Relative Error') - ax.plot(Loss,label='|Residual|') - ax.legend() - ax.set_xlabel('Epoch') - ax.set_yscale('log') - fig.savefig('./LossResidual.png',bbox_inches='tight') - plt.show() - - return model, {'Er':np.asarray(Er),'Loss':np.asarray(Loss)} - -def trainmodel(DataLoader,LossF,model,optimizer,criterion,qoiidx,softidx,penaltyConstant): - model.train() - er_0=0 - loss_0=0 - erlist=[] - ReList=[] - optimizer.clear_grad() - for data in DataLoader: - input=data[0] - fcn_id=data[0].y[0,0] - truth=data[0].y[1:,0:] - fcn=LossF[int(fcn_id)] - assert (int(fcn_id)-fcn_id)**2<1e-12, 'The loss function is selected right!' - tic=time.time() - output=model(input) - Re,dRe,dbc=fcn(output) - print('wallclock time of evl Res= ',time.time()-tic) - ReList.append(paddle.abs(Re)) - solution=ReshapeFix(paddle.clone(output),[len(output.flatten()),1],'C') - solution[dbc.dbc_idx]=Double(dbc.dbc_val.reshape([len(dbc.dbc_val),1])) - er_0=er_0+paddle.sqrt(criterion(solution,truth)/criterion(truth,truth*0)).item() - erlist.append(paddle.sqrt(criterion(solution,truth)/criterion(truth,truth*0)).item()) - loss=ReList[0]*0 - for i in range(len(ReList)): - loss=loss+ReList[i] - print('max Res=',loss.abs().max().item()) - loss=paddle.norm(loss) - if softidx is not None and penaltyConstant is not None: - print('DataLoss = ',criterion(solution[softidx],truth[softidx])*penaltyConstant) - loss=criterion(solution[softidx],truth[softidx])*penaltyConstant+loss - else: - pass - if qoiidx is not None: - QOI_ER=paddle.sqrt(criterion(solution[qoiidx],truth[qoiidx])/criterion(truth[qoiidx],truth[qoiidx]*0)).item() - print('QOI Error=',QOI_ER) - os.system("touch QOIError.txt") - os.system("touch QOIValue.txt") - file1= open("QOIError.txt","a") - file1.writelines(str(QOI_ER)+"\n") - file2= open("QOIValue.txt","a") - file2.writelines(str(solution[qoiidx].detach().cpu().numpy().reshape([1,-1])[:])+"\n") - file1.close() - file2.close() - else: - pass - tic=time.time() - loss.backward() - print('wallclock time of this BP= ',time.time()-tic) - optimizer.step() - print('>>>>>>>max error<<<<<<< ====================================', max(erlist)) - try: - print('>>>>>>>model source<<<<<<< =======================',model.source) - os.system("echo ModelSource.txt") - file3= open("ModelSource.txt","a") - object2write=model.source.detach().cpu().numpy().reshape([1,-1]) - for ifer in range(2): - try: - file3.writelines(str(object2write[0,ifer])+"\n") - except: - pass - file3.close() - except: - pass - return er_0/len(DataLoader),loss.norm().item()/len(DataLoader),model +def ReshapeFix(input, Shape, order="F"): + if order == "F": + return paddle.reshape( + input.T, [Shape[len(Shape) - 1 - i] for i in range(len(Shape))] + ).transpose([len(Shape) - 1 - i for i in range(len(Shape))]) + elif order == "C": + return paddle.reshape(input, Shape) + else: + raise ValueError("Reshape Only Support Fortran or C") -def Reshape(input, Shape,order='F'): - if order=='F': - return paddle.reshape(input,[Shape[len(Shape)-1-i] \ - for i in range(len(Shape))]).permute([len(Shape)-1-i \ - for i in range(len(Shape))]) - elif order=='C': - return paddle.reshape(input, Shape) - else: - raise ValueError('Reshape Only Support Fortran or C') -def ReshapeFix(input, Shape,order='F'): - if order=='F': - return paddle.reshape(input.T,[Shape[len(Shape)-1-i] \ - for i in range(len(Shape))]).transpose([len(Shape)-1-i \ - for i in range(len(Shape))]) - elif order=='C': - return paddle.reshape(input, Shape) - else: - raise ValueError('Reshape Only Support Fortran or C') - def Double(A): - if len(A.shape)==0 or (len(A.shape)==1 and A.shape[0]==1): - return paddle.to_tensor([A], place=place, dtype=paddle.float32).reshape([1,1]) - else: - return paddle.to_tensor(A, place=place, dtype=paddle.float32) \ No newline at end of file + if len(A.shape) == 0 or (len(A.shape) == 1 and A.shape[0] == 1): + return paddle.to_tensor([A], place=place, dtype=paddle.float32).reshape([1, 1]) + else: + return paddle.to_tensor(A, place=place, dtype=paddle.float32)
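A quick sanity check of the Fortran-order reshape that TensorFEMCore.py ends with: ReshapeFix(..., order="F") should agree with numpy.reshape(..., order="F"). The snippet below is a minimal usage example, assuming TensorFEMCore is importable (e.g., run from the source/ directory) and a GPU build of Paddle, since the module creates paddle.CUDAPlace(0) at import time.

import numpy as np
import paddle

from TensorFEMCore import ReshapeFix

a_np = np.arange(6, dtype="float32").reshape(2, 3)
a_pd = paddle.to_tensor(a_np)

want = np.reshape(a_np, [3, 2], order="F")  # column-major reshape
got = ReshapeFix(a_pd, [3, 2], "F")
print(np.allclose(got.numpy(), want))  # expected: True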