Commit 819eca6f authored by Ludovic Noels

Merge branch 'master' into newStructureNonLocal

parents 085e9961 6f3b0e13
......@@ -33,7 +33,7 @@ sol = 2 # Gmm=0 (default) Taucs=1 PETsc=2
soltype = 4 #3 # StaticLinear=0 (default) StaticNonLinear=1
nstep = 6000 # number of steps (used only if soltype=1)
ftime =3.e-2 # Final time (used only if soltype=1)
tol=1.e-6 # relative tolerance for NR scheme (used only if soltype=1)
tol=1.e-5 # relative tolerance for NR scheme (used only if soltype=1)
nstepArch=20 # Number of steps between 2 archivings (used only if soltype=1)
fullDg =1 #0 = CG, 1 = DG
space1 = 0 # function space (Lagrange=0)
......@@ -73,7 +73,7 @@ mysolver.addMaterialLaw(lawcoh1)
mysolver.addMaterialLaw(lawfrac1)
mysolver.Scheme(soltype)
mysolver.Solver(sol)
mysolver.snlData(nstep,ftime,tol)
mysolver.snlData(nstep,ftime,tol,tol/10.)
mysolver.snlManageTimeStep(30,5,20,10)
#mysolver.lineSearch(bool(1))
mysolver.stepBetweenArchiving(nstepArch)
......@@ -128,8 +128,8 @@ mysolver.archivingNodeDisplacement(57,1,10)
mysolver.solve()
check = TestCheck()
check.equal(-8.431679e+01,mysolver.getArchivedForceOnPhysicalGroup("Face", 55, 1),5.e-2)
check.equal(9.2e-04,mysolver.getArchivedNodalValue(57,1,mysolver.displacement),1e-6)
check.equal(-4.139801e+02,mysolver.getArchivedForceOnPhysicalGroup("Face", 55, 1),5.e-2)
check.equal(8.999786e-04,mysolver.getArchivedNodalValue(57,1,mysolver.displacement),1e-6)
try:
import linecache
......@@ -139,5 +139,5 @@ except:
import os
os._exit(1)
else:
check.equal(8.659006e-01,float(linesDam.split(';')[1]))
check.equal(9.179512e-01,float(linesDam.split(';')[1]))
......@@ -3119,29 +3119,37 @@ void torchANNBasedDG3DMaterialLaw::RNNstressGeo_stiff(const fullMatrix<double>&
// Normalize extra inputs at previous time step and store in Extra0_vec. Necessary for feature enabling SCU @Mohib
Normalize_geo(Extra0, Extra0_vec);
 
// Populate the container "Combine0_vec" with the normalized quantities in the correct order.
// Order is: Radius + E0 for SCU. Last entry in Extra0_vec contains time and isn't needed here @Mohib.
Combine0_vec.insert(Combine0_vec.end(), Extra0_vec.begin(), Extra0_vec.end() - 1);
Combine0_vec.insert(Combine0_vec.end(), E0_vec.begin(), E0_vec.end());
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
// // Populate the container "Combine0_vec" with the normalized quantities in the correct order.
// // Order is: Radius + E0 for SCU. Last entry in Extra0_vec contains time and isn't needed here @Mohib.
// Combine0_vec.insert(Combine0_vec.end(), Extra0_vec.begin(), Extra0_vec.end() - 1);
// Combine0_vec.insert(Combine0_vec.end(), E0_vec.begin(), E0_vec.end());
 
}
 
if(_NeedExtraNorm and _DoubleInput){
 
// Populate the container "Combine_vec" with the normalized quantities in the correct order.
// Order is: Radius + E for SCU. Last entry in Extra_vec contains time and isn't needed here @Mohib.
Combine_vec.insert(Combine_vec.end(), Extra_vec.begin(), Extra_vec.end() - 1);
Combine_vec.insert(Combine_vec.end(), E_vec.begin(), E_vec.end());
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
// // Populate the container "Combine_vec" with the normalized quantities in the correct order.
// // Order is: Radius + E for SCU. Last entry in Extra_vec contains time and isn't needed here @Mohib.
// Combine_vec.insert(Combine_vec.end(), Extra_vec.begin(), Extra_vec.end() - 1);
// Combine_vec.insert(Combine_vec.end(), E_vec.begin(), E_vec.end());
 
if(stiff){
E0_norm = torch::from_blob(E0_vec.data(), {1,1, _numberOfInput - _numberOfExtraInput}, torch::requires_grad());
Combine0_norm = torch::from_blob(Combine0_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad());
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
// Combine0_norm = torch::from_blob(Combine0_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad());
}
else{
E0_norm = torch::from_blob(E0_vec.data(), {1,1, _numberOfInput - _numberOfExtraInput}, torch::requires_grad(false));
Combine0_norm = torch::from_blob(Combine0_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad(false));
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
// Combine0_norm = torch::from_blob(Combine0_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad(false));
}
inputs.push_back(Combine0_norm);
// Changes in SCU with Radius correction don't require pushing this combine vector into the jit inputs anymore. TODO: Clean up before commit
// inputs.push_back(Combine0_norm);
inputs.push_back(E0_norm);
}
else{
 
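For context, the pattern used throughout this hunk wraps the already-normalized std::vector<float> buffers as 1x1xN torch tensors without copying, and enables gradients only when the material tangent is requested (stiff). Below is a minimal, self-contained sketch of that pattern under those assumptions; the helper name makeInputTensor and the dummy values are illustrative only and not part of the repository code.

#include <torch/script.h>
#include <cstdint>
#include <vector>

// Wrap an existing float buffer as a 1x1xN tensor. from_blob does not copy or
// own the data, so the vector must outlive the returned tensor.
static torch::Tensor makeInputTensor(std::vector<float>& values, bool stiff)
{
  return torch::from_blob(values.data(),
                          {1, 1, static_cast<std::int64_t>(values.size())},
                          torch::requires_grad(stiff));
}

int main()
{
  std::vector<float> E0_vec = {0.01f, 0.02f, 0.03f};          // normalized strain history (dummy values)
  std::vector<torch::jit::IValue> inputs;
  inputs.push_back(makeInputTensor(E0_vec, /*stiff=*/true));  // gradients needed when the tangent is assembled
  return 0;
}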
......@@ -3166,42 +3174,51 @@ void torchANNBasedDG3DMaterialLaw::RNNstressGeo_stiff(const fullMatrix<double>&
// added norm of E1 and delta E_vec to feature enable SCU @Mohib
vector<float> norm_E1(1);
// delta DE_vec doesn't include time so decrementing by 1 @Mohib.
vector<float> DE_vec(_numberOfInput - 1);
// delta DE_vec doesn't include time and radius, so decrementing by the number of extra variables @Mohib.
vector<float> DE_vec(_numberOfInput - _numberOfExtraInput);
torch::Tensor norm;
if(_kinematicInput == torchANNBasedDG3DMaterialLaw::EGLInc or _NeedExtraNorm){
if(_kinematicInput == torchANNBasedDG3DMaterialLaw::EGL and _NeedExtraNorm){
for (int i = 0; i <_numberOfInput - 1; i++) {
// DE_vec[i] = E_vec[i]-E0_vec[i]; #TODO: Remove before commit @Mohib
DE_vec[i] = Combine_vec[i]- Combine0_vec[i];
for (int i = 0; i <_numberOfInput - _numberOfExtraInput; i++) {
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
//DE_vec[i] = Combine_vec[i]- Combine0_vec[i];
DE_vec[i] = E_vec[i]-E0_vec[i];
}
if(_DoubleInput){
if(stiff){
dE_norm = torch::from_blob(DE_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad());
dE_norm = torch::from_blob(DE_vec.data(), {1,1, _numberOfInput - _numberOfExtraInput}, torch::requires_grad());
}
else{
dE_norm = torch::from_blob(DE_vec.data(), {1,1, _numberOfInput - 1}, torch::requires_grad(false));
dE_norm = torch::from_blob(DE_vec.data(), {1,1, _numberOfInput - _numberOfExtraInput}, torch::requires_grad(false));
}
inputs.push_back(dE_norm);
}
norm_E1[0] = sqrt(std::inner_product(DE_vec.begin(), DE_vec.end(), DE_vec.begin(), 0.0));
// norm_E1[0] = sqrt(std::inner_product(DE_vec.begin(), DE_vec.end(), DE_vec.begin(), 0.0));
norm_E1[0] = 0;
for (int i = 0; i <_numberOfInput - _numberOfExtraInput; i++ ) {
norm_E1[0] += DE_vec[i] * DE_vec[i];
}
norm_E1[0] = sqrt(norm_E1[0]);
}
else{
fullMatrix<double> CombineE1(1, _numberOfInput - 1);
 
for (int i = 0; i < _numberOfExtraInput - 1; i++)
{
CombineE1(0, i) = Extra1(0, i);
}
// Changes in SCU with Radius correction don't require this combine vector anymore. TODO: Clean up before commit
// fullMatrix<double> CombineE1(1, _numberOfInput - 1);
//
//
// for (int i = 0; i < _numberOfExtraInput - 1; i++)
// {
// CombineE1(0, i) = Extra1(0, i);
// }
//
// for (int i = 0; i < _numberOfInput - _numberOfExtraInput; i++)
// {
// CombineE1(0, _numberOfExtraInput - 1 + i) = E1(0, i);
// }
// norm_E1[0] = CombineE1.norm();
norm_E1[0] = E1.norm();
 
for (int i = 0; i < _numberOfInput - _numberOfExtraInput; i++)
{
CombineE1(0, _numberOfExtraInput - 1 + i) = E1(0, i);
}
//norm_E1[0] = E1.norm(); #TODO: Remove before commit @Mohib
norm_E1[0] = CombineE1.norm();
}
if(stiff){
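The replacement in this hunk restricts the increment DE_vec = E_vec - E0_vec to the kinematic components (time and radius excluded, hence _numberOfInput - _numberOfExtraInput entries) and computes its Euclidean norm with an explicit loop instead of the previous std::inner_product over the combined vector. The following standalone sketch reproduces the equivalent computation; the helper name strainIncrementNorm is hypothetical and only meant to illustrate the arithmetic.

#include <cmath>
#include <cstddef>
#include <vector>

// Euclidean norm of the strain increment over the first n kinematic components,
// equivalent to the loop that fills norm_E1[0] in the hunk above.
static float strainIncrementNorm(const std::vector<float>& E_vec,
                                 const std::vector<float>& E0_vec,
                                 std::size_t n)
{
  float sum = 0.f;
  for (std::size_t i = 0; i < n; ++i)
  {
    const float d = E_vec[i] - E0_vec[i];   // DE_vec[i]
    sum += d * d;
  }
  return std::sqrt(sum);
}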
......@@ -3236,6 +3253,29 @@ void torchANNBasedDG3DMaterialLaw::RNNstressGeo_stiff(const fullMatrix<double>&
}
}
 
// Added the radius tensor (r_norm) and value (r_vec) to feature-enable SCU with Radius correction. @Mohib
vector<float> r_vec(1);
torch::Tensor r_norm;
if(_kinematicInput == torchANNBasedDG3DMaterialLaw::EGLInc or _NeedExtraNorm){
if(_kinematicInput == torchANNBasedDG3DMaterialLaw::EGL and _NeedExtraNorm){
// // For SCU, dt needs to be separately normalized. @Mohib
// Normalize_dt(Extra1, Extra_vec);
// Normalize_dt(Extra0, Extra0_vec);
r_vec[0] = Extra_vec[0];
if(stiff){
r_norm = torch::from_blob(r_vec.data(), {1,1,1}, torch::requires_grad());
}
else{
r_norm = torch::from_blob(r_vec.data(), {1,1,1}, torch::requires_grad(false));
}
inputs.push_back(r_norm);
}
}
inputs.push_back(h0);
 
auto outputs= module.forward(inputs).toTuple();
......
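The last hunk adds the normalized radius as its own 1x1x1 input tensor ahead of the hidden state h0, and the (truncated) tail of the diff ends with the TorchScript evaluation via module.forward(inputs).toTuple(). The self-contained sketch below shows that overall call pattern under stated assumptions: the model file name, tensor shapes, and input order are placeholders and will differ from the actual traced network used by torchANNBasedDG3DMaterialLaw.

#include <torch/script.h>
#include <vector>

int main()
{
  // Load a traced/scripted recurrent network (placeholder file name).
  torch::jit::script::Module module = torch::jit::load("rnn_material.pt");

  std::vector<torch::jit::IValue> inputs;

  std::vector<float> r_vec = {0.5f};                          // normalized radius (dummy value)
  torch::Tensor r_norm = torch::from_blob(r_vec.data(), {1, 1, 1},
                                          torch::requires_grad(false));
  torch::Tensor E_norm = torch::zeros({1, 1, 6});             // normalized strain input (dummy shape)
  torch::Tensor h0     = torch::zeros({1, 1, 100});           // initial hidden state (dummy size)

  inputs.push_back(E_norm);
  inputs.push_back(r_norm);
  inputs.push_back(h0);

  // The network returns a tuple, e.g. (stress, updated hidden state).
  auto outputs = module.forward(inputs).toTuple();
  torch::Tensor stress = outputs->elements()[0].toTensor();
  (void)stress;
  return 0;
}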