Commit fc487a57 authored by Van Dung NGUYEN

avoid memory problem

parent 104f5a9b
1 merge request: !309 Master
@@ -980,9 +980,14 @@ void BimaterialHomogenization::compute(const std::vector<fullMatrix<double> >& C
       {
         DCeffDnormal.resize(3);
       }
     };
     computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal,Ceff,
                       stiff,&DCeffDcin[0],&DCeffDcin[1],&DCeffDf[0],&DCeffDf[1],&DCeffDnormal[0],&DCeffDnormal[1],&DCeffDnormal[2]);
+  }
+  else
+  {
+    computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal,Ceff);
+  };
 };
 void LaminateHomogenization::compute(const fullMatrix<double>& C1, const fullMatrix<double>& C2, const fullMatrix<double>& C3,
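The change wraps the derivative-producing call in an if (stiff) branch: when no tangent is requested, the plain overload is called and the hyperFullMatrix sensitivity containers are never filled, which is what the commit title "avoid memory problem" refers to. Below is a minimal sketch of this optional-output pattern; all names are hypothetical and the mixing rule is a placeholder, not the library's actual computeBimaterial formula.

#include <cstdio>
#include <vector>

using Matrix = std::vector<std::vector<double>>; // stand-in for fullMatrix<double>

// Derivative outputs default to nullptr, so callers that only need Ceff
// never pay for the sensitivity storage.
void computeEffective(const Matrix& C1, const Matrix& C2, double f1, double f2,
                      Matrix& Ceff, bool stiff = false,
                      Matrix* DCeffDf1 = nullptr, Matrix* DCeffDf2 = nullptr)
{
  const size_t n = C1.size();
  Ceff.assign(n, std::vector<double>(n, 0.));
  for (size_t i = 0; i < n; ++i)
    for (size_t j = 0; j < n; ++j)
      Ceff[i][j] = f1 * C1[i][j] + f2 * C2[i][j]; // placeholder mixing rule

  if (stiff && DCeffDf1 && DCeffDf2)
  {
    // Derivatives are computed (and memory touched) only on request.
    *DCeffDf1 = C1; // d(f1*C1 + f2*C2)/df1 for the placeholder rule
    *DCeffDf2 = C2;
  }
}

int main()
{
  Matrix C1(6, std::vector<double>(6, 1.)), C2(6, std::vector<double>(6, 2.)), Ceff;
  computeEffective(C1, C2, 0.3, 0.7, Ceff);                 // cheap path, no tangents
  Matrix D1, D2;
  computeEffective(C1, C2, 0.3, 0.7, Ceff, true, &D1, &D2); // tangent path
  std::printf("Ceff[0][0] = %g\n", Ceff[0][0]);
  return 0;
}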
@@ -1104,6 +1109,8 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
     }
   }
   else if (numPhase == 2)
   {
+    if (stiff)
+    {
       computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal,Ceff,
                         stiff,&DCeffDcin[0],&DCeffDcin[1],
@@ -1111,6 +1118,11 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
                         &DCeffDnormal[1],&DCeffDnormal[2]);
     }
+    else
+    {
+      computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal,Ceff);
+    }
+  }
   else
   {
     //
     auto getSumFaction = [](const std::vector<double>& ff, int i, fullVector<double>& DoutDf)
@@ -1173,7 +1185,14 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
       };
     }
     // two phase
+    if (stiff)
+    {
       computeBimaterial(CC[i],Cin[i+1],f0i,f1i,normal,CC[i+1],stiff,&A[i],&B[i],&F0[i],&F1[i],&N0[i],&N1[i],&N2[i]);
+    }
+    else
+    {
+      computeBimaterial(CC[i],Cin[i+1],f0i,f1i,normal,CC[i+1]);
+    }
   };
   //
   Ceff = CC[numPhase-1];
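For more than two phases, the loop above collapses the laminate one interface at a time: the already-homogenized stack CC[i] is merged with the next phase Cin[i+1] using their relative volume fractions, and the result seeds the next iteration until Ceff = CC[numPhase-1]. The following sketch illustrates that reduction; the f0i/f1i normalization is inferred from the getSumFaction helper and is an assumption, as is the trivial scalar mixing rule.

#include <cstdio>
#include <vector>

// Scalar stand-in for fullMatrix<double>; the real code merges 6x6 stiffness
// matrices through computeBimaterial.
static double mergeTwoPhases(double A, double B, double fA, double fB)
{
  return fA * A + fB * B; // placeholder mixing rule
}

static double laminateEffective(const std::vector<double>& Cin,
                                const std::vector<double>& f)
{
  std::vector<double> CC(Cin.size());
  CC[0] = Cin[0];
  double fsum = f[0]; // running fraction of the merged stack (cf. getSumFaction)
  for (size_t i = 0; i + 1 < Cin.size(); ++i)
  {
    const double total = fsum + f[i + 1];
    const double f0i = fsum / total;     // relative fraction of the stack so far
    const double f1i = f[i + 1] / total; // relative fraction of the next phase
    CC[i + 1] = mergeTwoPhases(CC[i], Cin[i + 1], f0i, f1i);
    fsum = total;
  }
  return CC.back(); // Ceff = CC[numPhase-1]
}

int main()
{
  const double Ceff = laminateEffective({1., 2., 4.}, {0.5, 0.3, 0.2});
  std::printf("Ceff = %g\n", Ceff);
  return 0;
}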
@@ -1264,6 +1283,8 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
     }
   }
   else if (numPhase == 2)
   {
+    if (stiff)
+    {
       computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal[0],Ceff,
                         stiff,&DCeffDcin[0],&DCeffDcin[1],
@@ -1271,6 +1292,11 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
                         &DCeffDnormal[0][1],&DCeffDnormal[0][2]);
     }
+    else
+    {
+      computeBimaterial(Cin[0],Cin[1],f[0],f[1],normal[0],Ceff);
+    }
+  }
   else
   {
     //
     auto getSumFaction = [](const std::vector<double>& ff, int i, fullVector<double>& DoutDf)
@@ -1333,7 +1359,14 @@ void LaminateHomogenization::compute(const std::vector<fullMatrix<double> >& Cin
       };
     }
     // two phase
+    if (stiff)
+    {
       computeBimaterial(CC[i],Cin[i+1],f0i,f1i,normal[i],CC[i+1],stiff,&A[i],&B[i],&F0[i],&F1[i],&N0[i],&N1[i],&N2[i]);
+    }
+    else
+    {
+      computeBimaterial(CC[i],Cin[i+1],f0i,f1i,normal[i],CC[i+1]);
+    }
   };
   //
   Ceff = CC[numPhase-1];
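This second overload applies the same guard with a per-interface orientation (normal[i] instead of a single normal), so the pairwise reduction sketched above carries over unchanged apart from the normal passed at each merge.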
@@ -1696,7 +1729,8 @@ void TrainingDeepMaterialNetwork::train(double lr, int maxEpoch, std::string los
   WBest = Wcur;
   Wprev = Wcur;
   g.resize(numDof,true);
-  double costfuncPrev = 0.;
+  double costfuncPrev = evaluateTrainingSet(*lossEval);
+  Msg::Info("costfuncPrev = %e",costfuncPrev);
   //OfflineData
   int epoch = 0;
@@ -1773,7 +1807,7 @@ void TrainingDeepMaterialNetwork::train(double lr, int maxEpoch, std::string los
   double costfunc = evaluateTrainingSet(*lossEval);
   if (numberBatches==Ntrain)
   {
-    if ((costfunc > costfuncPrev) && (epoch > 0))
+    if (costfunc > costfuncPrev)
     {
       numEpochIncrease ++;
       if (numEpochIncrease == 1)
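Seeding costfuncPrev with the actual initial loss, instead of 0, makes the very first epoch-over-epoch comparison meaningful: with the old initialization, the first comparison would always have reported an increase, which is why the epoch > 0 guard existed and can now be dropped. A minimal sketch of the pattern, with hypothetical stand-ins for the loss evaluation and the optimizer step:

#include <cstdio>

// Hypothetical stand-ins: the real code evaluates a loss over the whole
// training set and updates the network weights by a gradient step.
static double evaluateLoss(double w)  { return (w - 3.) * (w - 3.); }
static double updateWeights(double w) { return w - 0.1 * 2. * (w - 3.); }

int main()
{
  double w = 0.;
  int numEpochIncrease = 0;
  // Seed with the true initial loss so epoch 0 is already comparable.
  double costfuncPrev = evaluateLoss(w);
  std::printf("costfuncPrev = %e\n", costfuncPrev);
  for (int epoch = 0; epoch < 50; ++epoch)
  {
    w = updateWeights(w);
    const double costfunc = evaluateLoss(w);
    if (costfunc > costfuncPrev) // no 'epoch > 0' guard needed any more
    {
      numEpochIncrease++;        // e.g. trigger LR decay / restore best weights
    }
    else
    {
      numEpochIncrease = 0;
    }
    costfuncPrev = costfunc;
  }
  std::printf("final loss = %e, increases = %d\n", costfuncPrev, numEpochIncrease);
  return 0;
}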
@@ -142,10 +142,10 @@ class BimaterialHomogenization : public Homogenization
                                    fullMatrix<double>& DNDn2) const;
   virtual void computeBimaterial(const fullMatrix<double>& C1, const fullMatrix<double>& C2, double f1, double f2, const SVector3& normal,
                                  fullMatrix<double>& Ceff,
-                                 bool stiff,
-                                 hyperFullMatrix* DCeffDC1, hyperFullMatrix* DCeffDC2,
-                                 fullMatrix<double>* DCeffDf1, fullMatrix<double>* DCeffDf2,
-                                 fullMatrix<double>* DCeffDnormal1,fullMatrix<double>* DCeffDnormal2, fullMatrix<double>* DCeffDnormal3) const;
+                                 bool stiff=false,
+                                 hyperFullMatrix* DCeffDC1=NULL, hyperFullMatrix* DCeffDC2=NULL,
+                                 fullMatrix<double>* DCeffDf1=NULL, fullMatrix<double>* DCeffDf2=NULL,
+                                 fullMatrix<double>* DCeffDnormal1=NULL,fullMatrix<double>* DCeffDnormal2=NULL, fullMatrix<double>* DCeffDnormal3=NULL) const;
 #endif //SWIG
 };
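Defaulting stiff to false and the derivative pointers to NULL keeps every existing call site compiling unchanged, while the new Ceff-only calls in the branches above can simply omit the sensitivity arguments. With a NULL-checked raw-pointer interface, this is the least intrusive way to make the tangent computation opt-in.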