From f4f4c7cd746f06f3e79d2707dd5c8ddc246c974c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fran=C3=A7ois=20Henrotte?= <francois.henrotte@uclouvain.be>
Date: Tue, 16 Oct 2018 12:57:50 +0200
Subject: [PATCH] Clarify sensitivity documentation and generalize gradient retrieval

---
 Ccore/ccore.pro | 12 +++++++-----
 Ccore/shp.py    |  9 +++++----
 2 files changed, 12 insertions(+), 9 deletions(-)
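
The comment rewritten in Ccore/ccore.pro below describes how the
optimisation loop ('shp.py') and the Onelab model exchange data through
the Onelab database. A minimal sketch of that round trip, assuming the
standard onelab.py Python client; the concrete parameter paths are
illustrative, built from the reserved prefixes quoted in the comment:

    import onelab

    c = onelab.client(__file__)

    # Design variables are published under the reserved
    # 'Optimization/Parameters' prefix, where the model reads them.
    c.setNumber('Optimization/Parameters/D', value=0.05)
    c.setNumber('Optimization/Parameters/E', value=0.10)

    # After a solve, velocities and sensitivities come back under the
    # reserved 'Optimization/Results/*' prefixes.
    velocity_0 = c.getNumber('Optimization/Results/velocity_0')
    dwdtau_0 = c.getNumber('Optimization/Results/dwdtau_0')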
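The same comment notes that the sensitivities can be fully validated by
finite differences, which is what test(n) in shp.py implements with a
central difference over the sweep table. The check in isolation, as a
sketch; 'fobj' and 'grad' stand for the functions defined in shp.py,
and 'check_gradient' is a name chosen here for illustration:

    def check_gradient(fobj, grad, x0, h=1e-4):
        # Central finite difference of the objective around x0,
        # to be compared with the direct sensitivity from grad().
        fd = (fobj([x0 + h]) - fobj([x0 - h])) / (2.0 * h)
        g = grad([x0])[0]
        return g, fd, abs(g - fd)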

diff --git a/Ccore/ccore.pro b/Ccore/ccore.pro
index d7054f6..620a355 100644
--- a/Ccore/ccore.pro
+++ b/Ccore/ccore.pro
@@ -37,16 +37,18 @@
   - effectively solve the optimisation problem.
 
   The computation of sensitivities is a delicate step of the optimisation,
-  which can however be one hundred percent validated by finite difference.
+  which can, however, be fully validated by finite differences.
   This tutorial is thus dedicated to both the explanation and verification
   of the direct computation of sensitivities in Magnetostatics.
 
   Communication between the optimisation loop ('shp.py')
   and the Onelab model is done via the Onelab database. 
-  Design variables are defined as Onelab variables whose name
-  has the reserved prefix 'Optimization/Parameters'.
-  Similarly, velocities and sensibilities are also exchanged as 
-  Onelab variables with reserved prefixes 'Optimization/Results/*'.
+  Design variables (here 'D' and 'E') are defined as Onelab variables
+  whose names carry the reserved prefix 'Optimization/Parameters'.
+  Similarly, the unknown field 'a', the velocities 'velocity_*'
+  and the sensitivities 'dwdtau_*' are also exchanged in memory
+  between the different functions or steps of the model
+  as Onelab variables with the reserved prefix 'Optimization/Results/*'.
 
   Finally, an extra mechanism is implemented 
   to apply a temporary small perturbation of a design parameter 
diff --git a/Ccore/shp.py b/Ccore/shp.py
index bb23d55..ed42af8 100644
--- a/Ccore/shp.py
+++ b/Ccore/shp.py
@@ -110,7 +110,9 @@ def grad(xFromOpti):
           + ' -setnumber VelocityTag '+str(dv)\
           + ' -solve GetGradient_wrt_dv')
     c.waitOnSubClients()
-    grads = [c.getNumber('Optimization/Results/dwdtau_0'), c.getNumber('Optimization/Results/dwdtau_1')]
+    grads = []
+    for dv in sorted(x):  # sort the keys so the gradient order matches the design variables
+        grads.append(c.getNumber('Optimization/Results/dwdtau_' + str(dv)))
     return np.asarray(grads)
 
 Nfeval = 1
@@ -123,7 +125,7 @@ def test(n):
     tab = np.zeros((n+1,4))
     for step in range(n+1):
         xstep = x[0][2] + (x[0][3]-x[0][2])/float(n)*step
-        f,g,d = fobj([xstep]), grad([xstep]), 0
+        f, g, d = fobj([xstep]), grad([xstep])[0], 0
         tab[step] = [xstep, f, g, d]
         if step >= 2 :
             tab[step-1][3] = (tab[step][1]-tab[step-2][1])/(tab[step][0]-tab[step-2][0])
@@ -151,10 +153,9 @@ designParameters={
 x = {}
 index = int(c.getNumber('Optimization/Sweep on parameter'))-1
 
-if index >= 0: # parameter sweep and optimisation over one design parameter
+if index >= 0: # parameter sweep and optimisation over one selected design parameter
     x[0] = designParameters[index];
     test(int(c.getNumber('Optimization/Steps in range')))
-    #callback=callbackF, 
     res = minimize(fobj, [x[0][1]], jac=grad, bounds=[(x[0][2],x[0][3])],\
                    callback=callbackF, method = 'L-BFGS-B', tol=None,\
                    options={'ftol': 1e-5, 'gtol': 1e-3, 'disp':True} )
-- 
GitLab