Venkat Subramanian

MaplePrimes Activity


These are replies submitted by Venkat Subramanian.

@dharr 

If you can write a version of the code without using the i+(j-1)*N trick, please share it, as I would like to know whether your version is faster.

Thanks.

@dharr 

Thanks.

Using i+(j-1)*N is tailored to this model and is not useful for general problems. For this specific model one can write F and Jac explicitly, and that version also runs very well in parallel; see the code below. When j11 is passed with storage=sparse, datatype=float[8] there is no issue. As @Carl Love mentioned, the issue seems to be j11 being passed as a Matrix/Array with storage=sparse.
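As a standalone illustration of the indexing trick itself (a minimal sketch, separate from the worksheet below; N=3 is only for display):

# Sketch: on an N x N grid, the unknown at node (i,j) is stored at
# position ind = i + (j-1)*N of a single vector, so 2D problems can
# reuse 1D-style vector code.
N := 3:
for j from 1 to N do
    for i from 1 to N do
        ind := i + (j-1)*N;
        printf("(i,j) = (%d,%d)  ->  ind = %d\n", i, j, ind);
    end do;
end do;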

 


restart:

Digits:=15;

15

 

N:=40;h:=1.0/N:

40

Fp:=subs("N"=N,
proc(n0,nf,uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector)
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
h:=1.0/"N":
for i from n0 to nf do
for j from 1 to "N" do
ind:=i+(j-1)*"N":

if i = 1 then left:=0: else left:=(0.5*uu[ind]+uu_old[ind]-0.5*uu[ind-1]-uu_old[ind-1])/h:end:
if i = "N" then right:=-0.1: else right:=(0.5*uu[ind+1]+uu_old[ind+1]-0.5*uu[ind]-uu_old[ind])/h:end:
if j = 1 then bot:=0: else bot:=(0.5*uu[ind]+uu_old[ind]-0.5*uu[ind-"N"]-uu_old[ind-"N"])/h:end:
if j = "N" then top:=-0.1: else top:=(0.5*uu[ind+"N"]+uu_old[ind+"N"]-0.5*uu[ind]-uu_old[ind])/h:end:
F1[ind]:=uu[ind]-deltA*(right+top-left-bot)/h+deltA*(0.5*uu[ind]+uu_old[ind])^2;
#print(F1);
od:od:
    
end proc);

proc (n0, nf, uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; h := 0.250000000000000e-1; for i from n0 to nf do for j to 40 do ind := i+40*j-40; if i = 1 then left := 0 else left := (.5*uu[ind]+uu_old[ind]-.5*uu[ind-1]-uu_old[ind-1])/h end if; if i = 40 then right := -.1 else right := (.5*uu[ind+1]+uu_old[ind+1]-.5*uu[ind]-uu_old[ind])/h end if; if j = 1 then bot := 0 else bot := (.5*uu[ind]+uu_old[ind]-.5*uu[ind-40]-uu_old[ind-40])/h end if; if j = 40 then top := -.1 else top := (.5*uu[ind+40]+uu_old[ind+40]-.5*uu[ind]-uu_old[ind])/h end if; F1[ind] := uu[ind]-deltA*(right+top-left-bot)/h+deltA*(.5*uu[ind]+uu_old[ind])^2 end do end do end proc

 

F:=subs("N"=N,
proc(uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector)
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
Fp(1,"N",uu,uu_old,deltA,tnew,F1);  
end proc);

proc (uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; Fp(1, 40, uu, uu_old, deltA, tnew, F1) end proc

 

with(Threads[Task]):

FpDistribute := proc(n0,nf,uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector)
local nmid;
if 20 < nf - n0 then

#nmid := floor(1/2*nf - 1/2*n0) + i_low;
nmid:=iquo(n0+nf,2);
Continue(null,
Task = [FpDistribute, n0,nmid,uu,uu_old,deltA,tnew,F1],
Task = [FpDistribute,nmid,nf,uu,uu_old,deltA,tnew,F1]);
else
Fp(n0,nf,uu,uu_old,deltA,tnew,F1);  end if;
end proc;

proc (n0, nf, uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector) local nmid; if 20 < nf-n0 then nmid := iquo(n0+nf, 2); Threads:-Task:-Continue(null, Task = [FpDistribute, n0, nmid, uu, uu_old, deltA, tnew, F1], Task = [FpDistribute, nmid, nf, uu, uu_old, deltA, tnew, F1]) else Fp(n0, nf, uu, uu_old, deltA, tnew, F1) end if end proc

Fparallel:=subs("N"=N,
proc(uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector)
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
Start(FpDistribute,1,"N",uu,uu_old,deltA,tnew,F1);  
end proc);

proc (uu::Vector, uu_old::Vector, deltA::float, tnew::float, F1::Vector) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; Threads:-Task:-Start(FpDistribute, 1, 40, uu, uu_old, deltA, tnew, F1) end proc

interface(prettyprint=3);

2

 

Jp:=subs("N"=N,proc(n0,nf,uu::Vector,uu_old::Vector,deltA::float,tnew::float,Nt::integer,J::Matrix(storage=sparse,datatype=float[8]))
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
h:=1.0/"N";
#J:=Matrix(1..Nt,1..Nt,storage=sparse,datatype=float[8]):
for i from n0 to nf do
for j from 1 to "N" do
#for j from 1 to "N" do
ind:=i+(j-1)*"N":
J[ind,ind]:=1.0+deltA*(0.5*uu[ind]+uu_old[ind]):
if i > 1 then J[ind,ind]:=J[ind,ind]+deltA*0.5/h^2:J[ind,ind-1]:=-deltA*0.5/h^2:end:
if i < "N" then J[ind,ind]:=J[ind,ind]+deltA*0.5/h^2:J[ind,ind+1]:=-deltA*0.5/h^2:end:
if j >1 then J[ind,ind]:=J[ind,ind]+deltA*0.5/h^2:J[ind,ind-"N"]:=-deltA*0.5/h^2:end:
if j < "N" then J[ind,ind]:= J[ind,ind]+deltA*0.5/h^2:J[ind,ind+"N"]:=-deltA*0.5/h^2:end:
od:od:
#J;
end proc);

proc (n0, nf, uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer, J::(Matrix(storage = sparse, datatype = float[8]))) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; h := 0.250000000000000e-1; for i from n0 to nf do for j to 40 do ind := i+40*j-40; J[ind, ind] := 1.0+deltA*(.5*uu[ind]+uu_old[ind]); if 1 < i then J[ind, ind] := J[ind, ind]+.5*deltA/h^2; J[ind, ind-1] := -.5*deltA/h^2 end if; if i < 40 then J[ind, ind] := J[ind, ind]+.5*deltA/h^2; J[ind, ind+1] := -.5*deltA/h^2 end if; if 1 < j then J[ind, ind] := J[ind, ind]+.5*deltA/h^2; J[ind, ind-40] := -.5*deltA/h^2 end if; if j < 40 then J[ind, ind] := J[ind, ind]+.5*deltA/h^2; J[ind, ind+40] := -.5*deltA/h^2 end if end do end do end proc

Jac:=subs("N"=N,proc(uu::Vector,uu_old::Vector,deltA::float,tnew::float,Nt::integer,J::Matrix(storage=sparse,datatype=float[8]))
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
Jp(1,"N",uu,uu_old,deltA,tnew,Nt,J);
#J;
end proc);

proc (uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer, J::(Matrix(storage = sparse, datatype = float[8]))) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; Jp(1, 40, uu, uu_old, deltA, tnew, Nt, J) end proc

JpDistribute := proc(n0,nf,uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer,J::Matrix(storage=sparse,datatype=float[8]))
local nmid;
if 100 < nf - n0 then

#nmid := floor(1/2*nf - 1/2*n0) + i_low;
nmid:=iquo(n0+nf,2);
Continue(null,
Task = [JpDistribute, n0,nmid,uu,uu_old,deltA,tnew,Nt,J],
Task = [JpDistribute,nmid,nf,uu,uu_old,deltA,tnew,Nt,J]);
else
Jp(n0,nf,uu,uu_old,deltA,tnew,Nt,J);  end if;
end proc;

proc (n0, nf, uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer, J::(Matrix(storage = sparse, datatype = float[8]))) local nmid; if 100 < nf-n0 then nmid := iquo(n0+nf, 2); Threads:-Task:-Continue(null, Task = [JpDistribute, n0, nmid, uu, uu_old, deltA, tnew, Nt, J], Task = [JpDistribute, nmid, nf, uu, uu_old, deltA, tnew, Nt, J]) else Jp(n0, nf, uu, uu_old, deltA, tnew, Nt, J) end if end proc

Jacparallel:=subs("N"=N,
proc(uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer,J::Matrix(storage=sparse,datatype=float[8]))
local h::float,left::float,right::float,top::float,bot::float,i::integer,j::integer,ind::integer;
Start(JpDistribute,1,"N",uu,uu_old,deltA,tnew,Nt,J);  
end proc);

proc (uu::Vector, uu_old::Vector, deltA::float, tnew::float, Nt::integer, J::(Matrix(storage = sparse, datatype = float[8]))) local h::float, left::float, right::float, top::float, bot::float, i::integer, j::integer, ind::integer; Threads:-Task:-Start(JpDistribute, 1, 200, uu, uu_old, deltA, tnew, Nt, J) end proc

 



 

Download PDEnumericalFD2Dthreadedproc.mw

 

"The Maple to C optimized code proved to be a winning combination. Stefan pioneered a similar hybrid development approach when he began writing a Modelica compiler for MapleSim in 2007. The initial implementation was written entirely in Maple. According to Stefan, the Maple language is much faster to write code in. It is much higher-level, allowing for rapid prototypes, without worrying about things like memory management and putting data-structures together. Two codebases are still maintained for the MapleSim Modelica compiler. All new features are developed in the Maple codebase, vetted and proven, before being translated to C. This approach has been adopted by Stefan's successors, and they also agree that in the end this actually saves time and produces a better result."

This has been my observation as well. I hope to share some examples, in particular for solving DAEs and PDEs, that demonstrate similar gains from the Maple+C hybrid approach.

Note that Maple does not have a direct DAE solver; it converts index-1 DAEs of the form

dy/dt = f(y, z)    (1)

0 = g(y, z)    (2)

to the explicit ODE system

dy/dt = f(y, z)

dz/dt = -(∂g/∂z)^(-1) (∂g/∂y) f(y, z),

where the second equation is obtained by differentiating (2) with respect to t to find dz/dt.

MapleSim has direct DAE solvers that can solve (1) and (2) as given.
Recently, we developed and published a direct sparse DAE solver in Maple.
Please see https://mapletransactions.org/index.php/maple/article/view/16701

@AllanWittkopf will know more about this. This is my current understanding based on past MapleSim codes.
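As a small illustration of that conversion (a sketch with made-up f and g of my own, not Maple's internal code):

restart:
# Index-1 DAE:  dy/dt = f(y,z),  0 = g(y,z).
# Illustrative choices, not from any particular model:
f := -y*z:
g := z - y^2:
# Differentiating 0 = g(y(t), z(t)) with respect to t gives
#   (dg/dy)*(dy/dt) + (dg/dz)*(dz/dt) = 0,
# and substituting dy/dt = f yields the hidden ODE for z:
dzdt := simplify(-diff(g, y)*f/diff(g, z));
# For this choice of f and g the result is dz/dt = -2*y^2*z.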

Please see the code attached.
Erik Postma (@epostma) had helped me in the past with calling the MKL solver, so I am just guessing the libraries and trying them. It seems to work, but it may be buggy.

Intel MKL PARDISO can be superior to UMFPACK for very large sparse systems, as shown earlier by our group in the paper and codes at https://sites.utexas.edu/maple/phase-field-models-for-li-metal-batteries/


 


restart:

Digits:=15;

15


A:=Matrix(4,4,[[-1,2,0,0],[2,-1,2,0],[0,2,-1,2],[0,0,1,-1]],datatype=float[8],storage=sparse);

Matrix(4, 4, {(1, 1) = -1.0, (1, 2) = 2.0, (1, 3) = 0., (1, 4) = 0., (2, 1) = 2.0, (2, 2) = -1.0, (2, 3) = 2.0, (2, 4) = 0., (3, 1) = 0., (3, 2) = 2.0, (3, 3) = -1.0, (3, 4) = 2.0, (4, 1) = 0., (4, 2) = 0., (4, 3) = 1.0, (4, 4) = -1.0})


b:=Vector(4,[1,0.,0,0],datatype=float[8]):

 

sol:=LinearAlgebra:-LinearSolve(A,b,method=SparseDirectMKL);

Error, invalid input: LinearAlgebra:-LinearSolve expects value for keyword parameter method to be of type identical(none,SparseLU,SparseDirect,SparseDirectMKL,SparseIterative,LU,QR,solve,hybrid,Cholesky,subs,modular), but received SparseDirectMKL

 

s1:=LinearAlgebra:-LUDecomposition(A,method=SparseDirectMKL,output=handle);

before P call
after P call

 

"External/0000029485E225E8"


s1();

("External/0000029485E225E8")()



pSolve := define_external("hw_PardisoSolve", ':-MAPLE', ':-LIB' = "linalg"):

x:=Vector(4,datatype=float[8]):

pSolve(b,x,s1);

x;

Vector(4, {(1) = -5.0, (2) = -2.0, (3) = 4.0, (4) = 4.0})


 

pFactor := define_external("hw_PardisoFactor", ':-MAPLE', ':-LIB' = "linalg"):

pCSF := define_external("hw_SpCompressedSparseForm", ':-MAPLE', ':-LIB' = "linalg"):

 

One can also use pCSF, pFactor, and pSolve to obtain CB, R, X (the compressed sparse form) and then proceed.

CB,R,X:=pCSF(A,"r",1,1,false,true,false);

CB, R, X := Vector(5, {(1) = 1, (2) = 3, (3) = 6, (4) = 9, (5) = 11}), Vector(10, {(1) = 1, (2) = 2, (3) = 1, (4) = 2, (5) = 3, (6) = 2, (7) = 3, (8) = 4, (9) = 3, (10) = 4}), Vector(10, {(1) = -1., (2) = 2., (3) = 2., (4) = -1., (5) = 2., (6) = 2., (7) = -1., (8) = 2., (9) = 1., (10) = -1.})


mtype := 11;
handle1 := pFactor(CB, R, X, mtype);

11

 

before P call
after P call

 

"External/0000029485CB0900"


x:=Vector(4,datatype=float[8]):
pSolve(b,x,handle1):
x;

Vector(4, {(1) = -5.0, (2) = -2.0, (3) = 4.0, (4) = 4.0})


 



 

Download buglinearsolve.mw

Such a great thread, thanks all!

@Carl Love 
Not sure I know the difference. I am just looking for something that will call the external integrator without using the evalf command.

See a MATLAB call from Maple and the small amount of RAM used (after MATLAB opens up the first time); this calls the vdp1 model. The expected wrapper or procedure should call the external integrator like this for a given n (number of ODEs), f (a procedure to find the slopes), an initial condition vector (y0), an initial time (t0), and a final time (tf), with default settings for all other parameters.

 

@AllanWittkopf is trying to see if he can help; I will update this page. In a rough test on a sample code, I was able to run a model with about 50 KB of RAM consumption using Maple's dsolve/numeric external integrator; this involved guessing the entries for the table creation/modification.

 

restart:

Digits:=15;

15

currentdir();

"C:\Program Files\Maple 2023"

with(Matlab);

[AddTranslator, CloseEngine, ExecuteCommand, FromMFile, FromMatlab, GetVariable, IsAssigned, OpenEngine, SetVariable, chol, closelink, defined, det, dimensions, eig, evalM, fft, getvar, inv, lu, ode15s, ode45, openlink, qr, setvar, size, square, transpose]

ss:=proc(t0,tf,ic0,T,Y)
T,Y := ode45("vdp1", 0 ..tf, ic0);
end proc;

proc (t0, tf, ic0, T, Y) T, Y := Matlab:-ode45("vdp1", 0 .. tf, ic0) end proc

ic0:=[1,0];

[1, 0]

CodeTools:-Usage((ss(0,1,ic0,T,Y)));

memory used=0.79MiB, alloc change=0 bytes, cpu time=0ns, real time=4.29s, gc time=0ns

Vector(57, {(1) = .0, (2) = 0.50237728630191596e-4, (3) = 0.10047545726038319e-3, (4) = 0.1507131858905748e-3, (5) = 0.20095091452076639e-3, (6) = 0.4521395576717244e-3, (7) = 0.7033282008226824e-3, (8) = 0.9545168439736404e-3, (9) = 0.12057054871245985e-2, (10) = 0.24616487028793887e-2, (11) = 0.3717591918634179e-2, (12) = 0.4973535134388969e-2, (13) = 0.622947835014376e-2, (14) = 0.1250919442891771e-1, (15) = 0.1878891050769166e-1, (16) = 0.2506862658646561e-1, (17) = 0.3134834266523956e-1, (18) = 0.5634834266523956e-1, (19) = 0.8134834266523956e-1, (20) = .10634834266523957, (21) = .13134834266523956, (22) = .15634834266523956, (23) = .18134834266523958, (24) = .20634834266523958, (25) = .23134834266523957, (26) = .2563483426652396, (27) = .28134834266523956, (28) = .3063483426652396, (29) = .3313483426652396, (30) = .3563483426652396, (31) = .3813483426652396, (32) = .40634834266523956, (33) = .4313483426652396, (34) = .4563483426652396, (35) = .48134834266523957, (36) = .5063483426652395, (37) = .5313483426652396, (38) = .5563483426652396, (39) = .5813483426652395, (40) = .6063483426652395, (41) = .6313483426652395, (42) = .6563483426652396, (43) = .6813483426652396, (44) = .7063483426652395, (45) = .7313483426652395, (46) = .7563483426652395, (47) = .7813483426652394, (48) = .8063483426652395, (49) = .8313483426652395, (50) = .8563483426652395, (51) = .8813483426652395, (52) = .9063483426652394, (53) = .9313483426652395, (54) = .9485112569989296, (55) = .9656741713326198, (56) = .9828370856663099, (57) = 1.0}, datatype = float[8]), Matrix(57, 2, {(1, 1) = 1.0, (1, 2) = .0, (2, 1) = .9999999987380853, (2, 2) = -0.50237728609061275e-4, (3, 1) = .9999999949523413, (3, 2) = -0.10047545709135335e-3, (4, 1) = .9999999886427678, (4, 2) = -0.15071318532014217e-3, (5, 1) = .9999999798093651, (5, 2) = -0.20095091316873185e-3, (6, 1) = .9999998977849119, (6, 2) = -0.452139542277011e-3, (7, 1) = .9999997526647312, (7, 2) = -0.7033281428979001e-3, (8, 1) = .9999995444488319, (8, 2) = -0.9545166992374083e-3, (9, 1) = .9999992731372271, (9, 2) = -0.12057051955254355e-2, (10, 1) = .999996970144356, (10, 2) = -0.24616462259132695e-2, (11, 1) = .9999930897630864, (11, 2) = -0.37175834032361936e-2, (12, 1) = .9999876319994775, (12, 2) = -0.4973514783111366e-2, (13, 1) = .9999805968620208, (13, 2) = -0.6229438436095478e-2, (14, 1) = .9999217610279643, (14, 2) = -0.12508874315545213e-1, (15, 1) = .9998234934966618, (15, 2) = -0.187878361976329e-1, (16, 1) = .9996857979451632, (16, 2) = -0.25066099711088356e-1, (17, 1) = .9995086794303525, (17, 2) = -0.3134344983295709e-1, (18, 1) = .9984128197702458, (18, 2) = -0.5632104561875044e-1, (19, 1) = .9966928705208215, (19, 2) = -0.8126957756944708e-1, (20, 1) = .9943496689987544, (20, 2) = -.10617984913209057, (21, 1) = .9913842544644557, (21, 2) = -.1310449653737403, (22, 1) = .9877978279054932, (22, 2) = -.1558602995682584, (23, 1) = .9835916754815316, (23, 2) = -.180623437107149, (24, 1) = .9787671017906019, (24, 2) = -.20533409778342732, (25, 1) = .973325392926298, (25, 2) = -.22999409516941732, (26, 1) = .9672677803530982, (26, 2) = -.25460728674271504, (27, 1) = .9605953712287775, (27, 2) = -.27917948363141554, (28, 1) = .9533090911601887, (28, 2) = -.30371835489441507, (29, 1) = .9454096522230412, (29, 2) = -.32823337582573286, (30, 1) = .9368975218313902, (30, 2) = -.35273577439140025, (31, 1) = .9277728623231336, (31, 2) = -.37723843216038444, (32, 1) = .9180354837989904, (32, 2) = -.40175578829756187, (33, 1) = .9076848174098183, (33, 2) = 
-.4263037870212905, (34, 1) = .896719889317598, (34, 2) = -.4508998266721802, (35, 1) = .8851392702038537, (35, 2) = -.47556266569103106, (36, 1) = .8729410373900864, (36, 2) = -.5003123302542256, (37, 1) = .8601227530027613, (37, 2) = -.5251700638949847, (38, 1) = .8466814429474174, (38, 2) = -.5501582781675165, (39, 1) = .8326135563649153, (39, 2) = -.5753004642541835, (40, 1) = .8179149364435568, (40, 2) = -.6006210932038983, (41, 1) = .8025808031784867, (41, 2) = -.6261455628066843, (42, 1) = .7866057378215437, (42, 2) = -.651900140659666, (43, 1) = .769983652915049, (43, 2) = -.6779118676028639, (44, 1) = .752707773165107, (44, 2) = -.7042084220319103, (45, 1) = .7347706235046282, (45, 2) = -.7308180499230132, (46, 1) = .7161640208762611, (46, 2) = -.7577694811736909, (47, 1) = .6968790574803524, (47, 2) = -.7850917941541237, (48, 1) = .676906097173605, (48, 2) = -.8128141924982113, (49, 1) = .6562347716355753, (49, 2) = -.8409658921908745, (50, 1) = .6348539838487927, (50, 2) = -.8695759788015689, (51, 1) = .6127519109456697, (51, 2) = -.8986731802105471, (52, 1) = .5899160285875202, (52, 2) = -.9282854723793063, (53, 1) = .5663331216220466, (53, 2) = -.9584398809321654, (54, 1) = .5497034312658997, (54, 2) = -.9794686924444002, (55, 1) = .5327104696469083, (55, 2) = -1.0007731387692438, (56, 1) = .5153494429787209, (56, 2) = -1.0223606798647442, (57, 1) = .4976154331426749, (57, 2) = -1.044238264404763}, datatype = float[8])

T[57];Y[57];

1.

Vector[row](%id = 36893490281465971884)

 


 

Download MATLABcall.mw

 

 

Please see the code attached. If anyone can explain the arguments passed, then one can possibly write an evalhf-able procedure that takes n (number of ODEs), f (a procedure for the slopes), the initial time, and the final time, does all of this with the external integrator, and provides the results in evalhf form.
If anyone can show how to capture and store the misc and xf arguments used by `dsolve/numeric/SC/IVPrun`, this can be done. The goal is to reduce the number of non-evalhf-able loops/calls.

(This code is written in Maple 2023.)


 

restart:

Digits:=15:

eqs:=diff(ca(t),t)=-(u+u^2/2)*ca(t),diff(cb(t),t)=u*ca(t)-0.1*cb(t);

diff(ca(t), t) = -(u+(1/2)*u^2)*ca(t), diff(cb(t), t) = u*ca(t)-.1*cb(t)

u:=0.1;

.1

infolevel[all]:=10;

10

#soln:=dsolve({eqs,ca(0)=1.0,cb(0)=0.0},type=numeric):

#soln(1);

#showstat(`dsolve/numeric/SC/IVPrun`);

showstat(`dsolve/numeric/SC/hw_int`);


`dsolve/numeric/SC/hw_int` := proc()
local lib;
global `dsolve/numeric/SC/hw_int`;
   1   try
   2       lib := ExternalCalling:-ExternalLibraryName("ode2",'HWFloat');
   3       `dsolve/numeric/SC/hw_int` := define_external('hw_integrate',':-
             MAPLE',(':-LIB') = lib,':-THREAD_SAFE')
       catch :
   4       error
             "'hw_integrate' in external library %1 could not be found/used",
             lib
       end try;
   5   `dsolve/numeric/SC/hw_int`(_passed[1 .. _npassed])
end proc
 

`dsolve/numeric/SC/hw_int` := proc()
local lib;
global `dsolve/numeric/SC/hw_int`;
    try
        lib := ExternalCalling:-ExternalLibraryName("ode2", 'HWFloat');
        `dsolve/numeric/SC/hw_int` := define_external('hw_integrate',
            ':-MAPLE', (':-LIB') = lib, ':-THREAD_SAFE')
    catch:
        error "'hw_integrate' in external library %1 could not be found/used", lib
    end try;
    print(_passed);
    `dsolve/numeric/SC/hw_int`(_passed[1 .. _npassed]);
    print("here");
    print(_passed);
    print(_passed[11]);
end proc;

proc () local lib; global `dsolve/numeric/SC/hw_int`; try lib := ExternalCalling:-ExternalLibraryName("ode2", 'HWFloat'); `dsolve/numeric/SC/hw_int` := define_external('hw_integrate', ':-MAPLE', ':-LIB' = lib, ':-THREAD_SAFE') catch: error "'hw_integrate' in external library %1 could not be found/used", lib end try; print(args); `dsolve/numeric/SC/hw_int`(args[1 .. nargs]); print("here"); print(args); print(args[11]) end proc

soln:=dsolve({eqs,ca(0)=1.0,cb(0)=0.0},type=numeric,output=Array([seq(i/1.*1,i=0..1)])):

dsolve/numeric: entering dsolve/numeric

DEtools/convertsys: Naming is {ca(t) = Y[1] cb(t) = Y[2] diff(ca(t) t) = YP[1] diff(cb(t) t) = YP[2]}

DEtools/convertsys: converted to first-order system Y'(x) = f(x,Y(x))    namely (with Y' represented by YP)

[YP[1] = -.105000000000000*Y[1], YP[2] = .1*Y[1]-.1*Y[2]]

DEtools/convertsys: correspondence between Y[i] names and original functions:

[Y[1] = ca(t), Y[2] = cb(t)]

dsolve/numeric/DAE/rhsproc: Using previously generated rhs proc

dsolve/numeric: the procedure F(x,Y,YP) for computing Y'(x)=f(x,Y(x)) is: proc (N, X, Y, YP) option [Y[1] = ca(t), Y[2] = cb(t)]; YP[1] := -.105000000000000*Y[1]; YP[2] := .1*Y[1]-.1*Y[2]; 0 end proc

dsolve/numeric: initial conditions: x0=0., y0=[1.0, 0.]

dsolve/numeric/SC/preproc: no compilation, _Env_ivpnumeric_ToExternal not set

dsolve/numeric/SC/firststep: Checking ODE for hardware floating point computation

dsolve/numeric/SC/firststep: Initial point OK for hardware floating point computation

dsolve/numeric/SC/firststep: Initial step set to: .0504765875584155

dsolve/numeric/SC/IVPrun: First call, start from initial

dsolve/numeric/SC/IVPrun: Calling external hardware nonstiff integrator with evalhf= true

Vector[row](2, {(1) = 1., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = -.105000000000000, (2) = .100000000000000}), proc (N, X, Y, YP) option `[Y[1] = ca(t), Y[2] = cb(t)]`; YP[1] := (-1)*.105000000000000*Y[1]; YP[2] := .100000000000000*Y[1]+(-1)*.100000000000000*Y[2]; 0 end proc, -1, Vector[row](2, {(1) = .100000000000000, (2) = .100000000000000}), Vector[row](65, {(1) = 2, (2) = 2, (3) = 0, (4) = 0, (5) = 0, (6) = 0, (7) = 1, (8) = 0, (9) = 0, (10) = 0, (11) = 0, (12) = 0, (13) = 1, (14) = 2, (15) = 1, (16) = 0, (17) = 0, (18) = 1, (19) = 30000, (20) = 1, (21) = 0, (22) = 1, (23) = 4, (24) = 0, (25) = 1, (26) = 15, (27) = 1, (28) = 0, (29) = 1, (30) = 3, (31) = 3, (32) = 0, (33) = 1, (34) = 0, (35) = 0, (36) = 0, (37) = 0, (38) = 0, (39) = 0, (40) = 0, (41) = 0, (42) = 0, (43) = 1, (44) = 0, (45) = 0, (46) = 0, (47) = 0, (48) = 0, (49) = 0, (50) = 50, (51) = 1, (52) = 0, (53) = 0, (54) = 0, (55) = 0, (56) = 0, (57) = 0, (58) = 0, (59) = 10000, (60) = 0, (61) = 1000, (62) = 0, (63) = 0, (64) = -1, (65) = 0}), Vector[row](28, {(1) = 1., (2) = 1.00000000000000*(1/1000000), (3) = 0.504765875584155e-1, (4) = 5.00001000000000*(1/1000000000000000), (5) = 0., (6) = 0.504765875584155e-1, (7) = 0., (8) = 1.00000000000000*(1/1000000), (9) = 0., (10) = 0., (11) = 0., (12) = 0., (13) = 1., (14) = 0., (15) = .499999999999990, (16) = 0., (17) = 1., (18) = 1., (19) = 0., (20) = 0., (21) = 1., (22) = 1., (23) = 0., (24) = 0., (25) = 1.00000000000000*(1/1000000000000000), (26) = 0., (27) = 0., (28) = 0.}), Array(1..6, 0..2, {(1, 1) = .8758457905848228, (1, 2) = .9121381606660928, (2, 0) = .9121381606660928, (2, 1) = 0.8006442103497206e-1, (2, 2) = 1.0962744218225748, (3, 0) = 1.0962744218225748, (3, 1) = .8912691536438646, (3, 2) = 0.9797584316374096e-1, (4, 0) = 0.9797584316374096e-1, (4, 1) = 1.316703053060327, (4, 2) = .8708776141403536, (5, 0) = .8708776141403536, (5, 1) = .11504702599633022, (5, 2) = 1.5371316842980791, (6, 0) = 1.5371316842980791, (6, 1) = .8509526190302513, (6, 2) = .13130657350916902}, datatype = float[8], order = C_order), 0, Array(1..2, 0..2, {(1, 1) = .0, (1, 2) = 1.0, (2, 0) = 1.0, (2, 1) = .0, (2, 2) = 1.0}, datatype = float[8], order = C_order), 0, 0, 0, 0, 0, 0, 0, 0, Matrix(4, 7, {(1, 1) = 0., (1, 2) = .203125000000000, (1, 3) = .304687500000000, (1, 4) = .750000000000000, (1, 5) = .812500000000000, (1, 6) = .406250000000000, (1, 7) = .812500000000000, (2, 1) = 0.637817382812500e-1, (2, 2) = 0., (2, 3) = .279296875000000, (2, 4) = .272378921508789, (2, 5) = -0.968685150146484e-1, (2, 6) = 0.195693969726562e-1, (2, 7) = .538158416748047, (3, 1) = 0.318908691406250e-1, (3, 2) = 0., (3, 3) = -.343750000000000, (3, 4) = -.335235595703125, (3, 5) = .229614257812500, (3, 6) = .417480468750000, (3, 7) = 11.4807128906250, (4, 1) = 0.971052050590515e-1, (4, 2) = 0., (4, 3) = .403503417968750, (4, 4) = 0.202974677085876e-1, (4, 5) = -0.605428218841553e-2, (4, 6) = -0.477004051208496e-1, (4, 7) = .778585672378540}), Matrix(6, 6, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (1, 6) = 1., (2, 1) = .250000000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (2, 6) = 1., (3, 1) = .187500000000000, (3, 2) = .562500000000000, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (3, 6) = 2., (4, 1) = .235839843750000, (4, 2) = -.878906250000000, (4, 3) = .890625000000000, (4, 4) = 0., (4, 5) = 0., (4, 6) = .268188476562500, (5, 1) = .127273559570312, (5, 2) = -.500976562500000, (5, 3) = .449218750000000, (5, 4) = -0.128936767578125e-1, (5, 5) = 0., 
(5, 6) = 0.626220703125000e-1, (6, 1) = -0.927734375000000e-1, (6, 2) = .626220703125000, (6, 3) = -.432617187500000, (6, 4) = .141830444335938, (6, 5) = -0.861053466796875e-1, (6, 6) = .313110351562500}), Vector[row](6, {(1) = 0., (2) = .386000000000000, (3) = .210000000000000, (4) = .630000000000000, (5) = 1., (6) = 1.}), Vector[row](6, {(1) = .250000000000000, (2) = -.104300000000000, (3) = .103500000000000, (4) = -0.362000000000000e-1, (5) = 0., (6) = 0.}), Matrix(6, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = 1.54400000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (3, 1) = .946678528081553, (3, 2) = .255701169898258, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (4, 1) = 3.31482518706849, (4, 2) = 2.89612401597212, (4, 3) = .998641913997781, (4, 4) = 0., (4, 5) = 0., (5, 1) = 1.22122450922627, (5, 2) = 6.01913448128775, (5, 3) = 12.5370833293209, (5, 4) = -.687886036105895, (5, 5) = 0., (6, 1) = 1.22122450922627, (6, 2) = 6.01913448128775, (6, 3) = 12.5370833293209, (6, 4) = -.687886036105895, (6, 5) = 1.}), Matrix(6, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = -5.66880000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (3, 1) = -2.43009335683376, (3, 2) = -.206359915708912, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (4, 1) = -.107352905814526, (4, 2) = -9.59456225102190, (4, 3) = -20.4702861480962, (4, 4) = 0., (4, 5) = 0., (5, 1) = 7.49644331396861, (5, 2) = -10.2468043146412, (5, 3) = -33.9999035281991, (5, 4) = 11.7089089320616, (5, 5) = 0., (6, 1) = 8.08324679592241, (6, 2) = -7.98113298806279, (6, 3) = -31.5215943287437, (6, 4) = 16.3193054312314, (6, 5) = -6.05881823883405}), Matrix(3, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = 10.1262350834469, (2, 2) = -7.48799587760763, (2, 3) = -34.8009186155574, (2, 4) = -7.99277170756873, (2, 5) = 1.02513772329562, (3, 1) = -.676280339280690, (3, 2) = 6.08771465167861, (3, 3) = 16.4308432089246, (3, 4) = 24.7672251141837, (3, 5) = -6.59438912571678}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Matrix(2, 6, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (1, 6) = 0., (2, 1) = 0., (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (2, 6) = 0.}), Vector[row](2, {(1) = 0, (2) = 0}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](4, {(1) = 0., (2) = 0., (3) = 0., (4) = 0.}), 0, 0, 0, 0, 0, 0, Vector[row](0, {}), Vector[row](2, {(1) = 0, (2) = 0}), 0

"here"

Vector[row](2, {(1) = .912138160666093, (2) = 0.800644210349721e-1}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = -0.957745068699397e-1, (2) = 0.832073739631121e-1}), proc (N, X, Y, YP) option `[Y[1] = ca(t), Y[2] = cb(t)]`; YP[1] := (-1)*.105000000000000*Y[1]; YP[2] := .100000000000000*Y[1]+(-1)*.100000000000000*Y[2]; 0 end proc, -1, Vector[row](2, {(1) = .100000000000000, (2) = .100000000000000}), Vector[row](65, {(1) = 2, (2) = 2, (3) = 0, (4) = 0, (5) = 0, (6) = 0, (7) = 1, (8) = 1, (9) = 0, (10) = 0, (11) = 0, (12) = 0, (13) = 1, (14) = 2, (15) = 3, (16) = 0, (17) = 0, (18) = 22, (19) = 30000, (20) = 5, (21) = 0, (22) = 1, (23) = 4, (24) = 0, (25) = 1, (26) = 15, (27) = 1, (28) = 0, (29) = 1, (30) = 3, (31) = 3, (32) = 0, (33) = 1, (34) = 0, (35) = 0, (36) = 0, (37) = 0, (38) = 0, (39) = 0, (40) = 0, (41) = 0, (42) = 0, (43) = 1, (44) = 0, (45) = 0, (46) = 0, (47) = 0, (48) = 0, (49) = 0, (50) = 50, (51) = 1, (52) = 0, (53) = 0, (54) = 0, (55) = 0, (56) = 0, (57) = 0, (58) = 0, (59) = 10000, (60) = 0, (61) = 1000, (62) = 0, (63) = 0, (64) = -1, (65) = 0}), Vector[row](28, {(1) = 1., (2) = 1.00000000000000*(1/1000000), (3) = .881714524951008, (4) = 5.00001000000000*(1/1000000000000000), (5) = 0., (6) = 0.504765875584155e-1, (7) = 0., (8) = 1.00000000000000*(1/1000000), (9) = 0., (10) = 0., (11) = 0., (12) = 0., (13) = 1., (14) = 0., (15) = .499999999999990, (16) = 0., (17) = 1., (18) = 1., (19) = 0., (20) = 0., (21) = 1., (22) = 1., (23) = 0., (24) = 0., (25) = 1.00000000000000*(1/1000000000000000), (26) = 0., (27) = 0., (28) = 0.}), Array(1..6, 0..2, {(1, 1) = .8758457905848228, (1, 2) = .9121381606660928, (2, 0) = .9121381606660928, (2, 1) = 0.8006442103497206e-1, (2, 2) = 1.0962744218225748, (3, 0) = 1.0962744218225748, (3, 1) = .8912691536438646, (3, 2) = 0.9797584316374096e-1, (4, 0) = 0.9797584316374096e-1, (4, 1) = 1.316703053060327, (4, 2) = .8708776141403536, (5, 0) = .8708776141403536, (5, 1) = .11504702599633022, (5, 2) = 1.5371316842980791, (6, 0) = 1.5371316842980791, (6, 1) = .8509526190302513, (6, 2) = .13130657350916902}, datatype = float[8], order = C_order), 0, Array(1..2, 0..2, {(1, 1) = .0, (1, 2) = 1.0, (2, 0) = 1.0, (2, 1) = .0, (2, 2) = 1.0}, datatype = float[8], order = C_order), 0, 0, 0, 0, 0, 0, 0, 0, Matrix(4, 7, {(1, 1) = 0., (1, 2) = .203125000000000, (1, 3) = .304687500000000, (1, 4) = .750000000000000, (1, 5) = .812500000000000, (1, 6) = .406250000000000, (1, 7) = .812500000000000, (2, 1) = 0.637817382812500e-1, (2, 2) = 0., (2, 3) = .279296875000000, (2, 4) = .272378921508789, (2, 5) = -0.968685150146484e-1, (2, 6) = 0.195693969726562e-1, (2, 7) = .538158416748047, (3, 1) = 0.318908691406250e-1, (3, 2) = 0., (3, 3) = -.343750000000000, (3, 4) = -.335235595703125, (3, 5) = .229614257812500, (3, 6) = .417480468750000, (3, 7) = 11.4807128906250, (4, 1) = 0.971052050590515e-1, (4, 2) = 0., (4, 3) = .403503417968750, (4, 4) = 0.202974677085876e-1, (4, 5) = -0.605428218841553e-2, (4, 6) = -0.477004051208496e-1, (4, 7) = .778585672378540}), Matrix(6, 6, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (1, 6) = 1., (2, 1) = .250000000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (2, 6) = 1., (3, 1) = .187500000000000, (3, 2) = .562500000000000, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (3, 6) = 2., (4, 1) = .235839843750000, (4, 2) = -.878906250000000, (4, 3) = .890625000000000, (4, 4) = 0., (4, 5) = 0., (4, 6) = .268188476562500, (5, 1) = .127273559570312, (5, 2) = -.500976562500000, (5, 3) = .449218750000000, (5, 4) 
= -0.128936767578125e-1, (5, 5) = 0., (5, 6) = 0.626220703125000e-1, (6, 1) = -0.927734375000000e-1, (6, 2) = .626220703125000, (6, 3) = -.432617187500000, (6, 4) = .141830444335938, (6, 5) = -0.861053466796875e-1, (6, 6) = .313110351562500}), Vector[row](6, {(1) = 0., (2) = .386000000000000, (3) = .210000000000000, (4) = .630000000000000, (5) = 1., (6) = 1.}), Vector[row](6, {(1) = .250000000000000, (2) = -.104300000000000, (3) = .103500000000000, (4) = -0.362000000000000e-1, (5) = 0., (6) = 0.}), Matrix(6, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = 1.54400000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (3, 1) = .946678528081553, (3, 2) = .255701169898258, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (4, 1) = 3.31482518706849, (4, 2) = 2.89612401597212, (4, 3) = .998641913997781, (4, 4) = 0., (4, 5) = 0., (5, 1) = 1.22122450922627, (5, 2) = 6.01913448128775, (5, 3) = 12.5370833293209, (5, 4) = -.687886036105895, (5, 5) = 0., (6, 1) = 1.22122450922627, (6, 2) = 6.01913448128775, (6, 3) = 12.5370833293209, (6, 4) = -.687886036105895, (6, 5) = 1.}), Matrix(6, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = -5.66880000000000, (2, 2) = 0., (2, 3) = 0., (2, 4) = 0., (2, 5) = 0., (3, 1) = -2.43009335683376, (3, 2) = -.206359915708912, (3, 3) = 0., (3, 4) = 0., (3, 5) = 0., (4, 1) = -.107352905814526, (4, 2) = -9.59456225102190, (4, 3) = -20.4702861480962, (4, 4) = 0., (4, 5) = 0., (5, 1) = 7.49644331396861, (5, 2) = -10.2468043146412, (5, 3) = -33.9999035281991, (5, 4) = 11.7089089320616, (5, 5) = 0., (6, 1) = 8.08324679592241, (6, 2) = -7.98113298806279, (6, 3) = -31.5215943287437, (6, 4) = 16.3193054312314, (6, 5) = -6.05881823883405}), Matrix(3, 5, {(1, 1) = 0., (1, 2) = 0., (1, 3) = 0., (1, 4) = 0., (1, 5) = 0., (2, 1) = 10.1262350834469, (2, 2) = -7.48799587760763, (2, 3) = -34.8009186155574, (2, 4) = -7.99277170756873, (2, 5) = 1.02513772329562, (3, 1) = -.676280339280690, (3, 2) = 6.08771465167861, (3, 3) = 16.4308432089246, (3, 4) = 24.7672251141837, (3, 5) = -6.59438912571678}), Vector[row](2, {(1) = .862852234180585, (2) = .121646667791134}), Vector[row](2, {(1) = 0., (2) = 0.}), Vector[row](2, {(1) = -0.873057666308174e-1, (2) = 0.684701246507017e-1}), Vector[row](2, {(1) = 0., (2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Vector[row](2, {(1) = 0., (2) = 0.}), Matrix(2, 2, {(1, 1) = 0., (1, 2) = 0., (2, 1) = 0., (2, 2) = 0.}), Matrix(2, 6, {(1, 1) = -0.957745068699397e-1, (1, 2) = -0.935578053069745e-1, (1, 3) = -0.925071732935370e-1, (1, 4) = -0.879217450216900e-1, (1, 5) = -0.872861411194737e-1, (1, 6) = -0.914468108408395e-1, (2, 1) = 0.832073739631121e-1, (2, 2) = 0.792621008622733e-1, (2, 3) = 0.774422703214511e-1, (2, 4) = 0.695044462636171e-1, (2, 5) = 0.684014621613813e-1, (2, 6) = 0.755991753809135e-1}), Vector[row](2, {(1) = 0, (2) = 0}), Vector[row](2, {(1) = .912138160666093, (2) = 0.800644210349721e-1}), Vector[row](2, {(1) = .831483491722071, (2) = .146782245215054}), Vector[row](2, {(1) = 1.66780067534233*(1/1000000000), (2) = 6.93290889974829*(1/1000000000)}), Vector[row](2, {(1) = .912138160666093, (2) = 0.800644210349721e-1}), Vector[row](2, {(1) = -0.903900345181292e-1, (2) = 0.737580972195178e-1}), Vector[row](4, {(1) = 0., (2) = 0., (3) = 0., (4) = 0.}), 0, 0, 0, 0, 0, 0, Vector[row](0, {}), Vector[row](2, {(1) = 0, (2) = 0}), 0

Matrix(2, 3, {(1, 1) = 0., (1, 2) = 1.000000000000000, (1, 3) = 0., (2, 1) = 1.000000000000000, (2, 2) = .9003245213933421, (2, 3) = 0.9025791441096123e-1})

dsolve/numeric/SC/IVPrun: Completed external hardware nonstiff integrator

dsolve/numeric/SC/IVPrun: Time for integration= 0.

dsolve/numeric/SC/IVPrun: Time for integration= 0.

 

 

What we need to do is write a procedure that takes as inputs the number of ODEs n, the procedure f, the initial conditions for the Y vector, the initial time, and the final time, then updates the arguments and provides the final Y value from the arguments in evalhf form by directly calling `dsolve/numeric/SC/hw_int`(_passed[1 .. _npassed]).
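A rough sketch of the kind of wrapper I have in mind is below. The name odeint_wrap and its argument list are my own, and this version falls back on the documented procedure-form of dsolve/numeric instead of calling `dsolve/numeric/SC/hw_int` directly, since the full argument list of the external integrator is not documented; treat it as an interface sketch, not the final evalhf-able implementation.

# Hypothetical wrapper: n ODEs with slope procedure f(N, t, Y, YP),
# initial state Y0, integration from t0 to tf; returns the final Y.
odeint_wrap := proc(n::posint, f::procedure, Y0::Vector, t0::numeric, tf::numeric)
    local sol, res, i;
    sol := dsolve(numeric, 'procedure' = f, 'number' = n,
                  'start' = t0, 'initial' = Array([seq(Y0[i], i = 1 .. n)]),
                  'procvars' = [seq(y[i](t), i = 1 .. n)]);
    res := sol(tf);                            # [t = tf, y[1](t) = ..., ...]
    Vector(n, i -> eval(y[i](t), res), datatype = float[8]);
end proc:

# The two-equation model from this worksheet, in slope-procedure form:
fproc := proc(N, X, Y, YP)
    YP[1] := -0.105*Y[1];
    YP[2] := 0.1*Y[1] - 0.1*Y[2];
    0
end proc:

odeint_wrap(2, fproc, Vector([1.0, 0.0]), 0.0, 1.0);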


 

Download learningdsolvenumeric.mw

Trying to bump up this thread to get the attention of @acer, @tomleslie, and others.
Hopefully, this example convinces everyone that merely choosing a different optimization package (global optimization, stochastic, genetic algorithm, etc.) may not help in achieving optimal results if the numerical discretization (in this case nvar, the number of time horizons) does not provide sufficient precision. We would need nvar >= 256 for this problem and approach.


If anyone has access to old Maple versions, calling the external dsolve/numeric (without the print statements) and getting an output array, as shown in the link below and in the example towards the end for the NAG subroutine, should help.

The link is
https://www.maplesoft.com/support/helpJP/Maple/view.aspx?path=NAG/d02cjc

Though Maple no longer supports the NAG toolbox, if anyone has access to Maple 10 or earlier and is able to run the example on the NAG help page, please let me know.

Thanks

 

 

@sursumCorda 

This is based on my limited testing of combining Maple code with its Global Optimization Toolbox.

Currently, you pay a separate license for it.

Currently, the toolbox is offered by Nimbus, which provides genetic algorithms.
A decade or so earlier, Maple provided the Global Optimization Toolbox from Pintér. That had method=reducedgradient, which was very robust and fast.
Hope this helps.

@tomleslie 

I like DirectSearch and have nothing against it or genetic algorithms (GA), which are heavily used in the literature.

This discussion and analysis (from @acer, along with his solution) is the best and most efficient way to solve this model.

The next big step would be wrapping dsolve/numeric (with or without compile=true) and making NLPSolve work with it in evalhf form for computational efficiency.

The last 5-10 years have seen an exponential increase in open-access/open-source codes in Python, Julia, etc. I have often wondered why students and researchers used GA for well-defined (convex) models; now I partially understand the reason. Wrappers are often written for optimizers or different algorithms and then called from user-friendly software, but if the wrapper is poorly written, the gradient information is not passed properly and not enough options (tolerances, in this case) are exposed.

PS: note that only gradient-based methods have theoretical backing for locally optimal results in optimization, and I am wary of using methods that lack a good theoretical basis, even though they might work very well (they will always be slower for problems that can be solved with gradient-based methods).

 

@acer 

I think any optimizer or nonlinear solver should take the required tolerance as input and return the current-best objective and iterate.
@acer, I can confirm that this approach works with nonlinear constraints + dsolve/numeric/parameters + sqp. Thanks again.
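For anyone who wants to reproduce that pattern, here is a minimal sketch (a toy one-parameter problem of my own, not the model from this thread; option spellings should be checked against the Optimization and dsolve help pages):

restart:
# Toy problem: dy/dt = -p*y, y(0) = 1; pick p so that y(1) is close to 0.5.
sol := dsolve({diff(y(t), t) = -p*y(t), y(0) = 1}, numeric, parameters = [p]):

obj := proc(pval)
    sol('parameters' = [p = pval]);        # set the parameter value
    (eval(y(t), sol(1.0)) - 0.5)^2;        # squared mismatch at t = 1
end proc:

# Operator-form NLPSolve call with the sqp method:
Optimization:-NLPSolve(obj, 0 .. 5, 'initialpoint' = [1.0], 'method' = 'sqp');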

@acer 

There is no point in doing optimization to 1e-16 accuracy when we are trying to increase nvar (the number of stages) while solving the ODEs/DAEs only to 1e-6 accuracy.

I like your answer and it works.

Note that, for some odd reason, stiff=true in the call to dsolve/numeric works for this problem without tweaking anything else in the dsolve command or the optimization command.

Knowing where it fails can help. For example, ideally NLPSolve should have reported that "the objective was solved to 1e-6 precision, but not beyond that", instead of just returning "initial point satisfies first-order optimality condition".

fsolve has the same issue as well; if I am not wrong, we cannot force it to solve to a prescribed precision.

@acer, once again, thanks a lot. I believe that you can fix almost any issue!

Adding abstol=1e-12, reltol=1e-12 to the dsolve/numeric calls solves the problem for this model (thanks to Allan Wittkopf).

He had also suggested using Digits:=18 inside ss (I am not a big fan of this).

With the default tolerances, maxstep=0.05 (assuming that your maximum time horizon is 0.1 with nvar=10) seems to work as well.
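For reference, this is the shape of the dsolve call being described, on a placeholder ODE of my own (dsolve/numeric's documented option names are abserr and relerr, which is what I mean by abstol/reltol above):

# Placeholder ODE just to show the tolerance and step-size options:
sol := dsolve({diff(y(t), t) = -y(t)^2, y(0) = 1}, numeric,
              abserr = 1e-12, relerr = 1e-12, maxstep = 0.05):
sol(0.1);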

fdiff, or the procedures used by NLPSolve, should be made more robust.

@dharr 

Your code is not using dsolve/numeric in parametric form. I will be posting the use of the parametric form working with NLPSolve (thanks to Allan Wittkopf).

As I said, we don't want analytical solutions.

Also, optimization over one parameter is different from optimization over multiple parameters (scalar vs. vector).

To be clear, I am not looking for an answer to the model. My goal was to use NLPSolve with dsolve/numeric/parameters and sqp.
 
