acer

32385 Reputation

29 Badges

19 years, 335 days
Ontario, Canada

Social Networks and Content at Maplesoft.com

MaplePrimes Activity


These are replies submitted by acer

# Start a fresh Maple session: clear all assignments and loaded packages.
restart:

# The seven sample values of the parameter w at which the system is solved.
wvals:=[1.2,1.6,2,2.5,3,3.5,4]:

# One singleton substitution set {w=value} per sample point (7 entries).
S:=[seq({w=wvals[i]},i=1..7)]:

# Nonlinear system in the unknowns a1..a6 and t, parameterized by w:
# three exponential (equilibrium-type) relations, three linear balance
# equations, and one long energy-balance equation.  The floating-point
# coefficients are fixed problem data -- do not edit them by hand.
sys := {exp(-.1204819277*(2.4039*t+15.44745000*t^2-11.03552334*t^3+2.595300000*
t^4+.508258/t-44.6834-(2.40397*ln(t)+30.8949*t-16.55325000*t^2+3.460399999*t^3+.2541195000/t^2
-49.812126)*t)/t)*a2*a4-a1*a3, exp(-.1204819277*(-2.071844454/t+.3999293136/t^2+2.897999999*
t^3-0.2404762368e-1/t^3-6.278629824*t^2+1.49670934*t-.7274250000*t^4+4.532401680*ln(1000*t)
+4.532401680*ln(298)+134.6934679-(-4.532276160/t-1.035931727/t^2-.9699000000*
t^3+.2666044800/t^3+4.347000000*t^2-12.55719488*t-0.1794936000e-1/t^4-27.04489066*ln(1000*t)
+27.04489066*ln(298)+28.54167*ln(t)+190.6774129)*t)/t)*a4*((1/2)*a1+(1/2)*a2+(1/2)*a3+(1/2)*
a4+(1/2)*a5)-a1*a2, (1/4)*exp(-.1204819277*(95.3768*t-71.65195000*t^2+24.69369999*t^3
-3.579500000*t^4+1.105339/t+179.76736-(95.37701*ln(t)-143.3039*t+37.04055000*t^2-4.772666666*
t^3+.5525695000/t^2+363.377422)*t)/t)*(a1+a2+a3+a4+a5)^2*a5*a4-a1^3*a2, 2*a1+2*a4+4*a5-1.6-2*
w, a2+a3+a5+a6-1, a2+2*a3+a4-.77-w, -202.86-180.476*w-a1*(33.0661*t-5.681700000*
t^2+3.810933333*t^3-.6932000000*t^4+.158558/t-9.9807)-a2*(25.5675*t+3.048050000*
t^2+1.351533333*t^3-.6678250000*t^4-.1310/t-118.0118)-a3*(24.9973*t+27.59345000*t^2
-11.23045667*t^3+1.987075000*t^4+.1366/t-403.5951)-a4*(30.092*t+3.416250000*t^2+2.264466667*
t^3-.6336000000*t^4-0.821e-1/t-250.8806)-a5*(-.703*t+54.23865000*t^2-14.17383333*
t^3+1.465675000*t^4-.678565/t-76.84066)-a6*(27.04489066*t+.2287298242*t^2-4.532401680*ln(1000*
t)+2.181502454/t-.3999293136/t^2+0.2404762368e-1/t^3-11.80536790-4.532401680*ln(298))}:

# Q(W): solve the global system `sys` at w=W and return the a6 component
# of the root, or infinity when fsolve fails to find a root.
# Results are memoized so repeated calls from the optimizer are cheap.
Q:=proc(W)
option remember;
local res;
   # fsolve over bounded ranges for each unknown; on failure it returns
   # unevaluated (an unevaluated fsolve call) rather than a solution set.
   res:=fsolve(eval(sys,w=W),{a1=0..5,a2=0..5,a3=0..5,a4=0..5,
                              a5=0..0.01,a6=0..1,t=0..10});
   if not type(res,specfunc(anything,fsolve)) then
      # Success: optionally report progress (visible at infolevel[Q]>=1)
      # and return the a6 value from the solution set.
      userinfo(1,Q,`w: `,SFloat(W),` a6: `,eval(a6,res));
      return eval(a6,res);
   end if;
   # No root found: signal failure to the caller.
   infinity;
end proc:

# Enable Q's progress messages, then evaluate a6(w) at each sample point.
infolevel[Q]:=1:
seq(Q(W), W=wvals);

Q: w:  1.2  a6:  .7517956383
Q: w:  1.6  a6:  .6785304588
Q: w:  2.  a6:  .6048390436
Q: w:  2.5  a6:  .5123765780
Q: w:  3.  a6:  .4196778172
Q: w:  3.5  a6:  .3268293547
Q: w:  4.  a6:  .2338801069

    0.7517956383, 0.6785304588, 0.6048390436, 0.5123765780, 

      0.4196778172, 0.3268293547, 0.2338801069

# Raise the working precision for the optimization phase.
Digits:=25:
#infolevel[Q]:=0:
# Minimize Q(w)^2 over w in [4.0, 5.5]: squaring turns the zero-crossing
# of a6(w) into a smooth minimum that the optimizer can locate.
bestw:=Optimization:-Minimize(w->Q(w)^2, 4.0..5.5, evaluationlimit=1000);

Q: w:  4.750000000000000000000000  a6:  0.9432920823737770920383573e-1
Q: w:  4.6000000000000000330  a6:  .1222489966063624783865032
Q: w:  4.8999999999999999670  a6:  0.6640540360969581612635774e-1
Q: w:  5.1999999999999999835  a6:  0.1054699500801678343604655e-1
Q: w:  5.207462449804077120503378  a6:  0.9157357390875404820782231e-2
Q: w:  5.217053042742124003482895  a6:  0.7371411210113695352313544e-2
Q: w:  5.226810877117608229829761  a6:  0.5554308483958523215588129e-2
Q: w:  5.235359085728641370834348  a6:  0.3962451322739621103744091e-2
Q: w:  5.242122633590410804178822  a6:  0.2702928607649564905334537e-2
Q: w:  5.247098435625523355564968  a6:  0.1776319841750556792206673e-2
Q: w:  5.250566106592425147856027  a6:  0.1130557763443463589611914e-2
Q: w:  5.252884282731984909057095  a6:  0.6988578051239328936689900e-3
Q: w:  5.254383967095018246723003  a6:  0.4195802431188276069159383e-3
Q: w:  5.255328797107586247545547  a6:  0.2436298468071556057656181e-3
Q: w:  5.255911244193198125923985  a6:  0.1351639309075331254300514e-3
Q: w:  5.256263832572096252520510  a6:  0.6950331478014708798835004e-4
Q: w:  5.256474016927888983822312  a6:  0.3036181732445063492170901e-4
Q: w:  5.256597672194150296093724  a6:  0.7334162097749632411748103e-5
Q: w:  5.256605163889973413359217  a6:  0.5939023807664598330459472e-5
Q: w:  5.256612892461751190231369  a6:  0.4499773372343510584122660e-5
Q: w:  5.256619725229378462722800  a6:  0.3227343728548901476691799e-5
Q: w:  5.256625166154515951144404  a6:  0.2214109437077906051483203e-5
Q: w:  5.256629187652926055280815  a6:  0.1465207365589411599266267e-5
Q: w:  5.256632000173841209782138  a6:  0.9414466848882062288426848e-6
Q: w:  5.256633885551984153117712  a6:  0.5903428278826666356849762e-6
Q: w:  5.256635107928129012022444  a6:  0.3627062758920563138299327e-6
Q: w:  5.256635879426013942357952  a6:  0.2190343633960873090688642e-6
Q: w:  5.256636355725876141421800  a6:  0.1303355950760237162773895e-6
Q: w:  5.256636644416356515002338  a6:  0.7657431552455497703954369e-7
Q: w:  5.256636816692452974310978  a6:  0.4449226220794789254696128e-7
Q: w:  5.256636918138089384772480  a6:  0.2560058551412829082784443e-7
Q: w:  5.256636977190863453382295  a6:  0.1460350416970467037616338e-7
Q: w:  5.256637011222506828774813  a6:  0.8265973775933530203568246e-8
Q: w:  5.256637030662054626898215  a6:  0.4645851066747551300755468e-8
Q: w:  5.256637041679662147121867  a6:  0.2594101118677601899145947e-8
Q: w:  5.256637047880554713273803  a6:  0.1439342158774916548616673e-8
Q: w:  5.256637051348714616551256  a6:  0.7934853438124990315636785e-9
Q: w:  5.256637053277523293635290  a6:  0.4342936503503359042384550e-9
Q: w:  5.256637054344744765058769  a6:  0.2355507204918190327542489e-9
Q: w:  5.256637054932498065326818  a6:  0.1260965761954674033113475e-9
Q: w:  5.256637055254816459292479  a6:  0.6607295066004139848471163e-10
Q: w:  5.256637055306536219279970  a6:  0.5644145719150079662863247e-10
Q: w:  5.256637055346633610854782  a6:  0.4897433504801449213808810e-10
Q: w:  5.256637055375694729140130  a6:  0.4356243887870340262522906e-10
Q: w:  5.256637055395718731932851  a6:  0.3983347625090165864130435e-10
Q: w:  5.256637055408986514455987  a6:  0.3736268828234876813655990e-10
Q: w:  5.256637055417508872121442  a6:  0.3577561532973973494655039e-10
Q: w:  5.256637055422846989761947  a6:  0.3478152631615522985267680e-10
Q: w:  5.256637055426121855543666  a6:  0.3417166562858644645872149e-10
Q: w:  5.256637055426438088566320  a6:  0.3411277524910528662345077e-10

      [                             -21                              ]
      [1.163681435195970249846016 10   , [5.256637055426438088566320]]

# Re-solve the full system at the optimal w to recover all the unknowns.
fsolve(eval(sys,w=bestw[2][1]),{a1=0..5,a2=0..5,a3=0..5,a4=0..5,
                              a5=0..0.01,a6=0..1,t=0..10});

            /                                 
           { a1 = 1.744892730385765416291333, 
            \                                 

             a2 = 0.2846704517694181108534820, 

             a3 = 0.7152203437523213833081690, 

             a4 = 4.311525916152377211096500, 

             a5 = 0.0001092044441477305892437208, 

                                               -11  
             a6 = 3.411277524910528662345077 10   , 

                                           \ 
             t = 1.091241531285591797803840 }
                                           / 

# Restore default precision, silence Q's messages, and plot a6 versus w.
Digits:=10:
infolevel[Q]:=0:
plot(Q,4..5.5,numpoints=20);

So, without add-ons and using only stock routines from Maple, it doesn't seem very hard to establish that the w which approximately drives a6 to zero is about,

# Display the optimal w to 12 significant digits.
evalf[12](bestw[2][1]);
                         5.25663705543

acer

@icegood That's another good example. (So, there are problems for data all near either DBL_MIN or DBL_EPSILON.)

The plot structure for this example also contains data adequate to produce a smooth curve in the plot, but the plot drivers cannot handle it.

If it is this easy to repair the (already computed) plot data structure "manually" in Maple code, then the plotting routines might be able to accommodate it too.

 

# Start a fresh session for the second (plotting) example.
restart:

# f(y,a,b): a very large machine-generated closed-form expression (a
# ratio of sums of exponentials and its derivatives combined).  It is
# numerically ill-conditioned in double precision near y ~ 5.4938, which
# is why it is later evaluated at Digits:=400.
# Generated expression -- do not edit by hand.
f:=(y,a,b)->(8*(b+a))*(exp(-y))^2*y^2/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)-(32*(b+a))*(exp(-y)+1)*y*exp(-y)/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)+(16*(b+a))*(exp(-y)+1)*y^2*exp(-y)*((2*(1+2*b+2*a))*exp(-y*(1+2*b+2*a))-(4*(a+2*b+1))*exp(-y*(a+2*b+1))-(4*(a+2*b+2))*exp(-y*(a+2*b+2))+(2*(b+a+1))*exp(-2*y*(b+a+1))+(2*(b+a))*exp(-(2*(b+a))*y)-(4*(a+1))*exp(-y*(a+1))+(8*(b+1))*exp(-2*y*(b+1))-4*a*exp(-y*a)+(2*(b+a))*exp(-2*y)-(2*((2*(b+a))*y+4*b+1))*exp(-2*y)+(4*(b+a))*exp(-y)-(-6+(4*(b+a))*y)*exp(-y)+2*a+2*b)/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)^2+(8*(b+a))*(exp(-y)+1)*y^2*exp(-y)/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)+(8*(b+a))*(exp(-y)+1)^2/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)-(16*(b+a))*(exp(-y)+1)^2*y*((2*(1+2*b+2*a))*exp(-y*(1+2*b+2*a))-(4*(a+2*b+1))*exp(-y*(a+2*b+1))-(4*(a+2*b+2))*exp(-y*(a+2*b+2))+(2*(b+a+1))*exp(-2*y*(b+a+1))+(2*(b+a))*exp(-(2*(b+a))*y)-(4*(a+1))*exp(-y*(a+1))+(8*(b+1))*exp(-2*y*(b+1))-4*a*exp(-y*a)+(2*(b+a))*exp(-2*y)-(2*((2*(b+a))*y+4*b+1))*exp(-2*y)+(4*(b+a))*exp(-y)-(-6+(4*(b+a))*y)*exp(-y)+2*a+2*b)/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp
(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)^2+(8*(b+a))*(exp(-y)+1)^2*y^2*((2*(1+2*b+2*a))*exp(-y*(1+2*b+2*a))-(4*(a+2*b+1))*exp(-y*(a+2*b+1))-(4*(a+2*b+2))*exp(-y*(a+2*b+2))+(2*(b+a+1))*exp(-2*y*(b+a+1))+(2*(b+a))*exp(-(2*(b+a))*y)-(4*(a+1))*exp(-y*(a+1))+(8*(b+1))*exp(-2*y*(b+1))-4*a*exp(-y*a)+(2*(b+a))*exp(-2*y)-(2*((2*(b+a))*y+4*b+1))*exp(-2*y)+(4*(b+a))*exp(-y)-(-6+(4*(b+a))*y)*exp(-y)+2*a+2*b)^2/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)^3-(4*(b+a))*(exp(-y)+1)^2*y^2*(-2*(1+2*b+2*a)^2*exp(-y*(1+2*b+2*a))+4*(a+2*b+1)^2*exp(-y*(a+2*b+1))+4*(a+2*b+2)^2*exp(-y*(a+2*b+2))-4*(b+a+1)^2*exp(-2*y*(b+a+1))-4*(b+a)^2*exp(-(2*(b+a))*y)+4*(a+1)^2*exp(-y*(a+1))-16*(b+1)^2*exp(-2*y*(b+1))+4*a^2*exp(-y*a)-(8*(b+a))*exp(-2*y)+(4*((2*(b+a))*y+4*b+1))*exp(-2*y)-(8*(b+a))*exp(-y)+(-6+(4*(b+a))*y)*exp(-y))/(-2*exp(-y*(1+2*b+2*a))+4*exp(-y*(a+2*b+1))+4*exp(-y*(a+2*b+2))-exp(-2*y*(b+a+1))-exp(-(2*(b+a))*y)+4*exp(-y*(a+1))-4*exp(-2*y*(b+1))+4*exp(-y*a)+((2*(b+a))*y+4*b+1)*exp(-2*y)+(-6+(4*(b+a))*y)*exp(-y)+(2*(b+a))*y-4*b-3)^2:

# Very high working precision: f varies only around the 14th significant
# digit over the plotted window, so double precision cannot resolve it.
Digits:=400:
# Plot f over a tiny y-window where it is numerically ill-conditioned.
P:=plot(f(y,0.173734641949551155, 0.02), y=5.4938108636702..5.49381086367034):
P;

# Extract the computed CURVES point data from the plot structure, at 30 digits.
data:=evalf[30](op([1,1],indets(P,specfunc(anything,CURVES)))):
# The (nearly) constant leading part of the y-data, truncated to 14 digits.
const:={evalf[14](data[..,2][])}[1];
# Subtract that constant offset and magnify the residual by 10^14, so the
# (double-precision) plot drivers can render a smooth curve.
newdata:=[seq(evalf[30]([t[1],(t[2]-const)*10^14]),t=data)]:
miny,maxy:=min(newdata[..,2]),max(newdata[..,2]);

0.65555435660341e-12

0.687994651740957e-13, 0.1725660332336273e-12

# Redisplay the plot with the rescaled data substituted back into the
# CURVES structure; the y tickmarks are relabeled (via nprintf) with the
# true, unscaled values so the axis still reads correctly.
plots:-display(subsindets(P,specfunc(anything,CURVES),
                          z->CURVES(newdata,op(2..nops(z),z))),
               ytickmarks=[seq(evalf[17](miny+(i-1)*(maxy-miny)/5
                               =nprintf("%a",
                                        const+(miny+(i-1)
                                               *(maxy-miny)/5)
                                               *10^(-14))),
                               i=1..6)]);

 

 

icegood_plot01.mw

@gkokovidis Thanks. I'm somewhat sure that Classic was available for Maple 14 on Solaris, but had forgotten if it had been dropped for Maple 15.

@gkokovidis Thanks. I'm somewhat sure that Classic was available for Maple 14 on Solaris, but had forgotten if it had been dropped for Maple 15.

Is the Classic GUI available with Maple on Solaris?

acer

Is the Classic GUI available with Maple on Solaris?

acer

@alex_01 

You write that you worry about the performance of your LS approach, by which you likely mean its robustness and correctness since your concern covers the two cases where its error norm is much higher. But you have not acknowledged that using the explicitly formed Matrix inverse of (A^%T.A) is not the best numerical way to compute (A^%T.A)^(-1).A^%T, which was illustrated by numeric example above. That exponent-(-1) is just a convenient way to denote the Matrix inverse for publication. It does not mean that explicit inverse calculation is always numerically appropriate. But you can use LinearSolve and still validly claim that what you're doing is computing that same formula. And so you ought to, as it might clear up one of the two cases of your concern over your LS approach.

You write that you are concerned about the performance of the SVD method as used by the LeastSquares command for the over-determined case (m>n), and you give an example where it (and the other) approaches all produce a noticable error norm. But what value did you expect? It won't typically be anywhere close to zero, for the kinds of random entries you're using in the Matrix, because the m>n (df>0) system will very likely be overdetermined. That term means that there cannot be a solution which solves the system exactly -- an overdetermined system is inconsistent, and its least squares solution will have a non-zero error norm.

You may have to show the existence of a significantly better solution before the method=SVD solution is cast in serious doubt.

You asked about eigenvalues. If you mean you want the eigenvalues of (A^%T.A) then those are the singular values of Matrix A. Note that one does not necessarily use the time and memory to explicitly form (A^%T.A) as a Maple Matrix in order to compute the singular values of floating-point Matrix A. The computational work is done in compiled C functions, and clever workspace use/re-use may also mean that some forms are not produced in any stand-alone/returnable fashion.

In your first comment you claim that your Maple 13 does not have an SVD method implemented within LeastSquare. But that is untrue.

> infolevel[LinearAlgebra]:=1:

> LinearAlgebra[LeastSquares](X, Y, method=SVD):

SingularValues: calling external function
SingularValues: NAG hw_f08kef
SingularValues: NAG hw_f08kff
SingularValues: NAG hw_f08kff
SingularValues: NAG hw_f08mef
LeastSquares: calling external function
LeastSquares: NAG hw_f06paf
LeastSquares: NAG hw_f06pbf
LeastSquares: NAG hw_f06paf

> LinearAlgebra[LeastSquares](X, Y):

LeastSquares: calling external function
LeastSquares: NAG hw_f08aef
LeastSquares: NAG hw_f08agf
LeastSquares: NAG hw_f07tef

> kernelopts(version);
          Maple 13.02, X86 64 WINDOWS, Oct 12 2009, Build ID 436951

What was more recently added was the ability of LeastSquares to make use of the so-called "thin SVD" which makes it much more efficient in the greatly overdetermined case m>>n. (See here for a stand-alone implementation of that, outside of LinearAlgebra.)

If you want to see a more user-level implementation of the SVD approach then you could do worse than look over several questions posed by member herclau in the second half of 2010. eg. 1, 2, 3.

Note that in some cases the QR method may be faster, while the SVD method might be more robust. This is a common dichotomy: better speed vs. better assurance of correctness.

You might think that everyone else should be able to figure out, without explanation beforehand, your intended meanings that df = num.rows - num.cols, or that "ey" means "Estimated Y" and is computed via A.x, and that data1 is the solution in the code while `x` is the solution elsewhere, and X is the Matrix in the code while A is the Matrix elsewhere, and that sometimes Y means lowercase y while X does not mean lowercase x. But it does make it harder to read. I think that it isn't as polite as can be, for your audience.

acer

@Markiyan Hirnyk You misunderstand my points, I believe. Maple has not had session-dependent set ordering based on memory addresses in several major releases, so it is not relevant that you get the same order upon multiple reexecution here.

What I was trying to say is that the order will depend upon the lexicographic order (although that too can vary with option `setsort` as Maple is launched). So, choose some other name for the parameter, ie. Z instead of b, and its position in the set or list will be different. So choosing B[2] is ad hoc and will not reliably produce the same kind of result for other examples.

And also, one can choose a formal parameter x along with an additional (scoped) b, or a formal parameter b along with an additional (scoped) x. Both of these two could produce the same list B, in which case the code that you showed, alone, will be insufficient to determine which is the additional parameter. That is why, when manipulating the inert form, I deliberately excluded the formal parameters (of an operator present) before calling FromInert.

This is just another instance of the fact that lexicographic set ordering is, essentially, about as indeterminate as was the old address-based set ordering. The latter is indeterminate due to what may or may not have occurred previously in the session, while the former is indeterminate because the user has free rein to choose his or her variable names at will.

Post-conversion to a list does not pertain to or alter the above.

@Markiyan Hirnyk You misunderstand my points, I believe. Maple has not had session-dependent set ordering based on memory addresses in several major releases, so it is not relevant that you get the same order upon multiple reexecution here.

What I was trying to say is that the order will depend upon the lexicographic order (although that too can vary with option `setsort` as Maple is launched). So, choose some other name for the parameter, ie. Z instead of b, and its position in the set or list will be different. So choosing B[2] is ad hoc and will not reliably produce the same kind of result for other examples.

And also, one can choose a formal parameter x along with an additional (scoped) b, or a formal parameter b along with an additional (scoped) x. Both of these two could produce the same list B, in which case the code that you showed, alone, will be insufficient to determine which is the additional parameter. That is why, when manipulating the inert form, I deliberately excluded the formal parameters (of an operator present) before calling FromInert.

This is just another instance of the fact that lexicographic set ordering is, essentially, about as indeterminate as was the old address-based set ordering. The latter is indeterminate due to what may or may not have occurred previously in the session, while the former is indeterminate because the user has free rein to choose his or her variable names at will.

Post-conversion to a list does not pertain to or alter the above.

@Markiyan Hirnyk Let's also recall that the positioning in the indets result may depend upon set ordering.

So determining the placement is problematic.

> restart:
> h := Statistics:-Distribution(PDF = (x-> Dirac(x-b))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "arrow", "b", "x", "operator"}

> h := Statistics:-Distribution(PDF = (x-> Dirac(x-A))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "A", "Dirac", "arrow", "x", "operator"}

> h := Statistics:-Distribution(PDF = (x-> Dirac(x-Z))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "Z", "arrow", "x", "operator"}

Since this may have been intended for user-defined distribution, the formal parameter name(s) or the PDF operator may also be ad hoc,

> h := Statistics:-Distribution(PDF = (b-> Dirac(b-x))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "arrow", "b", "x", "operator"}

In this last example, we would need to determine the parameter name as well, so as to not misidentify.

This all relates to why I originally sieved out the formal parameter names and some other atomics, when processing the inert form.

But I suppose that all this is moot now, as member icegood may choose another interface for his code's users.

@Markiyan Hirnyk Let's also recall that the positioning in the indets result may depend upon set ordering.

So determining the placement is problematic.

> restart:
> h := Statistics:-Distribution(PDF = (x-> Dirac(x-b))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "arrow", "b", "x", "operator"}

> h := Statistics:-Distribution(PDF = (x-> Dirac(x-A))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "A", "Dirac", "arrow", "x", "operator"}

> h := Statistics:-Distribution(PDF = (x-> Dirac(x-Z))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "Z", "arrow", "x", "operator"}

Since this may have been intended for user-defined distribution, the formal parameter name(s) or the PDF operator may also be ad hoc,

> h := Statistics:-Distribution(PDF = (b-> Dirac(b-x))):
> B := indets(ToInert(eval(h:-PDF)), atomic);           
               B := {1, "Dirac", "arrow", "b", "x", "operator"}

In this last example, we would need to determine the parameter name as well, so as to not misidentify.

This all relates to why I originally sieved out the formal parameter names and some other atomics, when processing the inert form.

But I suppose that all this is moot now, as member icegood may choose another interface for his code's users.

Thanks for uploading your worksheet. Please note that we cannot yet run it, as it relies on some procedures like procBombcurve and (presumably assigned names of expressions like) PreBombcurve and PostBombcurve. Is it possible for you to upload everything required to run the worksheet? (Uploads and links to multiple files could be included, if you are at liberty to do so.)

Also, I notice that the lines of the worksheet must be executed out of order, even if the missing bits were available. For example it executes a test of procedures `p` and `procRsum` before it defines/assigns procedures `procPatrientres` and `procC14incell`. That is not critical, but may confuse someone trying to run it.

It's slightly unwieldy for procedures such as procPatientres to assign to globals like `residual`, when it might be more straightforward to just have it return the computed value for that. (And, in turn, any other procedure which called that could assign the result to its own local `residual`. Doing it in that way can reduce mix-ups, keep the global name space clean, and avoid the need for unassignments at the top level to avoid inadvertent mistakes.)

acer

I really like purely exact approaches to such problems: no hidden float log, no `floor` or `ceil`, no outright or hidden `evalf` calls at all. The above method seems to be like that -- just exact multiplication and `op`. A reduced risk of the task's being liable to mistaken exclusion/inclusion due to roundoff or other numerical difficulty.

And it's probably quite fast.

And trailing-zero removal may be applied afterwards.

I'm supposing that `union` itself is not invoking evalf, and is relying only on evalb here.

Of course, for such tasks the speed can depend heavily on details. Is it for cases where a) there are many duplicates of just a few "unique" values, b) few duplicates of many "unique" values, or c) some mixed scenario.

acer

I really like purely exact approaches to such problems: no hidden float log, no `floor` or `ceil`, no outright or hidden `evalf` calls at all. The above method seems to be like that -- just exact multiplication and `op`. A reduced risk of the task's being liable to mistaken exclusion/inclusion due to roundoff or other numerical difficulty.

And it's probably quite fast.

And trailing-zero removal may be applied afterwards.

I'm supposing that `union` itself is not invoking evalf, and is relying only on evalb here.

Of course, for such tasks the speed can depend heavily on details. Is it for cases where a) there are many duplicates of just a few "unique" values, b) few duplicates of many "unique" values, or c) some mixed scenario.

acer

@alex_01 I could be mistaken in thinking that your problem is necessarily nonlinear. And you are right: solving it could likely be faster if it could be reformulated as LP. (There are constraints to accommodate, albeit simple bounds.) I'll try and give it some proper thought.

Ideally, the help-pages and Example worksheets are supposed to explain how to get Matrix form, with examples that explain it all. I agree, it can be messy. But as you say, seeing patterns is still not enough: one has to fully understand the structures in order to be able to get it for any new problem.  If I can find time, I'll convert this example, forcing use of NLPSolve.

Cheers. Sorry if I've been overbearing in this thread.

 

First 417 418 419 420 421 422 423 Last Page 419 of 592