mmcdara

MaplePrimes Activity


These are answers submitted by mmcdara

@Harry Garst 

Sorry for this late answer, but I was busy with some other stuff and simply forgot this thread.
Concerning the EM algorithm used to identify the (known number of) components of a mixture of not necessarily Gaussian distributions, I wrote something for personal use some time ago.
It is just an implementation of the classical EM algorithm (without jumps to handle situations where the number of components is unknown).
If you're interested I can deliver it after some modifications (and comments), but I'm not sure it will fit your educational requirements.

The R code for tetra/polychoric correlations (package psych, function tetrachoric) is rather long and complex to translate into Maple. I will try to do something simpler, at least for tetrachoric correlations.
I'll keep you posted in the next few days.

For now, here is a "raw" version of the EM algorithm I use.

 

restart:

with(Statistics):

local D;
Digits := 20:
K  := 3:

 


STEP 1: Simulate a TrueMixture of 3 random variables

NbOfComponents := 3:

TrueMixture := table([
  # 1 = table([law = ExponentialDistribution, weight = 0.5, param = [4]       ]),
  # 1 = table([law = NonCentralChiSquare    , weight = 0.4, param = [3, 2]    ]),
  # 1 = table([law = MoyalDistribution      , weight = 0.4, param = [-1, 1]   ]),
    1 = table([law = NormalDistribution     , weight = 0.4, param = [0, 1]    ]),
    2 = table([law = NormalDistribution     , weight = 0.4, param = [3, 1]    ]),
    3 = table([law = NormalDistribution     , weight = 0.2, param = [10, 3]   ])
]);


for k from 1 to K do
   G__||k := RandomVariable(TrueMixture[k][law](op(TrueMixture[k][param]))):
end do:


# The sampling can be done in a wide variety of ways.
# The simplest, non purely stochastic, method is to draw, for each G__k,
# a sample whose size is proportional to its weight
# (a purely stochastic alternative is sketched right after S is assembled):
N    := 10^4:

P    := [seq(TrueMixture[k][weight], k=1..NbOfComponents)];

NS   := floor~(N *~ P);
S__1 := Sample(G__1, NS[1]):
S__2 := Sample(G__2, NS[2]):
S__3 := Sample(G__3, NS[3]):
S    := < seq((S__||k)^+, k=1..K) >;
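# A purely stochastic alternative (just a sketch, not used in the rest of this
# worksheet; the names CW, U and S_alt are arbitrary): for each of the N points,
# draw a component index at random according to the weights P, then sample the
# corresponding G__k.
CW    := [seq(add(P[j], j=1..k), k=1..K)]:   # cumulative weights
U     := Sample(Uniform(0, 1), N):
S_alt := Vector(N):
for n from 1 to N do
   k := 1:
   while U[n] > CW[k] do k := k+1 end do:
   S_alt[n] := Sample(G__||k, 1)[1]:
end do: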

Histogram(S):

for k from 1 to K do
   o__||k := TrueMixture[k][weight]:
   m__||k := op(1, TrueMixture[k][param]):
   s__||k := op(2, TrueMixture[k][param]):
end do:



 

TrueMixture := table([1 = table([law = NormalDistribution, weight = .4, param = [0, 1]]), 2 = table([law = NormalDistribution, weight = .4, param = [3, 1]]), 3 = table([law = NormalDistribution, weight = .2, param = [10, 3]])])

 

P := [.4, .4, .2]

 

NS := [4000, 4000, 2000]

 

Matrix(%id = 18446744078239458838)

(1)


STEP 2: Estimate the parameters of the mixture

mroll := rand(-5.0 .. 15.0);
sroll := rand(0 .. 10.0)

proc () options operator, arrow; RandomTools:-Generate(float('range' = -5.0 .. 15.0, 'method' = 'uniform')) end proc

 

proc () options operator, arrow; RandomTools:-Generate(float('range' = 0 .. 10.0, 'method' = 'uniform')) end proc

(2)

# Initialize Mixture with the correct distributions

MaxMix := 3:

Mixture := table([
  seq(k = table([law = TrueMixture[k][law], weight = 1/MaxMix, param = [mroll(), sroll()] ]), k=1..MaxMix)
]):



# EM algorithm

choix := combinat:-randperm([$(1..N)])[1..1000]:

verbose := false:


#-------------------------------------------------------------
# Initialisation
#
# (could be done differently)
for k from 1 to MaxMix do
   o__||k := Mixture[k][weight]:
   m__||k := Mixture[k][param][1]:
   s__||k := Mixture[k][param][2]:
end do:

if verbose then print("m = ", seq(m__||k, k=1..MaxMix)) end if;
if verbose then print("s = ", seq(s__||k, k=1..MaxMix)) end if;

 

#-------------------------------------------------------------
# algo.


printf(cat(seq("-------------------------", k=1..MaxMix), "---\n"));
printf(cat(seq(cat("   |     Component ", k, "     "), k=1..MaxMix), "  |\n"));
printf(cat(seq("-------------------------", k=1..MaxMix), "---\n"));
printf(cat(seq("   | weight  par 1  par2 ", k=1..MaxMix), "  |\n"));
printf(cat(seq("-------------------------", k=1..MaxMix), "---\n"));

R := 20:
for r from 1 to R do
   for k from 1 to MaxMix do
      pdf    := t -> Mixture[k][weight] * PDF(Mixture[k][law](op(Mixture[k][param])), t);
      L__||k := pdf~(S[choix]);
   end do:

   LL := `+`~(seq(L__||k, k=1..MaxMix));
   if verbose then print(seq(L__||k, k=1..MaxMix), LL) end if;
   
   for k from 1 to MaxMix do
      post__||k := L__||k /~ LL;
   end do:
   if verbose then print(seq(post__||k, k=1..MaxMix)) end if;

   for k from 1 to MaxMix-1 do
      o__||k := Mean(post__||k)[1]
   end do:
   o__||MaxMix := 1-add(o__||k, k=1..MaxMix-1):
   if verbose then print("o = ", seq(o__||k, k=1..MaxMix)) end if;

   for k from 1 to MaxMix do
      m__||k := add(post__||k *~ S[choix]) /~ add(post__||k);
   end do:
   if verbose then print("m = ", seq(m__||k, k=1..MaxMix)) end if;

   for k from 1 to MaxMix do
      s__||k := sqrt(add(post__||k *~ (S[choix] -~ m__||k)^~2) /~ add(post__||k));
   end do:
   if verbose then print("s = ", seq(s__||k, k=1..MaxMix)) end if;
   
   for k from 1 to MaxMix do
      if numelems(Mixture[k][param])=1 then
         Mixture[k][param] := m__||k;
      else
         Mixture[k][param] := [m__||k, s__||k];
      end if
   end do:

   if verbose then print();print() end if;
 
   printf(
           cat("%2d ", seq("  %-23a", k=1..MaxMix), "\n"),
           r,
           op(evalf[4]([seq([o__||k, op(Mixture[k][param])], k=1..MaxMix)]))
   )
end do:

------------------------------------------------------------------------------

   |     Component 1        |     Component 2        |     Component 3       |
------------------------------------------------------------------------------
   | weight  par 1  par2    | weight  par 1  par2    | weight  par 1  par2   |
------------------------------------------------------------------------------

 1   [.2836, 4.398, 4.942]    [.2630, 3.411, .8269]    [.4534, 1.707, 3.358]  

 2   [.3541, 4.668, 4.999]    [.2497, 3.125, .8237]    [.3962, 1.225, 2.419]  

 3   [.3373, 5.380, 4.960]    [.2579, 2.997, .8450]    [.4048, .8177, 1.760]  

 4   [.3169, 5.947, 4.793]    [.2726, 2.974, .8691]    [.4104, .5431, 1.444]  

 5   [.3028, 6.301, 4.652]    [.2919, 2.981, .8756]    [.4053, .3468, 1.261]  

 6   [.2936, 6.525, 4.552]    [.3078, 2.979, .8683]    [.3986, .2152, 1.141]  

 7   [.2885, 6.660, 4.482]    [.3192, 2.965, .8593]    [.3923, .1296, 1.064]  

 8   [.2862, 6.734, 4.436]    [.3269, 2.945, .8551]    [.3869, .7433e-1, 1.017]

 9   [.2853, 6.773, 4.406]    [.3322, 2.925, .8557]    [.3825, .3820e-1, .9876]

10   [.2850, 6.793, 4.387]    [.3360, 2.907, .8593]    [.3789, .1386e-1, .9693]

11   [.2851, 6.804, 4.375]    [.3389, 2.893, .8642]    [.3761, -.3123e-2, .9573]

12   [.2851, 6.810, 4.366]    [.3410, 2.881, .8692]    [.3738, -.1536e-1, .9492]

13   [.2852, 6.814, 4.360]    [.3427, 2.872, .8740]    [.3721, -.2442e-1, .9435]

14   [.2853, 6.817, 4.356]    [.3440, 2.864, .8781]    [.3707, -.3126e-1, .9394]

15   [.2853, 6.819, 4.353]    [.3451, 2.858, .8816]    [.3696, -.3649e-1, .9363]

16   [.2854, 6.821, 4.350]    [.3459, 2.854, .8846]    [.3687, -.4055e-1, .9340]

17   [.2854, 6.822, 4.348]    [.3466, 2.850, .8870]    [.3680, -.4371e-1, .9323]

18   [.2854, 6.823, 4.347]    [.3471, 2.847, .8889]    [.3675, -.4618e-1, .9309]

19   [.2854, 6.824, 4.345]    [.3476, 2.845, .8905]    [.3670, -.4813e-1, .9299]

20   [.2854, 6.825, 4.344]    [.3479, 2.843, .8918]    [.3667, -.4967e-1, .9290]

 

FormalMixture := evalf[4](add(o__||k * Mixture[k][law](op(evalf[4](Mixture[k][param]))), k=1..MaxMix));

.2854*NormalDistribution(6.825, 4.344)+.3479*NormalDistribution(2.843, .8918)+.3667*NormalDistribution(-0.4967e-1, .9290)

(3)

mix := add(Mixture[k][weight] * PDF(Mixture[k][law](op(Mixture[k][param])), t), k=1..MaxMix):
plots:-display(
   Histogram(S[choix]),
   plot(mix, t=min(S[choix])..max(S[choix]), color=red, thickness=3),
   seq(plot(Mixture[k][weight] * PDF(Mixture[k][law](op(Mixture[k][param])), t), t=min(S[choix])..max(S[choix]), color=gold), k=1..MaxMix)
)

 

 


 

Download EM_algorithm.mw

The error message indicates you cannot write 1/F__i = 0 .. 10 as a range.
Write instead F__i = 0.1 .. SomeBigNumber.

Doing this you will get the clearer error message
Error, (in Plot:-Inequality) additional variables not matching range variables found: {A__c, Z__b, Z__t, k__b, k__t, `(e__0)__mp`, M__max__B, M__min__B, sigma__ci, sigma__cs, sigma__ti, sigma__ts}
saying that there are uninstantiated quantities in the inequalities to plot: just give them values.

As suggested in the code below, use Explore to help you give values to the remaining indets.
 

restart:

with(plots):

eqns :=
  {
    F__i*(1-e__0/k__b)/A__c+M__min__B/Z__t >= sigma__ti,
    .8*F__i*(1-e__0/k__t)/A__c-M__max__B/Z__b >= sigma__ts,
    .8*F__i*(1-e__0/k__b)/A__c+M__max__B/Z__t <= sigma__cs,
    F__i*(1-e__0/k__t)/A__c-M__min__B/Z__b <= sigma__ci, abs(e__0) <= `(e__0)__mp`
  };

inequal(eqns, F__i = 0.1..100, e__0 = -5 .. 5, color = "Niagara 2")

{sigma__ti <= F__i*(1-e__0/k__b)/A__c+M__min__B/Z__t, sigma__ts <= .8*F__i*(1-e__0/k__t)/A__c-M__max__B/Z__b, .8*F__i*(1-e__0/k__b)/A__c+M__max__B/Z__t <= sigma__cs, F__i*(1-e__0/k__t)/A__c-M__min__B/Z__b <= sigma__ci, abs(e__0) <= `(e__0)__mp`}

 

Error, (in Plot:-Inequality) additional variables not matching range variables found: {A__c, Z__b, Z__t, k__b, k__t, `(e__0)__mp`, M__max__B, M__min__B, sigma__ci, sigma__cs, sigma__ti, sigma__ts}

 

indets(eqns, name) minus {F__i, e__0}

{A__c, Z__b, Z__t, k__b, k__t, `(e__0)__mp`, M__max__B, M__min__B, sigma__ci, sigma__cs, sigma__ti, sigma__ts}

(1)

Explore(inequal(eqns, F__i = 0.1..100, e__0 = -5 .. 5, color = "Niagara 2"))

 


 

Download inequal.mw

 

By change of coordinates

Change_coordinates.mw
 

restart;

g := (x, y) -> min((3+((x-y)^(2))/(10)+(abs(x+y))/(sqrt(2))),(-abs(x-y)+(7)/(sqrt(2))));

proc (x, y) options operator, arrow; min(3+(1/10)*(x-y)^2+abs(x+y)/sqrt(2), -abs(x-y)+7/sqrt(2)) end proc

(1)

# the terms (x-y) and (x+y) suggest changing the frame [O, x, y] into the frame [O, u, v] by a rotation of angle Pi/4

xy2uv := [u=(x+y)/sqrt(2), v=(x-y)/sqrt(2)];
uv2xy := op(solve(xy2uv, [x, y]));

[u = (1/2)*(x+y)*2^(1/2), v = (1/2)*(x-y)*2^(1/2)]

 

[x = (1/2)*2^(1/2)*(u+v), y = (1/2)*2^(1/2)*(u-v)]

(2)

G := unapply(simplify(subs(uv2xy, g(x, y))), [u, v]);

proc (u, v) options operator, arrow; min(-(1/2)*2^(1/2)*(2*abs(v)-7), 3+(1/5)*v^2+abs(u)) end proc

(3)

q := 0;
H := (u, v) -> Heaviside(G(u, v)-q);  # means H(u, v) is either 0 or 1
 

0

 

proc (u, v) options operator, arrow; Heaviside(G(u, v)-q) end proc

(4)

L := 10:
plots:-contourplot(H(u, v), u=-L..L, v=-L..L, contours=[0.999], filled=true, grid=[100, 100]);  # blue = 1, red = 0

 

# let

f := x -> exp(-x^2/2)/sqrt(2*Pi);

# then the integrand is

phi := (x, y) -> 'h(x, y)' * f(x) * f(y):

# apply the rotation

Phi := (u, v) -> simplify('H(u, v)' * subs(uv2xy, f(x)*f(y))):

Phi(u, v)

proc (x) options operator, arrow; exp(-(1/2)*x^2)/sqrt(2*Pi) end proc

 

(1/2)*H(u, v)*exp(-(1/2)*u^2-(1/2)*v^2)/Pi

(5)

# then p := int(int(h(x, y)*exp((-x^2-y^2)*(1/2))/(2*Pi), y = -infinity .. infinity), x = -infinity .. infinity);
# just becomes  p := int(int(Phi(u, v), u = -infinity .. infinity), v = -a..a);
# where a is the half width of the blue strip.
#
# Moreover, H(u, v) = 1 inside this blue strip, so

p := int(int(f(u)*f(v), u = -infinity .. infinity), v = -a..a);  # here "a" is still unknown

erf((1/2)*2^(1/2)*a)

(6)

H := unapply(subs(uv2xy, h(x, y)), [u, v])

proc (u, v) options operator, arrow; h((1/2)*2^(1/2)*(u+v), (1/2)*2^(1/2)*(u-v)) end proc

(7)

# Find the position of the horizontal lines which bound the blue strip

solutions := [solve(G(u, v)=0, v)];

strip := sort(select(is, solutions, real));

eval(p, a=strip[2]);

evalf(%);

[7/2, -7/2, I*(5*abs(u)+15)^(1/2), -I*(5*abs(u)+15)^(1/2)]

 

[-7/2, 7/2]

 

erf((7/4)*2^(1/2))

 

.9995347418

(8)

 


 

Download Change_coordinates.mw
 

I'm going to bed; here are some elements to answer points 1 to 4.
Point 5 is obviously ExcelTools:-Export(....) (a minimal sketch is given below).

I have no idea of your level of knowledge about all this stuff, so pardon me if reading this worksheet makes you feel I'm being condescending.
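For point 5, here is a minimal sketch of what I mean (the matrix M, the file name "results.xlsx" and the absence of a sheet name are just placeholders; adapt them to your own data):

# Export a Matrix to an Excel file with ExcelTools:-Export.
M := Matrix([[1, 2, 3], [4, 5, 6]]):
ExcelTools:-Export(M, "results.xlsx"):   # a sheet name can also be passed as an extra argument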

restart:

interface(version)

`Standard Worksheet Interface, Maple 2015.2, Mac OS X, December 21 2015 Build ID 1097895`

(1)

with(Statistics):

Simple model setting the framework of Ordinary Least Squares (OLS):
X1 and X2 are deterministic variables
B1 and B2 are parameters of unknown values

E is a random error (with some ad hoc properties [homoskedasticity] that I assume verified here [OLS framework again])
Y = B1*X1 + B2*X2 + E

Given a collection of triples (x1[n], x2[n], y[n]), find approximations b1 and b2 of B1 and B2


Step 1: simulation

N := 100:
SampleOfX__1 := Sample(Uniform(0, 10), N): # just to "create" x1 points in an arbitrary way
SampleOfX__2 := Sample(Uniform(0, 10), N): # just to "create" x2 points in an arbitrary way

Error := Sample(Normal(0, 10), N); # usual choice (= unbiased observation error + homoskedasticity)


B1 := 3 ; B2 := -4:   # remember these values are UNKNOWN in practical situations

ExactResponse := B1 *~ SampleOfX__1 +~ B2 *~ SampleOfX__2:

ObservedResponse := ExactResponse +~ Error:
 

Error := Vector(4, {(1) = ` 1 .. 100 `*Vector[row], (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

 

3

(2)

# Rotate the plot to verify the points do not lie on a single plane (because of the additive error)

ScatterPlot3D(< SampleOfX__1, SampleOfX__2, ObservedResponse>^+, symbol=solidcircle, symbolsize=20);

 

Estimation of B1 and B2

OLS_result := LinearFit(
                         [x__1, x__2],
                         < SampleOfX__1, SampleOfX__2>^+,
                         ObservedResponse^+,
                         [x__1, x__2],
                         output=[parametervalues, leastsquaresfunction, residualstandarddeviation]
                       )

OLS_result := [Vector(2, {(1) = 3.24372606226451, (2) = -3.69397058902010}), 3.24372606226451*`#msub(mi("x"),mi("1"))`-3.69397058902010*`#msub(mi("x"),mi("2"))`, 10.2959448666143]

(3)

Interpretation:
The best linear unbiased estimate (BLUE) of B1 equals 3.24372...
The best linear unbiased estimate (BLUE) of B2 equals -3.69397...
The residual standard deviation has value 10.29...
This value characterizes the "spreading" of the points around the optimal OLS plane based on the BLUE estimates above.


 

plots:-display(
   ScatterPlot3D(< SampleOfX__1, SampleOfX__2, ObservedResponse>^+, symbol=solidcircle, symbolsize=20),
   plot3d(OLS_result[2], x__1=0..10, x__2=0..10, color=blue, transparency=0)  # adjust transparency and/or rotate
);

 

The main problem with residualstandarddeviation is that it only gives information about the quality of representation of the "BLUE" plane.
This error is therefore called the "REPRESENTATION error" or "LEARNING error".

What we are often interested in is assessing how well the model behaves OUTSIDE of the points it was constructed on,
that is, assessing the GENERALIZATION error.
The classical strategy to do this is to use a resampling procedure.
For instance:
    1/ Leave One Out (LOO), where all the points but one are used to build the optimal model while the discarded point is used to assess the generalization error.
        Repeat this for all the points and assemble these N estimators to obtain the desired generalization error (a minimal LOO sketch is given below).

   2/ Leave k Out (LkO), similar to LOO but with k points discarded (N!/k!/(N-k)! local estimators can be obtained that way).

  3/  Bootstrap (simplest: refer to the corresponding Maple help page).

  4/ Cross Validation, or CV (close to LkO): split the DATA base (the N triples here) into a LEARNING base used to build the optimal model, plus a TEST or GENERALIZATION
      base used to assess its generalization error.
      As always, repeat the process.

About the 25%-75% splitting I have a few things to say; some others will use 1/3-2/3.
There is no strict rule beyond the fact that the model must remain constructible on the chosen fraction of the DATA base.
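Here is a minimal LOO sketch in the setting above (just an illustration of point 1/; the names kept, model, LOO_errors and LOO_Gen_Error are arbitrary; the procedure I actually use below is the CV one):

# Leave One Out: for each point n, fit the model on the N-1 remaining points
# and record the prediction error on the discarded point.
LOO_errors := Vector(N):
for n from 1 to N do
   kept  := subsop(n=NULL, [$1..N]):                 # all indices but n
   model := unapply(
               LinearFit(
                  [x__1, x__2],
                  < SampleOfX__1[kept], SampleOfX__2[kept] >^+,
                  ObservedResponse[kept]^+,
                  [x__1, x__2],
                  output = leastsquaresfunction
               ),
               [x__1, x__2]
            ):
   LOO_errors[n] := model(SampleOfX__1[n], SampleOfX__2[n]) - ObservedResponse[n]:
end do:

LOO_Gen_Error := sqrt(add(LOO_errors[n]^2, n=1..N) / N);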
 

# Simulation

Data_base := < SampleOfX__1, SampleOfX__2, ObservedResponse>^+:

Learning_ratio := 0.25:
Learning_size  := round(Learning_ratio*N);
Test_size      := N - Learning_size;

KeptForLearning := combinat[randperm]([$1..N])[1..Learning_size];  # randomly select 25% of the data base
KeptForTest     := convert({$1..N} minus {KeptForLearning[]}, list);
 

25

 

75

 

[13, 1, 53, 86, 89, 59, 34, 75, 69, 42, 61, 90, 46, 74, 11, 44, 72, 35, 6, 84, 87, 27, 22, 45, 91]

 

[2, 3, 4, 5, 7, 8, 9, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 36, 37, 38, 39, 40, 41, 43, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 60, 62, 63, 64, 65, 66, 67, 68, 70, 71, 73, 76, 77, 78, 79, 80, 81, 82, 83, 85, 88, 92, 93, 94, 95, 96, 97, 98, 99, 100]

(4)

Learning_base := Data_base[KeptForLearning, ..];
Test_base     := Data_base[KeptForTest, ..]    ;
 

Learning_base := Vector(4, {(1) = ` 25 x 3 `*Matrix, (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

 

Matrix(%id = 18446744078279553862)

(5)

# Learnt (=Trained) model

CV_result := LinearFit(
                         [x__1, x__2],
                         Learning_base,
                         [x__1, x__2],
                         output=[parametervalues, leastsquaresfunction, residualstandarddeviation]
                       )

CV_result := [Vector(2, {(1) = 3.52749864138740, (2) = -3.52762830240327}), 3.52749864138740*`#msub(mi("x"),mi("1"))`-3.52762830240327*`#msub(mi("x"),mi("2"))`, 9.05975305600152]

(6)

# Its predictions

CV_model := unapply(CV_result[2], [x__1, x__2]);

CV_predictions := Vector[column](Test_size, n -> op(CV_model(entries(Test_base[n, 1..2]))))

CV_model := proc (`#msub(mi("x"),mi("1"))`, `#msub(mi("x"),mi("2"))`) options operator, arrow; 3.5274986413873965*`#msub(mi("x"),mi("1"))`+(-1)*3.527628302403268*`#msub(mi("x"),mi("2"))` end proc

 

Vector[column](%id = 18446744078279555902)

(7)

# Its generalization error:

CV_Gen_Error := StandardDeviation(CV_predictions -~ Test_base[..,3])

HFloat(10.58269840059723)

(8)

A value not very far from the representation error obtained in the OLS test, but this is related to the fact that the TRUE model is ALSO the
regression model (you could change the TRUE model by writing for instance Y = B1*X1 + B2*X2^2 + E and keep the regression model Y = B1*X1 + B2*X2
unchanged: you would then see large differences between the training and the test errors).

Let's now generate R realizations of this test error:

R := 100: # it's just illustrative

CV_All := NULL:

for r from 1 to R do
  KeptForLearning := combinat[randperm]([$1..N])[1..Learning_size];  
  KeptForTest     := convert({$1..N} minus {KeptForLearning[]}, list);
  Learning_base   := Data_base[KeptForLearning, ..];
  Test_base       := Data_base[KeptForTest, ..]    ;
  CV_result       := LinearFit(
                         [x__1, x__2],
                         Learning_base,
                         [x__1, x__2],
                         output=[parametervalues, leastsquaresfunction, residualstandarddeviation]
                    ):
  CV_model        := unapply(CV_result[2], [x__1, x__2]);
  CV_predictions  := Vector[column](Test_size, n -> op(CV_model(entries(Test_base[n, 1..2])))):
  CV_Gen_Error    := StandardDeviation(CV_predictions -~ Test_base[..,3]):
  
  CV_All := CV_All, [CV_result[], CV_Gen_Error]:
end do:

CV_All := [CV_All]:

All_Gen_Errors := op~(-1, CV_All);
 

[HFloat(9.651751409842975), HFloat(11.073297744209476), HFloat(10.582065315202824), HFloat(11.370905480893054), HFloat(10.66062548594883), HFloat(10.197860333787814), HFloat(12.41984696724287), HFloat(10.26697182510627), HFloat(11.282596469419106), HFloat(10.953890045733575), HFloat(10.805611200949137), HFloat(10.269181816780955), HFloat(11.738688054559878), HFloat(11.038255433740698), HFloat(10.288690840528693), HFloat(10.302616068603378), HFloat(9.92059362228101), HFloat(11.369276018689709), HFloat(10.531187095376493), HFloat(10.528157324620775), HFloat(10.794603635544576), HFloat(10.687373377037863), HFloat(10.093956416633349), HFloat(9.765740103319256), HFloat(10.04186222004179), HFloat(10.960507825341455), HFloat(10.681157271733277), HFloat(10.95331271884746), HFloat(10.49331163470941), HFloat(10.606985758324798), HFloat(10.322822452553421), HFloat(10.831019288718446), HFloat(10.690102166263637), HFloat(10.707222219250296), HFloat(11.496121117213429), HFloat(10.202813494517128), HFloat(10.870415933896236), HFloat(10.228559285899076), HFloat(9.901316980202198), HFloat(11.235324777668138), HFloat(10.902603625122918), HFloat(10.914117298801584), HFloat(11.456526480997686), HFloat(10.09331224647256), HFloat(11.537103244138848), HFloat(10.13310322832185), HFloat(10.928632140543822), HFloat(10.206451530904193), HFloat(11.120120136270378), HFloat(9.729935966292539), HFloat(10.186651165055293), HFloat(10.098925551423877), HFloat(11.25433646374303), HFloat(10.736132209751101), HFloat(11.02680833986016), HFloat(10.109821507353962), HFloat(10.426438708344806), HFloat(10.283779712420099), HFloat(10.695264381144266), HFloat(10.31222432561016), HFloat(10.89859930478856), HFloat(11.080714909982953), HFloat(10.153367981091353), HFloat(10.322124849198017), HFloat(9.635392933723857), HFloat(9.67503021569015), HFloat(10.990485781584994), HFloat(10.580001065747831), HFloat(10.98300811348783), HFloat(10.819589530631744), HFloat(10.530170012359214), HFloat(10.980387844133084), HFloat(11.454124102451425), HFloat(11.917027702421562), HFloat(10.560728962174448), HFloat(10.303471444788272), HFloat(10.247638749777725), HFloat(10.53435615420061), HFloat(10.74631482502725), HFloat(11.091629780383638), HFloat(10.435095538197611), HFloat(10.592305219979297), HFloat(10.128061684406104), HFloat(10.343475968239149), HFloat(10.593868706318588), HFloat(9.677543534261027), HFloat(10.375985506158676), HFloat(9.944500315401031), HFloat(10.774385240991624), HFloat(10.274926237145262), HFloat(10.967918055475566), HFloat(10.74535182383358), HFloat(10.861251445658647), HFloat(10.471201895181126), HFloat(9.6799192328998), HFloat(11.025356009868046), HFloat(10.519396354811088), HFloat(10.4657467263297), HFloat(11.225574737612337), HFloat(10.16744656966274)]

(9)

# the best estimate of the generalization error of a bilinear model trained on 25% of the data base is

GENERALIZED_Error := sqrt(Mean(All_Gen_Errors^~2));

HFloat(10.630183338660515)

(10)

# Matrix of all the BLUE b1 and b2
#
# Scatter plot of all the BLUEs from all the test bases, plus (red asterisk) the OLS BLUE

CV_BLUE := convert(op~(1, CV_All), Matrix)^+;

plots:-display(
   plot(CV_BLUE, style=point, color=black),
   plot([[entries(OLS_result[1], nolist)]], color=red, symbol=asterisk, symbolsize=30, style=point)
);

CV_BLUE := Vector(4, {(1) = ` 100 x 2 `*Matrix, (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

 

 

# Let's build a very simple aggregated model

Aggregated_Model := Mean(CV_BLUE[..,1]) * x__1 + Mean(CV_BLUE[..,2]) * x__2;

HFloat(3.295250552411322)*x__1-HFloat(3.7572438843065274)*x__2

(11)

In some sense, this aggregated model can be said to have a generalization error equal to   
GENERALIZED_Error := 10.6301833386605
From this (generalization) error point of view it is a better model than the original OLS model, for which NO generalization error can be assessed.
But, from the representation point of view, this aggregated model is worse than the OLS model:
the (N-1)/(N-3) factor comes from technical considerations (N-3 degrees of freedom to assess residualstandarddeviation).

Aggregated_Model_fun := unapply(Aggregated_Model, [x__1, x__2]):  # evaluate the AGGREGATED model (not the last CV model)

Predictons_of_Aggregated_Model := Vector[column](N, n -> op(Aggregated_Model_fun(entries(Data_base[n, 1..2]))));

Representation_error_of_Aggregated_Model := StandardDeviation(Predictons_of_Aggregated_Model -~ Data_base[.., 3]) * (N-1) / (N-3)

Predictons_of_Aggregated_Model := Vector(4, {(1) = ` 1 .. 100 `*Vector[column], (2) = `Data Type: `*anything, (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

 

HFloat(10.490314893535418)

(12)

 


 

Download OLS.mw

 

Kitonum has been faster than me!!!

I was prepared to delete my reply, which is very close to Kitonum's and with whom I agree on a lot of points...
except for the final result which, for a reason I do not know, is not the same.
So I finally decided to send "my" proposal anyway... maybe there is an obvious typo in it, or something Kitonum will be able to explain?


 

restart

k := T -> 2/cosh(2/T)/coth(2/T)

proc (T) options operator, arrow; 2/(cosh(2/T)*coth(2/T)) end proc

(1)

local delta, e:

delta := T -> sqrt(1-k(T)^2*sin(theta)^2)

proc (T) options operator, arrow; sqrt(1-k(T)^2*sin(theta)^2) end proc

(2)

e := T -> -2*tanh(2/T)*diff(k(T)/2/Pi*Int(sin(theta)^2/delta(T)/(1+delta(T)), theta=0..Pi), T)

proc (T) options operator, arrow; -2*tanh(2/T)*(diff((1/2)*k(T)*(Int(sin(theta)^2/(delta(T)*(1+delta(T))), theta = 0 .. Pi))/Pi, T)) end proc

(3)

ee := evalf(e(T)):  # e(T) does the differentiation with respect to T and evalf replaces Int by int

eval(ee, T=1):      # substitute T by 1 in ee

evalf(%);           # compute the result

-.3586637209

(4)

 


 

Download proposal.mw

You confused
c[1] := 2; c[2] := 5;
with, farther on,
c := (beta[1]*s(t)*p(t).(lambda[2](t)-lambda[1](t)))/(w[2]*(a+p(t)))+(lambda[2](t).e(t)+(i(t)+alpha.e(t)).lambda[3](t)-(gamma.i(t)+p(t)).lambda[4](t))/w[2];

Then, in the lambda[1](t) ode, the first term of the rhs contains c[1] and appears as (beta[1]*s(t)*p(t)......./w[2]) instead of (I guess) 2.
Likewise, in the lambda[2](t) ode, the first term of the rhs contains c[2] and appears as (beta[1]*s(t)*p(t)......./w[2]) instead of (I guess) 5.
This explains the error you get, Error, (in fproc) unable to store '-1.*HFloat(0.0)[1]': the HFloat(0.0)[1] refers to (beta[1]*s(t)*p(t)......./w[2])


It's up to you to correct this.
For example by writing
C := (beta[1]*s(t)*p(t).(lambda[2](t)-lambda[1](t)))/(w[2]*(a+p(t)))+(lambda[2](t).e(t)+(i(t)+alpha.e(t)).lambda[3](t)-(gamma.i(t)+p(t)).lambda[4](t))/w[2];
u[2] := min(max(0, C), 1);


Once done it should work better, but don't be in a hurry: with maxmesh=2400 Maple takes its time to provide a solution (about 5 minutes).

Download MaybeThis.mw

Does this suit you?
(Don't worry about the expression of A: I just copied-pasted the content of your question.)

PS: Given a graph G, the command AdjacencyMatrix(G) returns the equivalent of your matrix A but I do not know of a Maple procedure which constructs a graph G given its adjacency matrix.
 

restart

with(GraphTheory):

A := Matrix(4, 10, {(1, 1) = 0, (1, 2) = 0, (1, 3) = 0, (1, 4) = 0, (1, 5) = 0, (1, 6) = 0, (1, 7) = 0, (1, 8) = 0, (1, 9) = 0, (1, 10) = 0, (2, 1) = 0, (2, 2) = 0, (2, 3) = 1, (2, 4) = 1, (2, 5) = 0, (2, 6) = 1, (2, 7) = 0, (2, 8) = 0, (2, 9) = 0, (2, 10) = 1, (3, 1) = 1, (3, 2) = 1, (3, 3) = 0, (3, 4) = 0, (3, 5) = 1, (3, 6) = 0, (3, 7) = 1, (3, 8) = 1, (3, 9) = 1, (3, 10) = 0, (4, 1) = 0, (4, 2) = 0, (4, 3) = 0, (4, 4) = 0, (4, 5) = 0, (4, 6) = 0, (4, 7) = 0, (4, 8) = 0, (4, 9) = 0, (4, 10) = 0})

A := Matrix(4, 10, {(1, 1) = 0, (1, 2) = 0, (1, 3) = 0, (1, 4) = 0, (1, 5) = 0, (1, 6) = 0, (1, 7) = 0, (1, 8) = 0, (1, 9) = 0, (1, 10) = 0, (2, 1) = 0, (2, 2) = 0, (2, 3) = 1, (2, 4) = 1, (2, 5) = 0, (2, 6) = 1, (2, 7) = 0, (2, 8) = 0, (2, 9) = 0, (2, 10) = 1, (3, 1) = 1, (3, 2) = 1, (3, 3) = 0, (3, 4) = 0, (3, 5) = 1, (3, 6) = 0, (3, 7) = 1, (3, 8) = 1, (3, 9) = 1, (3, 10) = 0, (4, 1) = 0, (4, 2) = 0, (4, 3) = 0, (4, 4) = 0, (4, 5) = 0, (4, 6) = 0, (4, 7) = 0, (4, 8) = 0, (4, 9) = 0, (4, 10) = 0})

(1)

edges := map(x -> if A[op(x)] = 1 then {x[]} end if, {indices(A)});

{{1, 3}, {2, 3}, {2, 4}, {2, 6}, {2, 10}, {3, 5}, {3, 7}, {3, 8}, {3, 9}}

(2)

G := Graph(edges);

GRAPHLN(undirected, unweighted, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], Array(%id = 18446744078096242318), `GRAPHLN/table/1`, 0)

(3)

DrawGraph(G, style=spring);

 

 


 

Download Graph.mw

@a_simsim 

A third way is "table" ; maybe easier to manipulate (for instance to add or to remove a "field")


Table.mw

PS: a "field" can itself be a table, and so on

Personally I prefer using records.

I find them more readable than arrays (Tom's proposal) and the structure is less artificial than the one obtained with arrays.
Beyond that, I don't have enough skill in Maple to tell you which of these two structures is more efficient in terms of memory usage or manipulation time.

Last but not least, as you mention Matlab: a record is closer to a struct than an array is.
 

restart:

#              the
#              name
#              you
#              want
#               |
#               |
#               V
r := Record(
             'MyArray' = Array(1..3, 1..3,1..3),
             'data'    = Vector([2,1,3,5,6]),
             'status'  = true,
             'x'       = 123.456,
             'n'       = 17,
             'name'    = "astring",
             'MyVect'  = Vector([true, false, true, true, true, false]),
             'MyMat'   = Matrix(7,7, symbol=x),
             'MyPlot'  = plot(x^2, x=0..5)
            ):

r:-data

Vector[column]([[2], [1], [3], [5], [6]])

(1)

r:-n

17

(2)

r:-MyPlot

 

 


 

Download Record.mw

No, Maple doesn't.

What are you looking for?

If you just need to sample a Levy(m, s) distribution (m = location parameter, s = dispersion parameter [classical stat/prob notations]; see for instance https://en.wikipedia.org/wiki/Lévy_distribution, where m is denoted mu and s is denoted c), you can use this:

(some extra material is given to help you define a LevyDistribution)


 

restart:

with(Statistics):

U := RandomVariable(Uniform(1/2, 1)):

N  := 10^4:
SU := Sample(U, N);

SU := Vector(4, {(1) = ` 1 .. 10000 `*Vector[row], (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

(1)

m := 3;
s := 2;

3

 

2

(2)

SL := m +~ s /~ ( Quantile~(Normal(0, 1), SU) )^~2 ;

SL := Vector(4, {(1) = ` 1 .. 10000 `*Vector[row], (2) = `Data Type: `*anything, (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

(3)

Histogram(log[10]~(SL), frequencyscale=absolute, minbins=100)

 

For comparison, here is the result returned by R with N=10^4, m=3 and s=2

#

m := 'm':
s := 's':

pdf := unapply(piecewise(x<0, 0, sqrt(s/2/Pi)*exp(-s/2/(x-m))/(x-m)^(3/2)), x);
cdf := unapply(erfc(sqrt(s/2/(x-m))), x);

proc (x) options operator, arrow; piecewise(x < 0, 0, (1/2)*2^(1/2)*(s/Pi)^(1/2)*exp(-(1/2)*s/(x-m))/(x-m)^(3/2)) end proc

 

proc (x) options operator, arrow; erfc((1/2)*2^(1/2)*(s/(x-m))^(1/2)) end proc

(4)

Levy := Distribution(PDF=(x->pdf(x)), CDF= (x->cdf(x)), Conditions = [s > 0])

_m4469968896

(5)

X := RandomVariable(Levy)

_R11002

(6)

Levy__mean := Mean(X);

int((1/2)*_t1*2^(1/2)*(s/Pi)^(1/2)*exp(-(1/2)*s/(_t1-m))/(_t1-m)^(3/2), _t1 = 0 .. infinity)

(7)

Levy__median := Median(X);

(1/2)*(2*RootOf(2*erfc(_Z)-1)^2*m+s)/RootOf(2*erfc(_Z)-1)^2

(8)

evalf(subs({m=0, s=1}, Levy__median));

# compare with wikipedia

evalf(subs({m=0, s=1}, s/2*(1/erfc(1/2))^2));

2.198109338

 

2.174665977

(9)

# plot of the pdf (wiki says the mode, in the case m=0, equals s/3 = 1/3 here)

plots:-display(
  plot(subs({m=0, s=1}, PDF(X, x)), x=0..2, gridlines=true),
  plot([[1/3, 0], [1/3, 0.5]], color=blue)
);

 

ms := {m=0, s=1}:

MyLevy := Distribution(PDF=(x->subs(ms, pdf(x))), CDF= (x->subs(ms, cdf(x))))

_m4469962720

(10)

MyX := RandomVariable(MyLevy):

A := Sample(MyX, 10^4, method = [envelope, range = 0.00001..6]):

plots:-display(
   Histogram(A, minbins=100, color=blue, transparency=0.5, view=[0..2, default]),
   plot(subs({m=0, s=1}, PDF(X, x)), x=0..2, color=red, thickness=3,
        title="A scaling problem I've not solved yet\n", font=[Roman, bold, 14])
)

 

 


 

Download Sampling-Levy_Distribution.mw

Here are a few examples.


PS: All RandomVariables have floating-point outcomes. If you want to generate "true integer" outcomes, use rand instead (see the sketch below) ... but you will lose the concept of random variable.
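
For instance, a minimal sketch with rand (not part of the attached worksheet; the names roll and throws are arbitrary):

# rand(1..6) returns a procedure which, when called, produces an integer
# uniformly distributed on 1..6 ("true integer" outcomes).
roll   := rand(1..6):
throws := [seq(roll() + roll(), n=1..10)];   # ten throws of a pair of dice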

RollingDice.mw
 

restart:

with(Statistics):

Dice1 := RandomVariable(DiscreteUniform(1, 6));
Dice2 := RandomVariable(DiscreteUniform(1, 6));

_R

 

_R0

(1)

# example 1:

f1 := (Dice1, Dice2) -> Dice1 + Dice2:
Sample(f1(Dice1, Dice2), 10^5):
Histogram(%, title=f1('Dice1', 'Dice2'));

 

# example 2:

f2 := (Dice1, Dice2) -> Dice1^2 + Dice2:
Sample(f2(Dice1, Dice2), 10^5):
Histogram(%, title=f2('Dice1', 'Dice2'));

 

# example 3:

f3 := (Dice1, Dice2) -> exp(Dice1)*cos(Dice2):
Sample(f3(Dice1, Dice2), 10^5):
Histogram(%, title=f3('Dice1', 'Dice2'));

 

# Finally: a lot of symbolic computations can be done, for instance

print~([seq(Mean__f||n, n=1..3)] =~ Mean~([seq(f||n(Dice1, Dice2), n=1..3)]))
 

Mean__f1 = 7

 

Mean__f2 = 56/3

 

Mean__f3 = (1/36)*exp(1)*cos(1)+(1/36)*exp(2)*cos(1)+(1/36)*exp(3)*cos(1)+(1/36)*exp(4)*cos(1)+(1/36)*exp(5)*cos(1)+(1/36)*exp(6)*cos(1)+(1/36)*exp(1)*cos(2)+(1/36)*exp(2)*cos(2)+(1/36)*exp(3)*cos(2)+(1/36)*exp(4)*cos(2)+(1/36)*exp(5)*cos(2)+(1/36)*exp(6)*cos(2)+(1/36)*exp(1)*cos(3)+(1/36)*exp(2)*cos(3)+(1/36)*exp(3)*cos(3)+(1/36)*exp(4)*cos(3)+(1/36)*exp(5)*cos(3)+(1/36)*exp(6)*cos(3)+(1/36)*exp(1)*cos(4)+(1/36)*exp(2)*cos(4)+(1/36)*exp(3)*cos(4)+(1/36)*exp(4)*cos(4)+(1/36)*exp(5)*cos(4)+(1/36)*exp(6)*cos(4)+(1/36)*exp(1)*cos(5)+(1/36)*exp(2)*cos(5)+(1/36)*exp(3)*cos(5)+(1/36)*exp(4)*cos(5)+(1/36)*exp(5)*cos(5)+(1/36)*exp(6)*cos(5)+(1/36)*exp(1)*cos(6)+(1/36)*exp(2)*cos(6)+(1/36)*exp(3)*cos(6)+(1/36)*exp(4)*cos(6)+(1/36)*exp(5)*cos(6)+(1/36)*exp(6)*cos(6)

 

[]

(2)

 

 

 

Like Tom, I'm not sure I correctly understand your problem.
It seems to me you want to sample truncated Poisson distributions (between 0 and 10 included) whose parameter lambda is itself a linear combination of random variables (am I right?).

If so, this piece of code will possibly answer your question.
Be careful: not all the values of lambda you create from your model are positive!
In the attached file I show you how to estimate the frequency of negative lambda values. This frequency goes to zero as the value of N (that is, the size of the vector lambda) tends to +infinity. For N=10 the ratio of negative lambda values is close to 1/4.


 

restart:

with(Statistics):

randomize():

N    := 10;
x__0 := Vector[row](N, [1$N]);
x__1 := Sample(Binomial(N, 0.4), N);
x__2 := Sample(Normal(0, 1), N);

N := 10

 

`#msub(mi("x"),mi("0"))` := Vector[row](10, {(1) = 1, (2) = 1, (3) = 1, (4) = 1, (5) = 1, (6) = 1, (7) = 1, (8) = 1, (9) = 1, (10) = 1})

 

`#msub(mi("x"),mi("1"))` := Vector[row](10, {(1) = 5.0, (2) = 1.0, (3) = 6.0, (4) = 3.0, (5) = 5.0, (6) = 2.0, (7) = 7.0, (8) = 2.0, (9) = 6.0, (10) = 8.0}, datatype = float[8])

 

`#msub(mi("x"),mi("2"))` := Vector[row](10, {(1) = .5691573712734501, (2) = .10685006448894822, (3) = -.39789312201697974, (4) = -.5395405825678521, (5) = .6183149648978897, (6) = .5512360714665099, (7) = 1.2991554584043454, (8) = .18083457234126987, (9) = -.47061639469370886, (10) = 0.8967688506232983e-1}, datatype = float[8])

(1)

local lambda:

lambda := unapply(add(beta__||k *~ x__||k, k=0..2), (seq(beta__||k, k=0..2)) );

 

 

B := (alpha, NMC) -> Sample(Poisson(alpha), NMC, method=[discrete, range=0..10])

proc (alpha, NMC) options operator, arrow; Statistics:-Sample(Poisson(alpha), NMC, method = [discrete, range = 0 .. 10]) end proc

(3)

beta := (0.25, 0.5, 3.2);
lambda~(beta);

beta := .25, .5, 3.2

 

Vector[row]([4.57130358807504, 1.09192020636463, 1.97674200954566, 0.234701357828733e-1, 4.72860788767325, 3.01395542869283, 7.90729746689391, 1.82867063149206, 1.74402753698013, 4.53696603219946])

(4)

MC := 10:

NumberOfNegativeLambdaValues := 0:

U := NULL:

for u in lambda~(beta) do
   if u > 0 then
     b := round~(convert(B(u, MC), list)) ;
     U := U, b[];
   else
     # printf("Error, parameter is negative(%a)\n", u):
     NumberOfNegativeLambdaValues := NumberOfNegativeLambdaValues + 1:
   end if;
end do;

printf("%2.f %% of the lambda have a negative value\n", evalf(NumberOfNegativeLambdaValues/MC)*100);

DataSummary([U]);
Histogram([U], discrete, minbins=N+1, thickness=20)

 0 % of the lambda have a negative value

 

Vector[column]([[mean = 3.30000000000000], [standarddeviation = 2.75790873780490], [skewness = .542012393898590], [kurtosis = 2.20622450437294], [minimum = 0.], [maximum = 10.], [cumulativeweight = 100.]])

 

 

REMARKS

The frequency of occurrences of negative lambda values can be easily determined

RV__0 := 1:
RV__1 := RandomVariable(Binomial(N, 4/10));
RV__2 := RandomVariable(Normal(0, 1));

RV__3 := unapply( add(beta__||k *~ RV__||k, k=0..2), (seq(beta__||k, k=0..2)) )

_R11

 

_R12

 

proc (beta__0, beta__1, beta__2) options operator, arrow; _R11*beta__1+_R12*beta__2+beta__0 end proc

(5)

(Mean, StandardDeviation)(RV__3(seq(beta__||k, k=0..2)) );

4*beta__1+beta__0, (1/5)*(60*beta__1^2+25*beta__2^2)^(1/2)

(6)

(Mean, StandardDeviation)(RV__3(beta) );

FrequencyOfNegativeLambda := Probability(RV__3(beta) <= 0);

2.25, 3.292415527

 

.2472407931

(7)
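
To illustrate the claim made earlier that this frequency goes to zero as N increases, here is a minimal sketch (not part of the attached worksheet; the values of N in the loop are arbitrary):

# Illustrative sketch: recompute P(lambda <= 0) for increasing values of the
# Binomial parameter N, keeping beta = (0.25, 0.5, 3.2) as above.
# The probability decreases because the mean of lambda grows like 0.4*N*beta__1.
for n in [10, 20, 50, 100] do
   RVn := 0.25 + 0.5*RandomVariable(Binomial(n, 4/10)) + 3.2*RandomVariable(Normal(0, 1)):
   printf("N = %3d :  P(lambda <= 0) = %.4f\n", n, Probability(RVn <= 0, numeric)):
end do: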

 


 

Download Poisson.mw


 

Statistics:-LinearFit can do the job quite well, so there is no bug in Statistics:-LinearFit (or at least not on this problem).

The issue comes from the fact that the problem does not, strictly speaking, belong to the framework of Ordinary Least Squares (OLS) regression, where we aim to estimate both parameters A and B of the model:

Expectation(Y | X = x) = B*x + A

The reason is that here the slope B is known (B = 3).
In the attached file I show how rewriting the initial problem puts it back into the OLS framework, but this is neither immediate nor natural.

However, the simplest way to estimate "a" comes from classical OLS estimation results: the best unbiased estimator of "a" is just the mean of pts2 - 3*pts1.

 


 

restart:

with(Statistics):

pts1 := Vector([3, 5, 21]);

pts2 := Vector([4, 6, 16])

pts1 := Vector(3, {(1) = 3, (2) = 5, (3) = 21})

 

pts2 := Vector(3, {(1) = 4, (2) = 6, (3) = 16})

(1)

It couldn't be easier

# From standard results in Ordinary Least Squares (OLS) regression,
# the Best Linear Unbiased Estimator (BLUE) of the intercept A
# in the model: Expectation(Y | X) = B*X+A is just
# BLUE(A) = Mean(Y) - BLUE(B)*Mean(X)
#
# Here BLUE(B) is supposed to be known and set to 3.
# Then

BLUE__A := Mean(pts2 - 3 *~ pts1);
add (pts2 - 3 *~ pts1) / 3

HFloat(-20.333333333333332)

 

-61/3

(2)
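
As a quick sanity check (a sketch, not part of the attached worksheet): minimizing the residual sum of squares with respect to "a" alone leads to the same value.

# Sketch: with the slope fixed to 3, the least-squares problem reduces to a
# one-parameter minimization whose solution is the mean of pts2 - 3*pts1.
RSS := add((pts2[i] - 3*pts1[i] - a)^2, i = 1 .. 3):
solve(diff(RSS, a) = 0, a);    # returns -61/3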

Using Statistics:-LinearFit

# The problem is not a standard OLS problem, but it can nevertheless be solved with LinearFit
# (at the price of a few pirouettes)
#
# Rewrite the problem this way:
#   1/ define Z as Y-3*X

Z := (X, Y) -> Y - 3*X;

#   2/ imagine you repeat 3 times the same experiment for a same value of the
#      control variable "a" ("a" is the only regressor of the problem)
#      Write Newpts2 the vector of the outcomes of these experiments
Newpts2 := Z~(pts1, pts2);
#   3/ define then the "design matrix" as an arbitrary constant vector,
#      for instance < 1, 1, 1>

Newpts1 := Vector([1, 1, 1]);

#   4/ apply LinearFit as usual, remembering that the only regressor is "a"

L := LinearFit([a], Newpts1, Newpts2, a);

#   5/ Substitute "a" by the value it takes in the vector Newpts1

A := subs(a=1, L);
convert(%, rational);

Z := proc (X, Y) options operator, arrow; Y-3*X end proc

 

Newpts2 := Vector(3, {(1) = -5, (2) = -9, (3) = -47})

 

Newpts1 := Vector(3, {(1) = 1, (2) = 1, (3) = 1})

 

-HFloat(20.33333333333334)*a

 

HFloat(-20.33333333333334)

 

-61/3

(3)

 

 


 

Download Simplest.mw

Simulating a "true" Galton board is very difficult.
In all rigor, you have to simulate the drop of a sphere (a disc in 2D) bouncing off many fixed cylindrical pins (discs in 2D).
The trajectory of the falling disc (assuming the problem is 2D) is a succession of free flights under gravity (rather simple to code), each of them ending when the disc hits a fixed pin (another disc).
Determining the impact point is a difficult problem.
On top of this, you have to describe the physics of the shock (elastic or not) and account for possible slipping and rolling...
Last but not least, a true Galton board has a limited width and the pins are confined in a kind of funnel whose walls modify the fall at the boundaries of the board.

What I propose is a simpler simulation based on a random walk on a "triangular lattice" (Pascal's lattice).
Starting from position (i, j), a "ball" moves either to position (i+1, j+1) or to (i+1, j-1) with equal probabilities. The process is repeated from the initial position (0, 0) a given number of times.
Think of this as some kind of "discrete Galton board"; a minimal sketch of the resulting landing distribution is given just before the worksheet below.

Look at the animation in the attached file.
If it suits you I will give you some more explanation about it... if required.


WATCH OUT: I had difficulties loading the file with the animations, so I commented out the corresponding line (written in red on a yellow background).
To run the animation, once the plot produced by the command Fallouts(100, 10) has been displayed, just click on it and run the animation with the controls in the upper left of your Maple worksheet.

It is likely that this code could be written in a cleverer and more efficient way.
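
As announced above, here is a minimal standalone sketch (the names N, NP, K and landing are mine, not taken from the attached file) of what the fallout distribution of such a discrete Galton board looks like: after N rows the number of rightward moves K follows a Binomial(N, 1/2) distribution, so the landing abscissa is 2*K - N.

with(Statistics):
# Minimal sketch: the fallout histogram of the discrete Galton board is a shifted
# binomial distribution (it does not reproduce the animation of the worksheet).
N  := 10:     # number of pin rows (arbitrary value)
NP := 1000:   # number of balls dropped (arbitrary value)
K  := Sample(Binomial(N, 1/2), NP):
landing := 2 *~ K -~ N:
Histogram(landing, discrete, minbins = N+1);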
 

restart:

See, for instance, https://en.wikipedia.org/wiki/Random_walk

with(Statistics):
with(plots):
with(plottools):

randomize():

R := rand(0 .. 1):

Fallouts := proc(NP, N)
   local  Fallout:
   local  GaltonBoard, depart, t, n, c, T, GBH;
   global cs, GB:
   
 
   # fallout of a single "ball"
 

   Fallout := proc(np)
      local GBH, depart, t, n, c, T;
      global cs, GB:


      GBH := copy(GB):
      depart := [0, 0];
      t      := depart:
      for n from 1 to N do
         depart := depart +~ [1, (2*R()-1)];
         t      := t, depart
      end do:   
      t  := [t, [depart[1]+1, depart[2]]];
      c  := unapply(CurveFitting:-Spline(op~(1, t), op~(2, t), x, degree=1), x);
      T  := transform((x,y) -> [y+N+1, x]):
  
      if np > 1 then
         GBH := display(GBH, T(Histogram([cs], discrete, thickness=5, frequencyscale=absolute)) )
      end if;
      
      cs := cs, depart[2];
   
   plots[animate](plot, [c(x), x=0..A, color=red, thickness=2], A=0..N+1, frames=N+2, background=GBH);
   
   end proc:

   GaltonBoard := [seq(seq([i, j], j in [seq(-i..i, 2)]), i=0..N)]:
   GB := plot(GaltonBoard, style=point, symbol=solidcircle, symbolsize=10, scaling=constrained, gridlines=true, color=blue):

   cs := NULL:

   plots[animate]( Fallout, [np], np=1..NP, frames=NP);

end proc:

global cs:

# drop 100 "balls" on a "discrete Galton Board" made of 10 layers of pins

# Fallouts(100, 10);   # TO BE UNCOMMENTED

cs

cs

(1)

# Histogram([cs])

 


 

Download Galton-RandomWalk.mw

Maybe you could try this


 

restart

with(Statistics):

XG := h -> RandomVariable(Normal(0, h))

proc (h) options operator, arrow; Statistics:-RandomVariable(Normal(0, h)) end proc

(1)

MaxIt := 1000

1000

(2)

randomize():

# Be careful: h must be strictly positive

h := evalf([seq(1-k/MaxIt, k=0..MaxIt-1)]):
h[1],h[-1];

1., 0.1000000000e-2

(3)

# do you really need to print the results?
#
# for i from 1 to MaxIt do
#    printf("%a\n%a\n", Sample(XG(h[i]), 5), i);
# end do:

# Maybe this could suit you?


# Stage 1: generate a random matrix by sampling Normal(0, 1) (restricted to -1 .. 1 by the envelope method)

M := Sample(XG(1), [MaxIt, 5], method = [envelope, range = -1 .. 1]):


# Watch out: stage 2 is computationally expensive.
#            It is presented here only to help understand the method.
#
# Stage 2: rescale M according to the standard deviations 1, 0.999, ..., 0.001

S := LinearAlgebra:-DiagonalMatrix(h):

# Stage 3: do the product S.M

Res := S.M

Res := Vector(4, {(1) = ` 1000 x 5 `*Matrix, (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

(4)

# The same thing in a more efficient way

M   := CodeTools:-Usage( Sample(XG(1), [MaxIt, 5], method = [envelope, range = -1 .. 1]) ):
Res := CodeTools:-Usage( <  seq(M[n, ..] *~ h[n], n=1..MaxIt) > );

memory used=0.73MiB, alloc change=0 bytes, cpu time=35.00ms, real time=11.00ms, gc time=0ns
memory used=0.60MiB, alloc change=0 bytes, cpu time=9.00ms, real time=3.00ms, gc time=0ns

 

Res := Vector(4, {(1) = ` 1000 x 5 `*Matrix, (2) = `Data Type: `*float[8], (3) = `Storage: `*rectangular, (4) = `Order: `*Fortran_order})

(5)
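
A quick consistency check (just a sketch, not in the attached file): both constructions give the same matrix, since row n of Res is row n of M multiplied by h[n].

# Sketch: the row-wise scaling matches the DiagonalMatrix product computed above
# (S and M are the objects defined earlier); the norm of the difference should be 0.
LinearAlgebra:-Norm(S . M - Res);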

for i from 1 to MaxIt do
  # printf("%3d : %a\n", i, [entries(Res[i,..], nolist)]);
end do:

 


 

Download Gaussian_Reply.mw
