Open Access

A comparison of the convergence rates of Hestenes’ conjugate Gram-Schmidt method without derivatives with other numerical optimization methods



Fig. 1: Level curves of Rosenbrock's Banana Valley function.
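For reference, the objective in every listing below is Rosenbrock's function, defined in Table 5 as f := (x1, x2) -> 100*(x2 - x1^2)^2 + (x1 - 1)^2. Restated (this summary is assembled from the formulas in the listings, not quoted from the paper):

f(x_1, x_2) = 100\,(x_2 - x_1^2)^2 + (x_1 - 1)^2,
\qquad
\nabla f(x_1, x_2) = \begin{pmatrix} 2(x_1 - 1) - 400\,x_1 (x_2 - x_1^2) \\ 200\,(x_2 - x_1^2) \end{pmatrix},

with unique minimizer x* = (1, 1), where f(x*) = 0. Every method starts from the classical initial point (-1.2, 1).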

Table 2 (j.ijmce-2025-0010.tab.002): Maple code for the Runge-Kutta method.

Step 1: restart: Digits:=50;
Step 2: f[1] := (x, y) -> 2*(x - 1) - 400*x*(y - x*x);  # first gradient component
Step 3: f[2] := (x, y) -> 200*(y - x*x);  # second gradient component
Step 4: h := 0.001;
Step 5: t[1] := 0; x1[1] := -1.2; x2[1] := 1.0;
Step 6: Perform the following iteration:
for i from 1 to 100 do
  # classical fourth-order Runge-Kutta stage evaluations
  for j from 1 to 2 do
    k1[j] := f[j](x1[i], x2[i])
  end do:
  for j from 1 to 2 do
    k2[j] := f[j](x1[i] + (h/2)*k1[1], x2[i] + (h/2)*k1[2])
  end do:
  for j from 1 to 2 do
    k3[j] := f[j](x1[i] + (h/2)*k2[1], x2[i] + (h/2)*k2[2])
  end do:
  for j from 1 to 2 do
    k4[j] := f[j](x1[i] + h*k3[1], x2[i] + h*k3[2])
  end do:
  x1[i + 1] := x1[i] + (h/6)*(k1[1] + 2*k2[1] + 2*k3[1] + k4[1]):
  x2[i + 1] := x2[i] + (h/6)*(k1[2] + 2*k2[2] + 2*k3[2] + k4[2]):
  t[i + 1] := t[i] + h
end do:
Step 7: Print every tenth iterate:
for i from 1 by 10 to 100 do print(t[i], x1[i], x2[i]);
end do;
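Step 6 applies the classical fourth-order Runge-Kutta scheme to the two-component system x' = f(x). Written out (a standard restatement of RK4 matching the code, not quoted from the paper), with x_i = (x1[i], x2[i]):

k_1 = f(x_i), \quad
k_2 = f\!\left(x_i + \tfrac{h}{2} k_1\right), \quad
k_3 = f\!\left(x_i + \tfrac{h}{2} k_2\right), \quad
k_4 = f(x_i + h\,k_3),

x_{i+1} = x_i + \tfrac{h}{6}\,(k_1 + 2 k_2 + 2 k_3 + k_4),
\qquad
t_{i+1} = t_i + h.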

Table 3 (j.ijmce-2025-0010.tab.003): Maple code for Newton's method.

Step 1: restart: Digits:=150;
Step 2: with(Student[LinearAlgebra]):
Step 3: Perform the following iteration:
x := <-1.2, 1>:
for i from 1 to 9 do
  # Hessian of Rosenbrock's function at the current iterate
  Jacobian := <<-400*(x[2] - x[1]^2) + 800*x[1]^2 + 2, -400*x[1]> | <-400*x[1], 200>>:
  # Newton step: subtract (Hessian inverse) . gradient
  x := x - Jacobian^(-1) . <-400*x[1]*x[2] + 400*x[1]^3 + 2*(x[1] - 1), 200*(x[2] - x[1]^2)>:
  print(x[1], x[2]):
  y[i, 1] := x[1]; y[i, 2] := x[2];
end do;
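Each pass of the loop performs the Newton iteration; restated (my summary of the code, not quoted from the paper):

x^{(k+1)} = x^{(k)} - H\!\left(x^{(k)}\right)^{-1} \nabla f\!\left(x^{(k)}\right),
\qquad
H(x_1, x_2) = \begin{pmatrix} 1200\,x_1^2 - 400\,x_2 + 2 & -400\,x_1 \\ -400\,x_1 & 200 \end{pmatrix},

where H is the Hessian of Rosenbrock's function; the code's first Matrix entry, -400*(x[2] - x[1]^2) + 800*x[1]^2 + 2, simplifies to 1200 x_1^2 - 400 x_2 + 2.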

Table 4 (j.ijmce-2025-0010.tab.004): Maple code for the Steepest Descent method.

Step 1: restart: Digits:=50;
Step 2: gradient[1] := (x, y) -> 2*(x - 1) - 400*x*(y - x*x);
Step 3: gradient[2] := (x, y) -> 200*(y - x*x);
Step 4: NORMofGRADIENT := sqrt((2*(x - 1) - 400*x*(y - x*x))^2 + (200*(y - x*x))^2);
Step 5: NORMALIZED[1] := (x, y) -> (2*(1 - x) + 400*x*(y - x*x))/sqrt((2*(x - 1) - 400*x*(y - x*x))^2 + (200*(y - x*x))^2);
Step 6: NORMALIZED[2] := (x, y) -> 200*(x*x - y)/sqrt((2*(x - 1) - 400*x*(y - x*x))^2 + (200*(y - x*x))^2);
Step 7: f[1] := (x, y) -> (2*(1 - x) + 400*x*(y - x*x))/sqrt((2*(x - 1) - 400*x*(y - x*x))^2 + (200*(y - x*x))^2);
Step 8: f[2] := (x, y) -> 200*(x*x - y)/sqrt((2*(x - 1) - 400*x*(y - x*x))^2 + (200*(y - x*x))^2);
Step 9: h := 0.005;
Step 10: t[1] := 0; x1[1] := -1.2; x2[1] := 1.0;
Step 11: Perform the following iteration:
for i from 1 to 100 do
  # fourth-order Runge-Kutta step on the normalized negative gradient field
  for j from 1 to 2 do
    k1[j] := f[j](x1[i], x2[i])
  end do:
  for j from 1 to 2 do
    k2[j] := f[j](x1[i] + (h/2)*k1[1], x2[i] + (h/2)*k1[2])
  end do:
  for j from 1 to 2 do
    k3[j] := f[j](x1[i] + (h/2)*k2[1], x2[i] + (h/2)*k2[2])
  end do:
  for j from 1 to 2 do
    k4[j] := f[j](x1[i] + h*k3[1], x2[i] + h*k3[2])
  end do:
  x1[i + 1] := x1[i] + (h/6)*(k1[1] + 2*k2[1] + 2*k3[1] + k4[1]):
  x2[i + 1] := x2[i] + (h/6)*(k1[2] + 2*k2[2] + 2*k3[2] + k4[2]):
  t[i + 1] := t[i] + h
end do:
Step 12: Print every tenth iterate:
for i from 1 by 10 to 100 do print(t[i], x1[i], x2[i]);
end do;
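Steps 5 through 8 replace the raw gradient with its negated, unit-length version, so the system being integrated is the unit-speed steepest-descent flow (my restatement):

\frac{dx}{dt} = -\,\frac{\nabla f(x)}{\lVert \nabla f(x) \rVert},

which the same RK4 scheme then advances with the larger step size h = 0.005.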

Table 5 (j.ijmce-2025-0010.tab.005): Maple code for Hestenes' conjugate Gram-Schmidt (CGS) method without derivatives.

Step 1: restart: Digits:=400;
Step 2: sigma := 1e-120; rho := 2*sigma; epsilon := 0.1e-60;  # sigma: central-difference step; rho: conjugacy probe step
Step 3: u[1, 1] := 1; u[1, 2] := 0; u[2, 1] := 0; u[2, 2] := 1;  # initial orthonormal directions
Step 4: p[1, 1] := u[1, 1]; p[1, 2] := u[1, 2];
Step 5: f := (x1, x2) -> 100*(x2 - x1^2)^2 + (x1 - 1)^2;  # Rosenbrock's function
Step 6: x[1, 1] := -1.2; x[1, 2] := 1;
Step 7: Perform the following iteration:
for j from 1 to 10 do
  # derivative-free central differences along p[1]
  c[1] := (f(x[1, 1] - sigma*p[1, 1], x[1, 2] - sigma*p[1, 2]) - f(x[1, 1] + sigma*p[1, 1], x[1, 2] + sigma*p[1, 2]))/(2*sigma):
  d[1] := (f(x[1, 1] - sigma*p[1, 1], x[1, 2] - sigma*p[1, 2]) - 2*f(x[1, 1], x[1, 2]) + f(x[1, 1] + sigma*p[1, 1], x[1, 2] + sigma*p[1, 2]))/sigma^2:
  a[1] := c[1]/d[1]:
  # minimize along p[1]
  x[2, 1] := x[1, 1] + a[1]*p[1, 1]:
  x[2, 2] := x[1, 2] + a[1]*p[1, 2]:
  # repeat the first difference at the probe point x + rho*u[2]
  c[2, 1] := (f(x[1, 1] + rho*u[2, 1] - sigma*p[1, 1], x[1, 2] + rho*u[2, 2] - sigma*p[1, 2]) - f(x[1, 1] + rho*u[2, 1] + sigma*p[1, 1], x[1, 2] + rho*u[2, 2] + sigma*p[1, 2]))/(2*sigma):
  a[2, 1] := c[2, 1]/d[1]:
  b[2, 1] := (a[2, 1] - a[1])/rho:  # Gram-Schmidt conjugacy coefficient
  for i from 1 to 2 do
    pbar[2, i] := u[2, i] + b[2, 1]*p[1, i]
  end do:
  for i from 1 to 2 do
    p[2, i] := pbar[2, i]/sqrt(pbar[2, 1]^2 + pbar[2, 2]^2)  # normalize the conjugate direction
  end do:
  for k from 2 to 2 do
    c[k] := (f(x[1, 1] - sigma*p[k, 1], x[1, 2] - sigma*p[k, 2]) - f(x[1, 1] + sigma*p[k, 1], x[1, 2] + sigma*p[k, 2]))/(2*sigma)
  end do:
  for k from 2 to 2 do
    d[k] := (f(x[1, 1] - sigma*p[k, 1], x[1, 2] - sigma*p[k, 2]) - 2*f(x[1, 1], x[1, 2]) + f(x[1, 1] + sigma*p[k, 1], x[1, 2] + sigma*p[k, 2]))/sigma^2
  end do:
  for k from 2 to 2 do
    a[k] := c[k]/d[k]
  end do:
  for i from 1 to 2 do
    x[3, i] := x[2, i] + a[2]*p[2, i]  # minimize along p[2]
  end do:
  for i from 1 to 2 do
    x[1, i] := x[3, i]  # restart the cycle from the new point
  end do:
  print(x[3, 1], x[3, 2]);
  y[j, 1] := x[3, 1]; y[j, 2] := x[3, 2];
end do
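The quantities c and d are derivative-free central-difference estimates; to second order in sigma (a standard Taylor-expansion reading of the loop, not quoted from the paper),

c = \frac{f(x - \sigma p) - f(x + \sigma p)}{2\sigma} \approx -\,p^{\mathsf{T}} \nabla f(x),
\qquad
d = \frac{f(x - \sigma p) - 2 f(x) + f(x + \sigma p)}{\sigma^2} \approx p^{\mathsf{T}} H(x)\, p,

so a = c/d is the exact step to the minimum of the local quadratic model along p, and b[2, 1] is the Gram-Schmidt coefficient that makes p[2] conjugate to p[1] with respect to the Hessian without evaluating any derivative.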

Table 6 (j.ijmce-2025-0010.tab.006): Maple code for computing the asymptotic constants.

for i from 1 to 9 do
  C[i] := sqrt((y[i + 1, 1] - 1)^2 + (y[i + 1, 2] - 1)^2)/sqrt((y[i, 1] - 1)^2 + (y[i, 2] - 1)^2);
end do;
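Each C_i is the ratio of successive error norms measured from the known minimizer x* = (1, 1):

C_i = \frac{\lVert y_{i+1} - x^{\ast} \rVert_2}{\lVert y_i - x^{\ast} \rVert_2};

ratios bounded away from 1 indicate linear convergence, while ratios shrinking toward 0 indicate superlinear convergence.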

The asymptotic constants of a nonlinear function.

Asymptotic constant   CGS method   Newton method   Steepest Descent method   Runge-Kutta method
C1                    0.857485     0.857485        0.453589                  0.785244
C2                    0.027425     0.027425        0.454544                  0.785678
C3                    0.243358     0.243358        0.455502                  0.786108
C4                    0.003072     0.003072        0.456463                  0.786534
C5                    0.200003     0.200003        0.457428                  0.786955
C6                    0.003073     0.003073        0.458396                  0.787372
C7                    0.200002     0.200002        0.459367                  0.787783
C8                    0.003073     0.003073        0.460341                  0.788190