C++ numerical computation

I have to say that C++ is much faster than Python for linear algebra. Here I implement several optimization algorithms in C++, and I found that for matrix computations we need the Eigen library, much like importing packages in Python.
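As a minimal sketch of what using Eigen looks like (assuming Eigen 3 is installed and its headers are on the compiler's include path; the scalar example later in this post does not actually need it), solving a small linear system A x = b goes like this:

#include <iostream>
#include <Eigen/Dense>   // Eigen is header-only; only the include path matters

int main(){
    // Build a small linear system A x = b and solve it,
    // roughly the C++ counterpart of numpy.linalg.solve in Python.
    Eigen::Matrix3d A;
    A << 4, 1, 0,
         1, 3, 1,
         0, 1, 2;
    Eigen::Vector3d b(1.0, 2.0, 3.0);

    // Column-pivoting Householder QR is a good general-purpose dense solver in Eigen.
    Eigen::Vector3d x = A.colPivHouseholderQr().solve(b);
    std::cout << "solution:\n" << x << std::endl;
    return 0;
}

Since Eigen is header-only, nothing has to be linked; the compiler just needs to be told where the Eigen headers live.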

#include <iostream>
#include <cmath>
#include <cstdio>
#include <utility>

using namespace std;

// Function pointer type for a scalar function of one variable,
// used for both the target function and its gradient.
typedef double (*fun)(double);

class Fast_gradient{
public:
    Fast_gradient(fun, fun, double, double);   // constructor
    pair<double, int> fast_gradient();         // gradient descent with backtracking line search
    pair<double, int> newton();                // Newton iteration for target_function(x) = 0
    double armijo(double, double);             // backtracking (Armijo) line search
    double quasi_newton(double);               // not implemented yet
    double CG(double);                         // not implemented yet
private:
    fun target_function;
    fun grad_fast_gradient;
    double init_x;
    double tol;
};

Fast_gradient::Fast_gradient(fun target_function, fun grad_fast_gradient, double init_x, double tol){
    this->target_function = target_function;
    this->grad_fast_gradient = grad_fast_gradient;
    this->init_x = init_x;
    this->tol = tol;
}

// Newton's method for solving target_function(x) = 0.
// Returns the approximate root and the number of iterations.
pair<double, int> Fast_gradient::newton(){
    double current_tol = 1.0;
    double x = init_x;
    int step = 0;
    while (current_tol > tol){
        x = init_x - target_function(init_x) / grad_fast_gradient(init_x);
        current_tol = fabs(x - init_x);
        init_x = x;
        step += 1;
    }
    return make_pair(x, step);
}

// Backtracking line search along the given descent direction:
// shrink the step until the Armijo sufficient-decrease condition holds.
double Fast_gradient::armijo(double x, double direction){
    double current_step = 1.0;
    double rho = 0.9;   // shrink factor
    double c1 = 1e-4;   // sufficient-decrease constant
    while (target_function(x + current_step * direction) >
           target_function(x) + c1 * current_step * grad_fast_gradient(x) * direction){
        current_step *= rho;
    }
    return current_step;
}

// Gradient descent with backtracking line search.
// Returns the approximate minimizer and the number of iterations.
pair<double, int> Fast_gradient::fast_gradient(){
    double current_tol = 1.0;
    double x = init_x;
    double direction;
    double step;
    int iter = 0;
    while (current_tol > tol){
        direction = -grad_fast_gradient(x);
        step = armijo(x, direction);
        x = x + step * direction;
        current_tol = fabs(step * direction);
        iter += 1;
    }
    return make_pair(x, iter);
}

double target_function(double x){
    return 1 - pow(x, 2);
}

double grad_target_function(double x){
    return -2 * x;
}

int main(){
    double x0 = 0.5;
    double tol = 1e-8;
    Fast_gradient solver(target_function, grad_target_function, x0, tol);
    auto [x, step] = solver.newton();   // structured bindings require C++17
    printf("Using the Newton solver we get %f\n", x);
    printf("The number of iterations is %d\n", step);
    return 0;
}
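To build and run the example (assuming the file is saved as fast_gradient.cpp; the name is arbitrary), C++17 is needed for the structured bindings in main:

g++ -std=c++17 fast_gradient.cpp -o fast_gradient
./fast_gradient

For f(x) = 1 - x^2 with x0 = 0.5, the Newton iteration converges to the root x = 1.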

So far I have finished the first two methods; the other methods will be added later.
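For the quasi_newton method that is declared but not yet defined, one possible sketch (my own assumption, not necessarily how it will end up being implemented) is to approximate the second derivative with a secant difference of the gradient, so that only target_function and grad_fast_gradient are needed:

// Sketch only: secant-based quasi-Newton iteration for a 1-D stationary point.
// The offset 1e-4 used to start the difference quotient is an arbitrary choice.
double Fast_gradient::quasi_newton(double x){
    double x_prev = x - 1e-4;
    double g = grad_fast_gradient(x);
    double g_prev = grad_fast_gradient(x_prev);
    double current_tol = 1.0;
    while (current_tol > tol){
        // Secant approximation of the second derivative.
        double hess = (g - g_prev) / (x - x_prev);
        double x_new = x - g / hess;
        current_tol = fabs(x_new - x);
        x_prev = x;  g_prev = g;
        x = x_new;   g = grad_fast_gradient(x);
    }
    return x;
}

In one dimension this reduces to the secant method applied to the gradient, which avoids asking the user for a second derivative.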