root/PythonPackages/OpenOpt/openopt/examples/nlp_1.py

Revision 1039, 2.5 kB (checked in by dmitrey, 4 months ago)

OO examples: changes toward Python3 compatibility

1 """
2 Example:
3 (x0-5)^2 + (x2-5)^2 + ... +(x149-5)^2 -> min
4
5 subjected to
6
7 # lb<= x <= ub:
8 x4 <= 4
9 8 <= x5 <= 15
10
11 # Ax <= b
12 x0+...+x149 >= 825
13 x9 + x19 <= 3
14 x10+x11 <= 9
15
16 # Aeq x = beq
17 x100+x101 = 11
18
19 # c(x) <= 0
20 2*x0^4-32 <= 0
21 x1^2+x2^2-8 <= 0
22
23 # h(x) = 0
24 (x[149]-1)**6 = 0
25 (x[148]-1.5)**6 = 0
26 """

from openopt import NLP
from numpy import cos, arange, ones, zeros
N = 150

# objective function:
f = lambda x: ((x-5)**2).sum()

# objective function gradient (optional):
df = lambda x: 2*(x-5)
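
# Optional: a quick central-difference check of df (a minimal sketch using
# plain numpy; p.checkdf() below performs a similar test). Uncomment to run;
# the test point xt and the step eps are arbitrary choices:
#from numpy import eye, array, allclose
#xt, eps = 8*cos(arange(N)), 1e-6
#fd = array([(f(xt + eps*e) - f(xt - eps*e))/(2*eps) for e in eye(N)])
#assert allclose(fd, df(xt), atol=1e-4)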

# start point (initial estimation)
x0 = 8*cos(arange(N))

# c(x) <= 0 constraints
c = [lambda x: 2*x[0]**4 - 32, lambda x: x[1]**2 + x[2]**2 - 8]

# dc(x)/dx: gradients of the non-linear inequality constraints (optional):
dc0 = lambda x: [8 * x[0]**3] + [0]*(N-1)
dc1 = lambda x: [0, 2 * x[1], 2 * x[2]] + [0]*(N-3)
dc = [dc0, dc1]

# h(x) = 0 constraints
def h(x):
    return (x[N-1]-1)**6, (x[N-2]-1.5)**6
    # other possible return types: numpy array, matrix, Python list, tuple
# or just h = lambda x: [(x[149]-1)**6, (x[148]-1.5)**6]


# dh(x)/dx: gradients of the non-linear equality constraints (optional):
def dh(x):
    r = zeros((2, N))
    r[0, -1] = 6*(x[N-1]-1)**5
    r[1, -2] = 6*(x[N-2]-1.5)**5
    return r

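# The same central-difference pattern can check the dh Jacobian (a sketch,
# commented out; reuses the arbitrary xt and eps from the df check above):
#from numpy import eye, array, allclose, asarray
#Jfd = array([(asarray(h(xt + eps*e)) - asarray(h(xt - eps*e)))/(2*eps) for e in eye(N)]).T
#assert allclose(Jfd, dh(xt), atol=1e-4)
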
# lower and upper bounds on variables
lb = -6*ones(N)
ub = 6*ones(N)
ub[4] = 4
lb[5], ub[5] = 8, 15

# general linear inequality constraints
A = zeros((3, N))
A[0, 9] = 1
A[0, 19] = 1      # row 0: x9 + x19 <= 7
A[1, 10:12] = 1   # row 1: x10 + x11 <= 9
A[2] = -ones(N)   # row 2: -(x0+...+x149) <= -825, i.e. x0+...+x149 >= 825
b = [7, 9, -825]

# general linear equality constraints
Aeq = zeros(N)
Aeq[100:102] = 1   # x100 + x101 = 11
beq = 11
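
# Sanity check of the linear constraints at a trial point (a sketch; the
# trial point ones(N) is an arbitrary choice):
#from numpy import dot
#xt2 = ones(N)
#print(dot(A, xt2))    # -> [2. 2. -150.], to be kept elementwise <= b
#print(dot(Aeq, xt2))  # -> 2.0, to be driven to beq = 11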

# required constraints tolerance; the default for NLP is 1e-6
contol = 1e-7

# NB: the solver algencan ignores xtol and ftol; using maxTime, maxCPUTime,
# maxIter, maxFunEvals or fEnough is recommended instead.
# Note that for algencan gtol means the norm of the projected gradient of
# the Augmented Lagrangian, so it should be something like 1e-3...1e-5.
gtol = 1e-7 # (default gtol = 1e-6)

# Assign problem:
# 1st arg - objective function
# 2nd arg - start point
p = NLP(f, x0, df=df, c=c, dc=dc, h=h, dh=dh, A=A, b=b, Aeq=Aeq, beq=beq,
        lb=lb, ub=ub, gtol=gtol, contol=contol, iprint=50, maxIter=10000, maxFunEvals=1e7, name='NLP_1')

# optional: graphic output, requires pylab (matplotlib)
p.plot = True

# optional: check the user-supplied 1st derivatives df, dc, dh
p.checkdf()
p.checkdc()
p.checkdh()

solver = 'ralg'
#solver = 'algencan'
#solver = 'ipopt'
#solver = 'scipy_slsqp'

# solve the problem
r = p.solve(solver, plot=0) # string argument is the solver name; plot=0 overrides the p.plot setting above

# r.xf and r.ff are the optimal point and the optimal objective function value
# r.ff should be something like 132.05
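
# Inspect the result (a sketch; xf, ff, istop and msg are fields of the
# OpenOpt result object returned by p.solve):
#print('istop: %d (%s)' % (r.istop, r.msg))
#print('objective: %f' % r.ff)
#print('x[148] = %f, x[149] = %f' % (r.xf[-2], r.xf[-1]))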