Here we briefly explore some of the SciPy tools for constrained optimization.
Table of Contents¶
1. Rosenbrock Function¶
We start with a simple problem of optimizing a function constrained to a rectangular domain so we can plot the iterations:
$$ \min f(x, y), \qquad -r \leq x, y \leq r. $$ As an objective function, we use a modified Rosenbrock function
$$ f(x, y) = (x-x_0)^2 + 100\left(y - y_0\left(\frac{x}{x_0}\right)^2\right)^2. $$To specify the constraints, we need a function g(x)
that returns the inequalities and the objective function f(x)
.
First we demonstrate the SLSQP algorithm, which seems to work very well. This could further be improved by adding an analytic Jacobian, but that can always be done later as part of optimization.
To keep track of everything, we package our methods with a class, and add a few extra features to allow plotting the function with a callback during iterations, and at the end.
from __future__ import division
from IPython.display import display, clear_output
%pylab inline --no-import-all
import mpld3
import scipy.optimize
sp = scipy
class Minimization(object):
    """Minimize a modified Rosenbrock function over the square |x|, |y| <= r.

    The objective is

        f(x, y) = (x0 - x)**2 + a * (y - y0 * (x / x0)**2)**2

    where the target point (x0, y0) is stored in ``self.x`` and the
    optimizer's starting point in ``self.x0``.  Every evaluation of ``f``
    or ``g`` is recorded in ``self._tries`` and every accepted step in
    ``self._steps`` so the optimizer's progress can be plotted.
    """
    a = 100.0        # weight of the quadratic "valley" term in f
    r = 1.0          # half-width of the square constraint box
    x = [1.2, 1.6]   # (x0, y0): parameters (and minimum location) of f
    x0 = [0, 0]      # starting point handed to the optimizer
    display = True   # if True, redraw the figure from the callback each step

    def f(self, x):
        """Objective function; ``x`` is a pair (x, y)."""
        self._tries.append(1*x)  # 1*x makes a copy; for plotting only
        x, y = x
        x0, y0 = self.x
        return (x0 - x)**2 + self.a * (y - y0*(x/x0)**2)**2

    def g(self, x):
        """Inequality constraints, each entry >= 0 when |x|, |y| <= r."""
        self._tries.append(x)  # For plotting only
        x, y = x
        # Generalized: the box half-width was hard-coded to 1; it now
        # honours self.r (default 1.0, so behavior is unchanged).
        r = self.r
        return [r - x, x + r, r - y, y + r]

    def minimize(self, **kw):
        """Run ``sp.optimize.minimize`` on ``f`` with the box constraints.

        Keyword arguments are forwarded to ``sp.optimize.minimize`` and
        may override any default assembled here (e.g. ``method=``).
        """
        self._steps = []  # For plotting only
        self._tries = []  # For plotting only
        self.fig = None   # For plotting only
        args = dict(
            fun=self.f,
            x0=self.x0,
            callback=self.callback,
            constraints=[dict(type='ineq', fun=self.g)])
        args.update(kw)
        return sp.optimize.minimize(**args)

    def callback(self, x=None, _display=False):
        """Record the accepted step ``x`` and redraw the progress plot.

        Drawing is skipped unless ``self.display`` is true or ``_display``
        forces it (used by :meth:`plot` for a final figure).
        """
        if x is not None:
            self._steps.append(1*x)
        if not _display and not self.display:
            return
        if not self.fig:
            # First call: build the figure, contours and constraint box.
            r = self.r
            self.fig = plt.figure(figsize=(10, 5))
            self.lines = [plt.plot([], [], '.', alpha=0.5)[0],
                          plt.plot([], [], 'o', alpha=0.5)[0]]
            xs = np.linspace(-1.1*r, 1.1*r, 100)
            ys = np.linspace(-1.1*r, 1.1*r, 100)
            X, Y = np.meshgrid(xs, ys)
            # Evaluate f on the grid, then undo the bookkeeping append.
            F = self.f((X, Y)); self._tries.pop()
            plt.contour(X, Y, F, 100, colors='gray')
            plt.plot([-r, r, r, -r, -r], [-r, -r, r, r, -r], '-y')
            # BUG FIX: the y-limits previously reused x.min()/x.max().
            plt.axis([xs.min(), xs.max(), ys.min(), ys.max()])
            plt.gca().set_aspect(1)
        if self._tries:
            self.lines[0].set_data(*zip(*self._tries))
        if self._steps:
            self.lines[1].set_data(*zip(*self._steps))
        display(self.fig)
        clear_output(wait=True)

    def plot(self):
        """Draw (or redraw) the figure even when ``self.display`` is False."""
        self.callback(_display=True)
# Run the SLSQP minimization with live plotting (display is True by
# default), then show the final figure and print the optimizer result.
# NOTE: ``m`` is reused by the COBYLA cell below.
m = Minimization()
res = m.minimize(method='SLSQP')
m.plot()
print(res)
mpld3.display()  # render the matplotlib figure interactively via mpld3
The COBYLA method also supports constraints, but performs much more poorly for non-linear functions of this type. Note that the COBYLA method does not support a callback, so we just plot the results at the end.
# Re-run with COBYLA.  Live plotting is disabled (COBYLA passes no
# callback anyway); the target (x0, y0) is moved to [2, 2], outside the
# unit box, so the constraints become active at the solution.
m.display = False
m.x = [2.0, 2.0]
res = m.minimize(method='COBYLA', options=dict(disp=True))
m.plot()
print(res)
mpld3.display()  # render the matplotlib figure interactively via mpld3