I am trying to do a simple minimisation with SciPy's optimiser as below, but the expected result does not match the optimiser's output:
import numpy as np
import pandas as pd
import scipy.optimize as sp

x0 = [0.2, 0.2, 0.2, 0.4]
x_expected = np.array([0., 0., 0., 1])
bounds = ((0, 1), (0, 1), (0, 1), (0, 1))
df = pd.DataFrame(
    {'t1': [1, 2, 3, 2, 1], 't2': [3, 4, 5, 6, 2], 't3': [2, 3, 2, 3, 5], 'c': [1, 1.1, 1.2, 1.3, 1.4]}).to_numpy()
factors = pd.DataFrame(
    {"tk": ["t1", "t2", "t3", "c"], "class": ["x", "y", "x", "z"]})
min_max_class = pd.DataFrame(
    {"class": ["x", "y", "z"], "min_allocation": [0., 0., 0.], "max_allocation": [0.0001, 0.5, 1.0000]})
allocation = factors.join(min_max_class.set_index('class'), on='class')
min = allocation['min_allocation'].to_numpy()
max = allocation['max_allocation'].to_numpy()
ineq1 = {'type': 'ineq',
         "fun": lambda x: np.greater_equal(x, min).sum() + np.less_equal(x, max).sum() - 2 * df.shape[1]}
eq1 = {'type': 'eq', "fun": lambda x: 1 - np.sum(x)}
result = sp.minimize(fun=lambda x: np.std(np.dot(df, x.T), ddof=1), x0=x0, bounds=bounds, method='SLSQP',
                     options={'disp': True}, constraints=[eq1, ineq1])
print(f"final result : {np.round(result.x, 2)}, objective value: {result.fun}")
print(f" Manual : {x_expected}, objective value: {np.std(np.dot(df, x_expected.T), ddof=1)}")
I would expect the final result to be close to x_expected, but that is not the case. Any ideas?
Answer
The SLSQP solver failed to find the optimal value for your problem. Rewriting your setup with named functions makes this easier to see:
import numpy as np
import scipy.optimize

# df, min, max, x0 and bounds are taken from the question
def obj(x):
    return np.std(np.dot(df, x.T), ddof=1)
def ineq(x):
    # Non-negative when every x[i] lies within [min[i], max[i]]
    return np.greater_equal(x, min).sum() + np.less_equal(x, max).sum() - 2 * df.shape[1]
def eq(x):
    # Zero when the allocations sum to 1
    return 1 - np.sum(x)
ineq1 = {'type': 'ineq', "fun": ineq}
eq1 = {'type': 'eq', "fun": eq}
result = scipy.optimize.minimize(fun=obj, x0=x0, bounds=bounds, method='SLSQP',
                                 options={'disp': True}, constraints=[eq1, ineq1])
> "Positive directional derivative for linesearch    (Exit mode 8)"
The solver gives up with exit mode 8, and the inequality constraint does not hold at the point it returns:
x_opt = np.round(result.x,2)
print(f"opt.   result : {x_opt}, objective: {obj(x_opt)}, ineq: {ineq(x_opt)}, eq: {eq(x_opt)}")
print(f"manual result : {x_expected}, objective: {obj(x_expected)}, ineq: {ineq(x_expected)}, eq: {eq(x_expected)}")
opt.   result : [0.03 0. 0. 0.97], objective: 0.1554107460891942, ineq: -1, eq: 0.0
manual result : [0. 0. 0. 1.], objective: 0.15811388300841892, ineq: 0, eq: 0.0
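As an aside, you do not have to read the disp output to notice this; the failure can be detected programmatically from the OptimizeResult fields. A minimal sketch:

# Sketch: detect a failed SLSQP run from the result object instead of the console output
if not result.success:
    # for the run above, status is 8 ("Positive directional derivative for linesearch")
    print(f"solver failed (status {result.status}): {result.message}")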
Starting from the initial guess [0, 0, 0, 0] lets the solver find the optimal value for your problem:
x0 = [0.0, 0.0, 0.0, 0.0]
bounds = ((0, 1), (0, 1), (0, 1), (0, 1))
ineq1 = {'type': 'ineq', "fun": ineq}
eq1 = {'type': 'eq', "fun": eq}
result = scipy.optimize.minimize(fun=obj, x0=x0, bounds=bounds, method='SLSQP', options={'disp': True}, constraints=[eq1, ineq1])
> Optimization terminated successfully    (Exit mode 0)
print(f"opt.   result : {x_opt}, objective: {obj(x_opt)}, ineq: {ineq(x_opt)}, eq: {eq(x_opt)}")
> opt.   result : [0. 0. 0. 1.], objective: 0.15811388300841892, ineq: 0, eq: 0.0
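Part of the difficulty is that the counting-style inequality constraint is flat almost everywhere, so it gives SLSQP no gradient information. One alternative (not what the original code does, just a sketch under that assumption) is to express the per-asset min/max limits as smooth, vector-valued 'ineq' constraints, which SLSQP accepts directly:

# Sketch (assumption, not part of the original answer): replace the counting
# constraint with componentwise conditions x - min >= 0 and max - x >= 0
ineq_lo = {'type': 'ineq', 'fun': lambda x: x - min}   # each component must be >= 0
ineq_hi = {'type': 'ineq', 'fun': lambda x: max - x}   # each component must be >= 0
result2 = scipy.optimize.minimize(fun=obj, x0=[0.2, 0.2, 0.2, 0.4], bounds=bounds,
                                  method='SLSQP', options={'disp': True},
                                  constraints=[eq1, ineq_lo, ineq_hi])
print(result2.x, result2.fun)

Since these limits are just per-variable box constraints here, an even simpler option along the same lines would be to fold min and max directly into the bounds argument.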