Instead of solving the problem as a two-stage Benders decomposition with an SDDP problem in the second stage, add the first stage of your Benders problem as a node in the policy graph, and add the state variables from Benders as state variables in your SDDP model.
If you consider the example from Example: deterministic to stochastic · SDDP.jl, we could extend the problem to include a capacity-expansion decision in the first stage with:
# Policy graph with T + 1 stages: stage 1 is the deterministic Benders
# first stage; stages 2..T+1 are the stochastic SDDP stages, so node `t`
# corresponds to row `t - 1` of `data`.
model = SDDP.LinearPolicyGraph(;
    stages = T + 1,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # State variables shared across both "models": the capacity chosen in
    # stage 1, and the reservoir storage level.
    @variable(sp, x_capacity >= 0, SDDP.State, initial_value = 0)
    @variable(sp, x_storage >= 0, SDDP.State, initial_value = reservoir_initial)
    if t == 1
        # Benders first-stage: choose the capacity; storage passes through
        # unchanged because no operation happens in this node.
        @constraint(sp, x_capacity.out <= reservoir_max)
        @constraint(sp, x_storage.out == x_storage.in)
        @stageobjective(sp, x_capacity.out)
    else
        # SDDP stages: storage is limited by the first-stage capacity, and
        # the capacity state is carried forward unchanged.
        @constraint(sp, x_storage.out <= x_capacity.in)
        @constraint(sp, x_capacity.out == x_capacity.in)
        @variable(sp, 0 <= u_flow <= flow_max)
        @variable(sp, 0 <= u_thermal)
        @variable(sp, 0 <= u_spill)
        @variable(sp, ω_inflow)
        Ω, P = [-2, 0, 5], [0.3, 0.4, 0.3]
        SDDP.parameterize(sp, Ω, P) do ω
            # Index `data` by `t - 1`, consistent with the `demand` and
            # `cost` lookups below: stage 1 is the Benders node, so node `t`
            # maps to data row `t - 1`. (Using `t` here was an off-by-one
            # that also read past the last row when `t == T + 1`.)
            fix(ω_inflow, data[t-1, :inflow] + ω)
            return
        end
        # Reservoir balance and demand satisfaction for this stage.
        @constraint(sp, x_storage.out == x_storage.in - u_flow - u_spill + ω_inflow)
        @constraint(sp, u_flow + u_thermal == data[t-1, :demand])
        @stageobjective(sp, data[t-1, :cost] * u_thermal)
    end
    return
end
There’s no need to solve a separate Benders decomposition problem.