Chapter 06 - Shape optimization
In [1]:
from math import sqrt
import matplotlib.pyplot as plt
import torch
from torchfem import Truss
from utils import plot_contours
plt.rcParams["text.usetex"] = True
plt.rcParams["font.size"] = 14
torch.set_default_dtype(torch.double)
In [2]:
def box_constrained_descent(
    func, x_init, x_lower, x_upper, eta=0.1, max_iter=100, tol=1e-10, **extra_args
):
    # Projected gradient descent: take a gradient step and clamp the result to the box
    x = x_init.clone().requires_grad_()
    for _ in range(max_iter):
        x_old = x.clone()
        grad = torch.autograd.grad(func(x, **extra_args).sum(), x)[0]
        x = x - eta * grad
        x = torch.clamp(x, x_lower, x_upper)
        if torch.norm(x - x_old) < tol:
            return x
    return x
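As a quick illustrative example (the quadratic and bounds below are chosen only for this sketch), minimizing $(x_1 - 2)^2 + x_2^2$ over the box $[-1, 1]^2$ with the projected gradient scheme should return a point close to the box-constrained minimizer $(1, 0)$:

In [ ]:
# Illustrative example: projected gradient descent on a simple quadratic with a
# box constraint. The box-constrained minimizer is (1, 0).
def quadratic(x):
    return (x[0] - 2.0) ** 2 + x[1] ** 2

print(
    box_constrained_descent(
        quadratic,
        torch.tensor([0.0, 0.5]),
        torch.tensor([-1.0, -1.0]),
        torch.tensor([1.0, 1.0]),
    )
)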
In [3]:
def MMA(func, x_k, L_k, U_k):
    # Evaluate the function and its gradient at the expansion point x_k
    x_lin = x_k.clone().requires_grad_()
    grads = torch.autograd.grad(func(x_lin), x_lin)[0]
    neg_grad = grads < 0.0
    pos_grad = ~neg_grad
    f_k = func(x_k)

    def approximation(x):
        # Convex MMA approximation of func around x_k with asymptotes L_k and U_k
        p = torch.zeros_like(grads)
        p[pos_grad] = (U_k[pos_grad] - x_k[pos_grad]) ** 2 * grads[pos_grad]
        q = torch.zeros_like(grads)
        q[neg_grad] = -((x_k[neg_grad] - L_k[neg_grad]) ** 2) * grads[neg_grad]
        return (
            f_k
            - torch.sum(p / (U_k - x_k) + q / (x_k - L_k))
            + torch.sum(p / (U_k - x) + q / (x - L_k))
        )

    return approximation, grads
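The returned closure implements the convex approximation of Svanberg's method of moving asymptotes around the expansion point $\mathbf{x}^k$ with asymptotes $\mathbf{L}^k < \mathbf{x}^k < \mathbf{U}^k$. In the notation of the code, it corresponds to

$$\tilde{f}(\mathbf{x}) = f(\mathbf{x}^k) + \sum_i \left( \frac{p_i}{U_i^k - x_i} - \frac{p_i}{U_i^k - x_i^k} + \frac{q_i}{x_i - L_i^k} - \frac{q_i}{x_i^k - L_i^k} \right),$$

where $p_i = \left(U_i^k - x_i^k\right)^2 \partial_i f(\mathbf{x}^k)$ if $\partial_i f(\mathbf{x}^k) > 0$ and $p_i = 0$ otherwise, and $q_i = -\left(x_i^k - L_i^k\right)^2 \partial_i f(\mathbf{x}^k)$ if $\partial_i f(\mathbf{x}^k) < 0$ and $q_i = 0$ otherwise. The approximation matches the value and gradient of $f$ at $\mathbf{x}^k$ and is convex between the asymptotes.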
Example 31 - Three bar truss shape optimization
In [ ]:
# Structure
nodes = torch.tensor([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])
elements = torch.tensor([[0, 1], [0, 2], [1, 2]])
# Truss
three_bar_truss = Truss(nodes.clone(), elements)
# Forces
three_bar_truss.forces[0, 1] = -0.2
# Constraints
three_bar_truss.constraints[1, 0] = True
three_bar_truss.constraints[1, 1] = True
three_bar_truss.constraints[2, 0] = True
# Cross-sectional areas and Young's moduli
three_bar_truss.areas[:] = 1.0
three_bar_truss.moduli[:] = 10.0
# Plot
three_bar_truss.plot()
In [5]:
def compute_lengths(truss):
    # Compute the length of each truss member from its end node coordinates
    start_nodes = truss.nodes[truss.elements[:, 0]]
    end_nodes = truss.nodes[truss.elements[:, 1]]
    dx = end_nodes - start_nodes
    return torch.linalg.norm(dx, dim=-1)
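As a quick illustrative check, the three members of the truss defined above have lengths $1$, $\sqrt{2}$, and $1$:

In [ ]:
# Illustrative check: member lengths of the three bar truss are 1, sqrt(2), and 1.
print(compute_lengths(three_bar_truss))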
In [6]:
# Mask selecting the design variables: vertical positions of nodes 0 and 2
mask = torch.zeros_like(nodes, dtype=bool)
mask[0, 1] = True
mask[2, 1] = True
# Limits on design variables
x_0 = nodes[mask].ravel()
x_min = x_0 - 0.5
x_max = x_0 + 0.5
# Compute the initial volume as bound for the volume constraint
l = compute_lengths(three_bar_truss)
V0 = torch.inner(three_bar_truss.areas, l)
In [ ]:
three_bar_truss.plot()
# Modify masked nodes to maximum values
three_bar_truss.nodes[mask] = x_max
three_bar_truss.plot(default_color="lightgray", node_labels=False)
# Modify masked nodes to minimum values
three_bar_truss.nodes[mask] = x_min
three_bar_truss.plot(default_color="darkgray", node_labels=False)
plt.axis([-0.5, 1.5, -1.0, 1.6])
plt.savefig(
"../figures/three_bar_truss_shapes.svg", transparent=True, bbox_inches="tight"
)
plt.show()
# Reset nodes to initial values
three_bar_truss.nodes = nodes.clone()
In [8]:
def f(xx):
    # Compute compliance at the design point xx
    three_bar_truss.nodes[mask] = xx.reshape(three_bar_truss.nodes[mask].shape)
    u_k, f_k = three_bar_truss.solve()
    return torch.inner(f_k.ravel(), u_k.ravel())


def g(xx):
    # Compute the volume constraint g(x) = V(x) - V0 <= 0
    three_bar_truss.nodes[mask] = xx.reshape(three_bar_truss.nodes[mask].shape)
    return torch.inner(three_bar_truss.areas, compute_lengths(three_bar_truss)) - V0
# Compute initial asymptotes
s = 0.7
x_k = x_0
L_k = x_k - s * (x_max - x_min)
U_k = x_k + s * (x_max - x_min)
# Compute the current approximation functions
f_tilde, _ = MMA(f, x_k, L_k, U_k)
g_tilde, _ = MMA(g, x_k, L_k, U_k)
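By construction, the MMA approximations reproduce the exact function values at the expansion point. The small illustrative check below verifies this for $\tilde{f}$ and $\tilde{g}$:

In [ ]:
# Illustrative check: the MMA approximations match the original functions at the
# expansion point x_k.
print(f(x_k).item(), f_tilde(x_k).item())
print(g(x_k).item(), g_tilde(x_k).item())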
In [9]:
# Perform function evaluations at all points of the domain
buffer = 0.05
x_1 = torch.linspace(x_min[0] - buffer, x_max[0] + buffer, 150)
x_2 = torch.linspace(x_min[1] - buffer, x_max[1] + buffer, 150)
x = torch.stack(torch.meshgrid(x_1, x_2, indexing="xy"), dim=2)
f_eval = torch.zeros((len(x_1) * len(x_2)))
f_tilde_eval = torch.zeros((len(x_1) * len(x_2)))
for i, xx in enumerate(x.reshape((-1, 2))):
    f_eval[i] = f(xx)
    f_tilde_eval[i] = f_tilde(xx)
g_eval = torch.zeros((len(x_2), len(x_1)))
g_tilde_eval = torch.zeros((len(x_2), len(x_1)))
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        g_eval[i, j] = g(x[i, j])
        g_tilde_eval[i, j] = g_tilde(x[i, j])
In [ ]:
# Plot original problem
plot_contours(
x,
f_eval.reshape(x.shape[0], x.shape[1]),
box=[x_min, x_max],
figsize=(4, 4),
)
plt.contour(x_1, x_2, g_eval.detach(), [0.0], colors="k", linewidths=3)
plt.contourf(x_1, x_2, g_eval.detach(), [0, 10], colors="gray", alpha=0.5)
plt.plot([x_0[0]], [x_0[1]], "o", color="black")
plt.annotate("$\\mathbf{x}^0$", (0.02, 1.05), color="black")
plt.axis("equal")
plt.savefig(
"../figures/three_bar_truss_shape_original.svg",
transparent=True,
bbox_inches="tight",
)
In [ ]:
# Plot approximated problem
plot_contours(
x,
f_tilde_eval.reshape(x.shape[0], x.shape[1]),
box=[x_min, x_max],
figsize=(4, 4),
)
plt.contour(x_1, x_2, g_eval.detach(), [0.0], colors="k", linewidths=3)
plt.contourf(x_1, x_2, g_eval.detach(), [0, 10], colors="gray", alpha=0.5)
plt.contour(
x_1, x_2, g_tilde_eval.detach(), [0.0], colors="k", linewidths=3, linestyles="--"
)
plt.contourf(x_1, x_2, g_tilde_eval.detach(), [0, 10], colors="gray", alpha=0.5)
plt.plot([x_0[0]], [x_0[1]], "o", color="black")
plt.annotate("$\\mathbf{x}^0$", (0.02, 1.05), color="black")
plt.axis("equal")
plt.savefig(
"../figures/three_bar_truss_shape_mma.svg", transparent=True, bbox_inches="tight"
)
Optimization
In [12]:
def optimize(truss, x_0, x_min, x_max, V_0, mask, iter=3):
    L = []
    U = []
    s = 0.7
    x = [x_0]

    # Define the initial value, lower bound, and upper bound of the dual variable mu
    mu_0 = torch.tensor([0.01])
    mu_lower = torch.tensor([1e-10])
    mu_upper = torch.tensor([100.0])

    def f(xx):
        # Solve the truss problem at the design point xx and compute compliance
        truss.nodes[mask] = xx.reshape(truss.nodes[mask].shape)
        u_k, f_k = truss.solve()
        return torch.inner(f_k.ravel(), u_k.ravel())

    def g(xx):
        # Compute the volume constraint g(x) = V(x) - V_0 <= 0
        truss.nodes[mask] = xx.reshape(truss.nodes[mask].shape)
        return torch.inner(truss.areas, compute_lengths(truss)) - V_0

    # Iterate solutions
    for k in range(iter):
        # Update asymptotes with heuristic procedure
        if k <= 1:
            L.append(x[k] - s * (x_max - x_min))
            U.append(x[k] + s * (x_max - x_min))
        else:
            L_k = torch.zeros_like(L[k - 1])
            U_k = torch.zeros_like(U[k - 1])
            # Shrink all oscillating asymptotes
            osci = (x[k] - x[k - 1]) * (x[k - 1] - x[k - 2]) < 0.0
            L_k[osci] = x[k][osci] - s * (x[k - 1][osci] - L[k - 1][osci])
            U_k[osci] = x[k][osci] + s * (U[k - 1][osci] - x[k - 1][osci])
            # Expand all non-oscillating asymptotes
            L_k[~osci] = x[k][~osci] - 1 / sqrt(s) * (x[k - 1][~osci] - L[k - 1][~osci])
            U_k[~osci] = x[k][~osci] + 1 / sqrt(s) * (U[k - 1][~osci] - x[k - 1][~osci])
            L.append(L_k)
            U.append(U_k)

        # Compute lower and upper move limits in this step
        x_min_k = torch.max(x_min, 0.9 * L[k] + 0.1 * x[k])
        x_max_k = torch.min(x_max, 0.9 * U[k] + 0.1 * x[k])

        # Compute the current approximation functions
        f_tilde, f_grad = MMA(f, x[k], L[k], U[k])
        g_tilde, g_grad = MMA(g, x[k], L[k], U[k])

        # Define the Lagrangian
        def lagrangian(xx, mu):
            return f_tilde(xx) + mu * g_tilde(xx)

        # Define x_star by minimizing the Lagrangian w.r.t. x analytically
        def x_star(mu):
            x_hat = torch.zeros_like(f_grad)
            # Case 1: objective and constraint increase -> lower bound
            cond1 = (f_grad > 0.0) & (g_grad > 0.0)
            x_hat[cond1] = x_min[cond1]
            # Case 2: objective and constraint decrease -> upper bound
            cond2 = (f_grad < 0.0) & (g_grad < 0.0)
            x_hat[cond2] = x_max[cond2]
            # Case 3: objective increases, constraint decreases -> stationary point
            cond3 = (f_grad > 0.0) & (g_grad < 0.0)
            roots = torch.sqrt(
                (-mu * g_grad[cond3] * (x[k][cond3] - L[k][cond3]) ** 2)
                / (U[k][cond3] - x[k][cond3]) ** 2
                / f_grad[cond3]
            )
            x_hat[cond3] = (U[k][cond3] * roots + L[k][cond3]) / (1 + roots)
            # Case 4: objective decreases, constraint increases -> stationary point
            cond4 = (f_grad < 0.0) & (g_grad > 0.0)
            roots = torch.sqrt(
                (-mu * g_grad[cond4] * (U[k][cond4] - x[k][cond4]) ** 2)
                / (x[k][cond4] - L[k][cond4]) ** 2
                / f_grad[cond4]
            )
            x_hat[cond4] = (U[k][cond4] + L[k][cond4] * roots) / (1 + roots)
            return torch.clamp(x_hat, x_min_k, x_max_k)

        # Define the negative dual function (to be minimized)
        def dual_function(mu):
            return -lagrangian(x_star(mu), mu)

        # Maximize the dual function by projected gradient descent on its negative
        mu_star = box_constrained_descent(
            dual_function, mu_0, mu_lower, mu_upper, eta=0.001
        )

        # Plot the dual function (just for debugging)
        mu = torch.linspace(0.0, 0.1, 50)
        dual = [-dual_function(m) for m in mu]
        with torch.no_grad():
            plt.plot(mu, dual)
            plt.axvline(mu_star.item(), color="black")
            plt.title(f"Dual function in iteration {k}")
            plt.show()

        # Compute current optimal point with dual solution
        x.append(x_star(mu_star).detach())

    return x
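For reference, the closed-form expressions in `x_star` follow from the separable structure of the MMA approximations: the Lagrangian decouples into one-dimensional problems in each design variable. In case 3 ($\partial_i f > 0$ and $\partial_i g < 0$), stationarity of $\tilde{f} + \mu\,\tilde{g}$ with respect to $x_i$ requires

$$\frac{p_i^f}{\left(U_i - x_i\right)^2} = \frac{\mu\, q_i^g}{\left(x_i - L_i\right)^2} \quad\Rightarrow\quad \frac{x_i - L_i}{U_i - x_i} = \sqrt{\frac{\mu\, q_i^g}{p_i^f}} =: r_i \quad\Rightarrow\quad x_i^* = \frac{U_i\, r_i + L_i}{1 + r_i}$$

with $p_i^f = \left(U_i - x_i^k\right)^2 \partial_i f$ and $q_i^g = -\left(x_i^k - L_i\right)^2 \partial_i g$. Case 4 is analogous with the roles of $f$ and $g$ exchanged. In cases 1 and 2 the Lagrangian is monotone in $x_i$, so the minimum lies at the lower or upper bound (subsequently clipped to the move limits).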
Optimization of the three bar truss
In [ ]:
x_opt = optimize(three_bar_truss, x_0, x_min, x_max, V0, mask)
In [ ]:
# Plot original problem
plot_contours(
x,
f_eval.reshape(x.shape[0], x.shape[1]),
box=[x_min, x_max],
figsize=(4, 4),
paths={"MMA": x_opt},
opti=[x_opt[-1][0], x_opt[-1][1]],
)
plt.contour(x_1, x_2, g_eval.detach(), [0.0], colors="k", linewidths=3)
plt.contourf(x_1, x_2, g_eval.detach(), [0, 10], colors="gray", alpha=0.5)
plt.plot([x_0[0]], [x_0[1]], "o", color="black")
plt.annotate("$\\mathbf{x}^0$", (0.02, 1.05), color="black")
plt.axis("equal")
plt.savefig(
"../figures/three_bar_truss_shape_optimization.svg",
transparent=True,
bbox_inches="tight",
)
plt.show()
# Plot the initial design in light gray for comparison
three_bar_truss.nodes[mask] = x_0
three_bar_truss.plot(default_color="lightgray", node_labels=False)
# Plot the optimized design
three_bar_truss.nodes[mask] = x_opt[-1]
three_bar_truss.plot()
plt.savefig(
"../figures/three_bar_truss_shape_optimized.svg",
transparent=True,
bbox_inches="tight",
)
Bridge
In [ ]:
# Dimensions (A nodes per chord, B chords)
A = 17
B = 2
# Nodes on a regular grid
n1 = torch.linspace(0.0, 5.0, A)
n2 = torch.linspace(0.0, 0.5, B)
n1, n2 = torch.meshgrid(n1, n2, indexing="xy")
nodes = torch.stack([n1.ravel(), n2.ravel()], dim=1)
# Elements
elements = []
# Horizontal bars
for i in range(A - 1):
    for j in range(B):
        elements.append([i + j * A, i + 1 + j * A])
# Vertical bars
for i in range(A):
    for j in range(B - 1):
        elements.append([i + j * A, i + A + j * A])
# Diagonal bars
for i in range(A - 1):
    for j in range(B - 1):
        elements.append([i + j * A, i + 1 + A + j * A])
        elements.append([i + 1 + j * A, i + A + j * A])
elements = torch.tensor(elements)
# Truss
bridge = Truss(nodes.clone(), elements)
# Forces at bottom edge
bridge.forces[1 : A - 1, 1] = -0.1
# Constraints by the supports
bridge.constraints[0, 0] = True
bridge.constraints[0, 1] = True
bridge.constraints[A - 1, 1] = True
# Cross-sectional areas and Young's moduli
bridge.areas[:] = 1.0
bridge.moduli[:] = 500.0
# Mask for design variables: vertical positions of the top chord nodes
mask = torch.zeros_like(nodes, dtype=bool)
mask[A : 2 * A, 1] = True
# Limits on design variables
x_0 = nodes[mask].ravel()
x_min = x_0 - 0.4
x_max = x_0 + 0.5
# Compute current volume
l = compute_lengths(bridge)
V0 = torch.inner(bridge.areas, l)
# Optimize
x_opt = optimize(bridge, x_0, x_min, x_max, V0, mask, iter=50)
# Visualize
bridge.nodes[mask] = x_opt[-1].reshape(bridge.nodes[mask].shape)
bridge.plot(node_labels=False)
plt.savefig(
"../figures/bridge_shape_optimized.svg", transparent=True, bbox_inches="tight"
)
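As an illustrative postprocessing step (reusing the variables defined above), the compliance of the initial and the optimized bridge can be compared by solving both configurations:

In [ ]:
# Illustrative comparison of the compliance before and after shape optimization.
u_opt, f_opt = bridge.solve()
C_opt = torch.inner(f_opt.ravel(), u_opt.ravel())
# Temporarily reset the design variables to their initial values
bridge.nodes[mask] = x_0
u_ini, f_ini = bridge.solve()
C_ini = torch.inner(f_ini.ravel(), u_ini.ravel())
print(f"Compliance: {C_ini.item():.4f} (initial) vs. {C_opt.item():.4f} (optimized)")
# Restore the optimized shape
bridge.nodes[mask] = x_opt[-1]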