Minimal optimization

Minimal optimization

Pedagogical example of gradient-based optimization with pymiediff: optimize the radius of a homogeneous sphere to maximize scattering at one wavelength.

author: P. Wiecha, 03/2026

imports

import torch
import pymiediff as pmd

setup

# target vacuum wavelength at which scattering is optimized
wl0 = torch.tensor([700.0])  # nm
# corresponding vacuum wavenumber (same length units as wl0, i.e. nm^-1)
k0 = 2 * torch.pi / wl0

# refractive index of the (single-layer) sphere material, dispersionless
n_p = 3.5
# refractive index of the surrounding environment
n_env = 1.0

optimization

# initial guess
# initial guess for the sphere radius (nm); requires_grad=True makes it
# the free parameter of the gradient-based optimization
r_opt = torch.tensor([60.0], requires_grad=True)
optimizer = torch.optim.Adam([r_opt], lr=0.5)

for i in range(100):
    optimizer.zero_grad()
    # re-create the particle every iteration so the autograd graph
    # connects q_sca to the *current* value of r_opt
    particle = pmd.Particle(
        mat_env=n_env,
        r_layers=r_opt,
        mat_layers=[n_p],
    )
    q_sca = particle.get_cross_sections(k0)["q_sca"]

    loss = -q_sca  # *maximize* scattering: minus sign
    loss.backward()

    # log BEFORE stepping so the printed radius matches the printed Q_sca.
    # (The original printed after optimizer.step(), pairing the updated
    # radius with the efficiency computed at the previous radius.)
    print(f"iter {i:02d}: r = {r_opt} nm, Q_sca = {q_sca}")
    optimizer.step()
iter 00: r = tensor([60.5000], requires_grad=True) nm, Q_sca = 0.18496468376820724
iter 01: r = tensor([61.0004], requires_grad=True) nm, Q_sca = 0.1922647180158999
iter 02: r = tensor([61.5014], requires_grad=True) nm, Q_sca = 0.19982345328516576
iter 03: r = tensor([62.0032], requires_grad=True) nm, Q_sca = 0.20765438236250375
iter 04: r = tensor([62.5062], requires_grad=True) nm, Q_sca = 0.21577197071516183
iter 05: r = tensor([63.0105], requires_grad=True) nm, Q_sca = 0.2241917923149316
iter 06: r = tensor([63.5164], requires_grad=True) nm, Q_sca = 0.23293063044173437
iter 07: r = tensor([64.0242], requires_grad=True) nm, Q_sca = 0.24200654921045686
iter 08: r = tensor([64.5340], requires_grad=True) nm, Q_sca = 0.25143907598337584
iter 09: r = tensor([65.0461], requires_grad=True) nm, Q_sca = 0.2612493957647751
iter 10: r = tensor([65.5608], requires_grad=True) nm, Q_sca = 0.27146041138194305
iter 11: r = tensor([66.0783], requires_grad=True) nm, Q_sca = 0.28209673550984043
iter 12: r = tensor([66.5988], requires_grad=True) nm, Q_sca = 0.2931853185049834
iter 13: r = tensor([67.1226], requires_grad=True) nm, Q_sca = 0.30475548849903994
iter 14: r = tensor([67.6498], requires_grad=True) nm, Q_sca = 0.31683932076742033
iter 15: r = tensor([68.1808], requires_grad=True) nm, Q_sca = 0.3294716470915768
iter 16: r = tensor([68.7157], requires_grad=True) nm, Q_sca = 0.3426910002151752
iter 17: r = tensor([69.2548], requires_grad=True) nm, Q_sca = 0.35653943790720294
iter 18: r = tensor([69.7983], requires_grad=True) nm, Q_sca = 0.37106368838784315
iter 19: r = tensor([70.3465], requires_grad=True) nm, Q_sca = 0.3863153521053127
iter 20: r = tensor([70.8996], requires_grad=True) nm, Q_sca = 0.4023519242617624
iter 21: r = tensor([71.4579], requires_grad=True) nm, Q_sca = 0.41923757902768033
iter 22: r = tensor([72.0215], requires_grad=True) nm, Q_sca = 0.437044337043237
iter 23: r = tensor([72.5909], requires_grad=True) nm, Q_sca = 0.4558536327323797
iter 24: r = tensor([73.1661], requires_grad=True) nm, Q_sca = 0.4757571935542218
iter 25: r = tensor([73.7476], requires_grad=True) nm, Q_sca = 0.49685982660856204
iter 26: r = tensor([74.3355], requires_grad=True) nm, Q_sca = 0.5192818377080124
iter 27: r = tensor([74.9303], requires_grad=True) nm, Q_sca = 0.5431612591526228
iter 28: r = tensor([75.5321], requires_grad=True) nm, Q_sca = 0.5686584061637675
iter 29: r = tensor([76.1414], requires_grad=True) nm, Q_sca = 0.5959608459256813
iter 30: r = tensor([76.7585], requires_grad=True) nm, Q_sca = 0.6252886273066243
iter 31: r = tensor([77.3837], requires_grad=True) nm, Q_sca = 0.6569018631098519
iter 32: r = tensor([78.0174], requires_grad=True) nm, Q_sca = 0.6911120958199889
iter 33: r = tensor([78.6601], requires_grad=True) nm, Q_sca = 0.7282933512427576
iter 34: r = tensor([79.3121], requires_grad=True) nm, Q_sca = 0.7688995126762762
iter 35: r = tensor([79.9738], requires_grad=True) nm, Q_sca = 0.8134866685155012
iter 36: r = tensor([80.6456], requires_grad=True) nm, Q_sca = 0.862740505663337
iter 37: r = tensor([81.3281], requires_grad=True) nm, Q_sca = 0.917516886205482
iter 38: r = tensor([82.0214], requires_grad=True) nm, Q_sca = 0.9788937373188918
iter 39: r = tensor([82.7258], requires_grad=True) nm, Q_sca = 1.0482410094854353
iter 40: r = tensor([83.4417], requires_grad=True) nm, Q_sca = 1.1273174327526132
iter 41: r = tensor([84.1688], requires_grad=True) nm, Q_sca = 1.2184055698695975
iter 42: r = tensor([84.9071], requires_grad=True) nm, Q_sca = 1.3244902161388357
iter 43: r = tensor([85.6559], requires_grad=True) nm, Q_sca = 1.4495158036984301
iter 44: r = tensor([86.4145], requires_grad=True) nm, Q_sca = 1.5987247473150903
iter 45: r = tensor([87.1813], requires_grad=True) nm, Q_sca = 1.779127666992308
iter 46: r = tensor([87.9549], requires_grad=True) nm, Q_sca = 2.0001272255688853
iter 47: r = tensor([88.7331], requires_grad=True) nm, Q_sca = 2.2743045110122124
iter 48: r = tensor([89.5141], requires_grad=True) nm, Q_sca = 2.6183608670477514
iter 49: r = tensor([90.2961], requires_grad=True) nm, Q_sca = 3.0540029034789744
iter 50: r = tensor([91.0785], requires_grad=True) nm, Q_sca = 3.6082403123786544
iter 51: r = tensor([91.8618], requires_grad=True) nm, Q_sca = 4.311621710818441
iter 52: r = tensor([92.6484], requires_grad=True) nm, Q_sca = 5.191187622863343
iter 53: r = tensor([93.4430], requires_grad=True) nm, Q_sca = 6.251992344740558
iter 54: r = tensor([94.2508], requires_grad=True) nm, Q_sca = 7.440187549771791
iter 55: r = tensor([95.0711], requires_grad=True) nm, Q_sca = 8.592812220461175
iter 56: r = tensor([95.8785], requires_grad=True) nm, Q_sca = 9.428947711249668
iter 57: r = tensor([96.6079], requires_grad=True) nm, Q_sca = 9.700602482480686
iter 58: r = tensor([97.1940], requires_grad=True) nm, Q_sca = 9.465103498673408
iter 59: r = tensor([97.6151], requires_grad=True) nm, Q_sca = 9.037862595186354
iter 60: r = tensor([97.8813], requires_grad=True) nm, Q_sca = 8.6566177050499
iter 61: r = tensor([98.0119], requires_grad=True) nm, Q_sca = 8.400048455568156
iter 62: r = tensor([98.0261], requires_grad=True) nm, Q_sca = 8.272234501966263
iter 63: r = tensor([97.9406], requires_grad=True) nm, Q_sca = 8.25828987686952
iter 64: r = tensor([97.7697], requires_grad=True) nm, Q_sca = 8.342079487196582
iter 65: r = tensor([97.5263], requires_grad=True) nm, Q_sca = 8.508438449658234
iter 66: r = tensor([97.2230], requires_grad=True) nm, Q_sca = 8.740155701124362
iter 67: r = tensor([96.8734], requires_grad=True) nm, Q_sca = 9.01303696257738
iter 68: r = tensor([96.4937], requires_grad=True) nm, Q_sca = 9.291313928084007
iter 69: r = tensor([96.1047], requires_grad=True) nm, Q_sca = 9.527329549473448
iter 70: r = tensor([95.7317], requires_grad=True) nm, Q_sca = 9.671642442396692
iter 71: r = tensor([95.4031], requires_grad=True) nm, Q_sca = 9.695572192765797
iter 72: r = tensor([95.1444], requires_grad=True) nm, Q_sca = 9.613049284798416
iter 73: r = tensor([94.9721], requires_grad=True) nm, Q_sca = 9.47809526205237
iter 74: r = tensor([94.8916], requires_grad=True) nm, Q_sca = 9.355090971638653
iter 75: r = tensor([94.8985], requires_grad=True) nm, Q_sca = 9.288934717502745
iter 76: r = tensor([94.9821], requires_grad=True) nm, Q_sca = 9.294850101411523
iter 77: r = tensor([95.1274], requires_grad=True) nm, Q_sca = 9.362930199738061
iter 78: r = tensor([95.3168], requires_grad=True) nm, Q_sca = 9.467122227333165
iter 79: r = tensor([95.5312], requires_grad=True) nm, Q_sca = 9.574836422225319
iter 80: r = tensor([95.7509], requires_grad=True) nm, Q_sca = 9.657075489341883
iter 81: r = tensor([95.9575], requires_grad=True) nm, Q_sca = 9.6973270382921
iter 82: r = tensor([96.1353], requires_grad=True) nm, Q_sca = 9.695439585925708
iter 83: r = tensor([96.2733], requires_grad=True) nm, Q_sca = 9.664448988415954
iter 84: r = tensor([96.3651], requires_grad=True) nm, Q_sca = 9.622850901290082
iter 85: r = tensor([96.4087], requires_grad=True) nm, Q_sca = 9.58723204658002
iter 86: r = tensor([96.4060], requires_grad=True) nm, Q_sca = 9.568183405948568
iter 87: r = tensor([96.3615], requires_grad=True) nm, Q_sca = 9.569425787538272
iter 88: r = tensor([96.2823], requires_grad=True) nm, Q_sca = 9.588741309625233
iter 89: r = tensor([96.1768], requires_grad=True) nm, Q_sca = 9.619660783890753
iter 90: r = tensor([96.0551], requires_grad=True) nm, Q_sca = 9.653499254652322
iter 91: r = tensor([95.9278], requires_grad=True) nm, Q_sca = 9.681669217873395
iter 92: r = tensor([95.8056], requires_grad=True) nm, Q_sca = 9.698009176214175
iter 93: r = tensor([95.6982], requires_grad=True) nm, Q_sca = 9.700509783583016
iter 94: r = tensor([95.6136], requires_grad=True) nm, Q_sca = 9.691699260370628
iter 95: r = tensor([95.5572], requires_grad=True) nm, Q_sca = 9.677367074567771
iter 96: r = tensor([95.5316], requires_grad=True) nm, Q_sca = 9.664165788829372
iter 97: r = tensor([95.5361], requires_grad=True) nm, Q_sca = 9.657184720881682
iter 98: r = tensor([95.5675], requires_grad=True) nm, Q_sca = 9.65845350249842
iter 99: r = tensor([95.6206], requires_grad=True) nm, Q_sca = 9.666795208934342

result

# final summary — note that q_sca still holds the value from the last
# loop iteration, i.e. the efficiency evaluated before the final
# optimizer step, so it may lag r_opt by one update
print(f"\nOptimized radius: {r_opt.item():.2f} nm")
print(f"Scattering at {wl0.item():.1f} nm: Q_sca = {q_sca.item():.4f}")
Optimized radius: 95.62 nm
Scattering at 700.0 nm: Q_sca = 9.6668

Total running time of the script: (0 minutes 3.616 seconds)

Estimated memory usage: 649 MB

Gallery generated by Sphinx-Gallery