From 741bf44126768f792c9b91c8a3bba96a173b9097 Mon Sep 17 00:00:00 2001 From: Bradlee Speice Date: Sun, 10 Nov 2024 16:43:02 -0500 Subject: [PATCH] Deploy website - based on 6dcbc1a72c807487794da173fa5948acd8b76f35 --- .devcontainer/Dockerfile | 6 - .devcontainer/devcontainer.json | 13 - .gitignore | 8 - .nojekyll | 0 .vale.ini | 7 - 2011/11/webpack-industrial-complex/index.html | 32 + 2015/11/autocallable/index.html | 95 + 2015/11/welcome/index.html | 47 + 2015/12/testing-cramer/index.html | 40 + 2016/01/cloudy-in-seattle/index.html | 41 + .../complaining-about-the-weather/index.html | 30 + 2016/02/guaranteed-money-maker/index.html | 75 + .../index.html | 51 + .../index.html | 48 + 2016/03/tweet-like-me/index.html | 59 + 2016/04/tick-tock/index.html | 83 + 2016/05/the-unfair-casino/index.html | 180 ++ .../index.html | 74 + 2016/10/rustic-repodcasting/index.html | 187 ++ 2016/11/pca-audio-compression/index.html | 66 + 2018/01/captains-cookbook-part-1/index.html | 88 + 2018/01/captains-cookbook-part-2/index.html | 75 + 2018/05/hello/index.html | 9 + 2018/06/dateutil-parser-to-rust/index.html | 142 ++ 2018/09/isomorphic-apps/index.html | 184 ++ .../primitives-in-rust-are-weird/index.html | 99 + 2018/10/case-study-optimization/index.html | 92 + 2018/12/allocation-safety/index.html | 77 + .../index.html | 19 + 2019/02/08/compiler-optimizations/index.html | 46 + 2019/02/a-heaping-helping/index.html | 122 + 2019/02/stacking-up/index.html | 210 ++ 2019/02/summary/index.html | 26 + 2019/02/the-whole-world/index.html | 133 + .../index.html | 83 + 2019/05/making-bread/index.html | 29 + 2019/06/high-performance-systems/index.html | 267 ++ 2019/09/binary-format-shootout/index.html | 151 ++ 2019/12/release-the-gil/index.html | 151 ++ 404.html | 25 +- CNAME | 2 +- Gemfile | 29 - Gemfile.lock | 78 - _config.yml | 44 - _includes/footer.html | 23 - _includes/head.html | 7 - _includes/nav.html | 7 - _includes/page_header.html | 15 - _pages/about.md | 13 - _posts/2018-05-28-hello.md | 38 - _posts/2018-06-25-dateutil-parser-to-rust.md | 177 -- ...2018-09-01-primitives-in-rust-are-weird.md | 323 --- _posts/2018-09-15-isomorphic-apps.md | 294 --- _posts/2018-10-08-case-study-optimization.md | 168 -- ...-12-04-what-small-business-really-means.md | 34 - _posts/2018-12-15-allocation-safety.md | 218 -- ...02-04-understanding-allocations-in-rust.md | 113 - _posts/2019-02-05-the-whole-world.md | 337 --- _posts/2019-02-06-stacking-up.md | 601 ----- _posts/2019-02-07-a-heaping-helping.md | 254 -- _posts/2019-02-08-compiler-optimizations.md | 148 -- _posts/2019-02-09-summary.md | 35 - _posts/2019-05-03-making-bread.md | 52 - _posts/2019-06-31-high-performance-systems.md | 296 --- _posts/2019-09-28-binary-format-shootout.md | 263 -- _posts/2019-12-14-release-the-gil.md | 370 --- .../2022-11-20-webpack-industrial-complex.md | 60 - archive/index.html | 1 + assets/css/fonts.css | 15 - assets/css/style.scss | 119 - assets/css/styles.ae6ff4a3.css | 1 + assets/font/JetBrainsMono-Regular.woff | Bin 59368 -> 0 bytes assets/font/JetBrainsMono-Regular.woff2 | Bin 44688 -> 0 bytes assets/font/lato-regular-webfont.woff | Bin 38260 -> 0 bytes assets/font/lato-regular-webfont.woff2 | Bin 30308 -> 0 bytes .../1-0d5e8450555296218deb0517b80440f3.png | Bin 0 -> 119373 bytes .../1-2d6670430a11b01011e4c231ea594db1.png | Bin 0 -> 100659 bytes .../10-b7987a0ff93705d5045057cbdaa2ede9.png | Bin 0 -> 102297 bytes .../2-062e1e47a07f200ff3b1531a02812bc7.png | Bin 0 -> 139195 bytes .../2-46bb7cc9cf739d97050c199eedced1a7.png | Bin 0 -> 96423 bytes 
.../2018-10-heaptrack/heaptrack-after.png | Bin 72414 -> 0 bytes .../heaptrack-flamegraph.xcf | Bin 585864 -> 0 bytes .../3-2f5c483659f81d741809de6d095bd577.png | Bin 0 -> 113151 bytes .../3-eea635f8cfe4a12ae649ceb6c984e0cd.png | Bin 0 -> 27939 bytes .../4-63dc81954b1604cfa91f4c789da144a5.png | Bin 0 -> 102757 bytes .../4-b4c3dbfa10b1997706bc271ca71e2ff5.png | Bin 0 -> 164167 bytes .../5-8f10acd82b2f025abe57cb93d435a25f.png | Bin 0 -> 139338 bytes .../5-ae210d26729cea1700924579adf2c44c.png | Bin 0 -> 99135 bytes .../6-456ca1125f48947cf3c1c13722af95a0.png | Bin 0 -> 17889 bytes .../6-f07e72ff0b4639453034c75b2e62faba.png | Bin 0 -> 103202 bytes .../7-e0793eed6c42845d8ce4e3e79c1d44d8.png | Bin 0 -> 99213 bytes .../8-3eb2ad63e4c40b6717ee4516223d73ed.png | Bin 0 -> 115307 bytes .../9-630bd32c43e654f068e3c3bea79810e5.png | Bin 0 -> 105406 bytes ..._11_0-1c14b9b64e0cc03bce9e40f936d85202.png | Bin 0 -> 22458 bytes ..._11_0-2d0fe64b876b1c32a095f2d74b128f3c.png | Bin 0 -> 13171 bytes ..._12_0-1106bdfe947224ae55d7227b5a631d0e.svg | 121 + ..._12_0-78c0e39ab1b402672551d197e388d2ba.png | Bin 0 -> 29658 bytes ..._13_0-2d0fe64b876b1c32a095f2d74b128f3c.png | Bin 0 -> 13171 bytes ..._13_0-d109b38c5db52ca12be209a2a268c6eb.png | Bin 0 -> 97459 bytes ..._14_0-8fad23eda4377ce379465c56be3eb022.png | Bin 0 -> 24915 bytes ..._14_1-22c2867e2cee02c45b87635ab4d3b76a.png | Bin 0 -> 18634 bytes ..._16_1-144f4c4021e22c02fe015acc38d26343.png | Bin 0 -> 24316 bytes ..._16_1-eaa9ebc93d4557216c77a63df7df5192.png | Bin 0 -> 15954 bytes ..._18_1-0c204d1f3b296db4c925816140a946f2.png | Bin 0 -> 24537 bytes ..._18_1-f61745c7c099b114becca8acb3175541.png | Bin 0 -> 18530 bytes ..._20_1-76d1356ea34f0db5122ddbeb90dc117c.png | Bin 0 -> 20141 bytes ..._20_1-ea40789d2365d20a0725aca866680217.png | Bin 0 -> 19569 bytes ..._22_1-210909ddb46467b78aa3f65c8ae519fd.png | Bin 0 -> 18865 bytes ..._23_1-86585ab19c818b386afb7ec00dbec595.png | Bin 0 -> 20234 bytes ..._25_1-0db87f90eaf0febd08b4775910528a75.png | Bin 0 -> 23901 bytes ..._26_1-686b3995a84cbcac983b369843d1e222.png | Bin 0 -> 19891 bytes ...k_3_0-6ba22789c3bcc8bd99c64f3fbfa11b30.png | Bin 0 -> 10330 bytes ...k_4_0-46068cab7ea025b2c7ee79ad91eaf317.png | Bin 0 -> 25190 bytes ...k_5_0-34febf65365a147cd218c9266b77e4fb.png | Bin 0 -> 12007 bytes ...k_5_0-85416fdde3eb77aa133be1e7dab17d9f.png | Bin 0 -> 90852 bytes ...k_6_0-46e660e38189a411644eac491e9b35ad.svg | 121 + ...k_7_0-a9df30d31e6b96a01619455d5040eb8b.png | Bin 0 -> 33133 bytes ...k_8_0-f511f25f81b5b7b1baeaef267dd1a2b4.png | Bin 0 -> 25992 bytes ...k_9_0-1bd353a1fa4f84f07d0964b59b00be1e.png | Bin 0 -> 99210 bytes ...read-52bb152a7c29148e837d94bdf1755e1c.jpg} | Bin ...size-7fd54cbb2391e3e7310b0424c5f92cc1.svg} | 0 ...wasm-9ccb2be15a9bed6da44486afc266bad5.png} | Bin ...duct-607f96e84dada915fa422a7e5d524ca1.jpg} | Bin ...ound-4afad8bdb1cd6b0e40dd2fd41adca36f.jpg} | Bin ...fore-11fba190f97831448cc539ebb32fa579.png} | Bin ...seup-12ae3897c033ccb3684a88dd45592e14.png} | Bin ...fter-967bc4596c480bcc9e8410b0a7a64a00.png} | Bin ...ized-e6caf224f50df2dd56981f5b02970325.png} | Bin ...raph-5094664fa79faaf2664b38505c15ac1f.png} | Bin ...fter-cedc4c3519313f5af538364165e92c34.png} | Bin ...ault-26cc411d387f58f50cb548f8e81df1a1.png} | Bin ...ized-cfe5d7d345d32cfc1a0f297580619718.png} | Bin ...ized-a1898beaf28a3997ac86810f872539b7.png} | Bin ...type-a977835e8dcbfdb20fdda3c67ee4f76c.png} | Bin ...-fu-5715f30eef7bf3aaa26770b1247024dc.webp} | Bin ...ocks-6b9a0c44bf45210d496e2ebe2f896e0c.jpg} | Bin ...aves-cea15e9ccef6b180525abaee2d288880.jpg} | Bin 
...lass-0b56af0302f7a8c3295bf43cbab77ffe.jpg} | Bin ...mp2t-1decc5fbd88b54dadd06691ce4c629ec.png} | Bin ...burn-630e740c91d090f5790a3f4e103f1142.webp | Bin 0 -> 1762374 bytes ...fold-d7a27f12c1d2be572807105d6d7321f3.jpg} | Bin ...fold-c5a4424f9a5227f1f8e86b13b436782c.jpg} | Bin ...sing-922d19641c91922b7634fff1d6f15e6d.jpg} | Bin assets/js/0fb9ce37.ec625f5c.js | 1 + assets/js/130b4a4b.8e30fd10.js | 1 + assets/js/16c8da5a.6e786399.js | 1 + assets/js/1803684d.778a092f.js | 1 + assets/js/1806d708.22f71128.js | 1 + assets/js/1a1424c7.9077c1c0.js | 1 + assets/js/1b190668.1a035f32.js | 1 + assets/js/1d2da633.f724f156.js | 1 + assets/js/1e5192b9.9888c924.js | 1 + assets/js/1f1953c8.5837c39b.js | 1 + assets/js/1fe257c0.d2e59b40.js | 1 + assets/js/2061.3bbdbc04.js | 1 + assets/js/2062e753.bee6da1d.js | 1 + assets/js/2519.06ba1bd0.js | 4 + assets/js/319b187a.06669ed5.js | 1 + assets/js/33496f92.dec736d4.js | 1 + assets/js/35b21e3d.3e3cc712.js | 1 + assets/js/36994c47.0c1ebe43.js | 1 + assets/js/39c8d8a0.ee5aa2e0.js | 1 + assets/js/3a2ddf2f.21bbad76.js | 1 + assets/js/3aab746c.9a48ca86.js | 1 + assets/js/3cafba32.27890503.js | 1 + assets/js/3d0fb9fd.94683b2b.js | 1 + assets/js/3f9ae9f6.2800ab67.js | 1 + assets/js/4294.a7567dcb.js | 101 + assets/js/47f41a37.e28a1f98.js | 1 + assets/js/4c2b0735.497a037f.js | 1 + assets/js/4cf7e30f.9d25ceb5.js | 1 + assets/js/4dbec139.aa429c61.js | 1 + assets/js/522b09ee.b6dc382a.js | 1 + assets/js/5601.f9142a81.js | 1 + assets/js/5f602fa1.12465c04.js | 1 + assets/js/621db11d.329bb35b.js | 1 + assets/js/6472.40189ba2.js | 1 + assets/js/6fa48b14.90d2bd8d.js | 1 + assets/js/71d18034.0eabaf41.js | 1 + assets/js/724b3f70.bf6a608e.js | 1 + assets/js/72c73938.10945791.js | 1 + assets/js/761aff6b.101e026d.js | 1 + assets/js/76b3b3f5.c2e17148.js | 1 + assets/js/77bf0009.fdc32348.js | 1 + assets/js/78d2eb38.6ac70e04.js | 1 + assets/js/7ba60abf.8ef0d9fb.js | 1 + assets/js/814f3328.1f5daeee.js | 1 + assets/js/818287cf.09f82d49.js | 1 + assets/js/84329d6a.ef47a922.js | 1 + assets/js/857496c7.71360a70.js | 1 + assets/js/85b3a5ed.c46f1e8b.js | 1 + assets/js/868a7989.8d7c3544.js | 1 + assets/js/88eed8c4.1f2da266.js | 1 + assets/js/89fbf712.0cbe55d4.js | 1 + assets/js/8fedb115.e520b846.js | 1 + assets/js/92079dc1.4088067f.js | 1 + assets/js/94d32f6c.bca1abd5.js | 1 + assets/js/9555.2cb431fa.js | 1 + assets/js/962a4168.a5498250.js | 1 + assets/js/975a028b.b8fcc2ff.js | 1 + assets/js/9990.f91a94a2.js | 1 + assets/js/9e4087bc.7ce15d2c.js | 1 + assets/js/a14a666c.bfdacafa.js | 1 + assets/js/a6aa9e1f.1b78d77e.js | 1 + assets/js/a7456010.f1672167.js | 1 + assets/js/acecf23e.31db30f2.js | 1 + assets/js/aea41ef6.9227fb2a.js | 1 + assets/js/b08f0f32.4de38c21.js | 1 + assets/js/b16509ac.cc76897d.js | 1 + assets/js/b266de79.ae25f078.js | 1 + assets/js/b537349a.fd272082.js | 1 + assets/js/b5b60058.690e82eb.js | 1 + assets/js/b5d84c45.bfcf3ca8.js | 1 + assets/js/c32740fe.fac8281f.js | 1 + assets/js/c97f4488.7aeaa095.js | 1 + assets/js/ccc49370.f10e1762.js | 1 + assets/js/cd68b6a4.371cb0e3.js | 1 + assets/js/d085497a.41eaf6d0.js | 1 + assets/js/d185f613.d18d8259.js | 1 + assets/js/d280b035.762e2b8a.js | 1 + assets/js/d7ab2b33.2a9efd2a.js | 1 + assets/js/db76ea4b.9758d9ce.js | 1 + assets/js/dca2e11d.209b089a.js | 1 + assets/js/de854ad9.70027cd9.js | 1 + assets/js/de863535.9c2d8ada.js | 1 + assets/js/e0aaf982.98e12c9e.js | 1 + assets/js/e37dfb5c.04d97c35.js | 1 + assets/js/e62372be.2024c2a5.js | 1 + assets/js/ed9b7162.911627e7.js | 1 + assets/js/ef7aa1ca.f98a4c92.js | 1 + assets/js/f2eb9457.6fac4cda.js 
| 1 + assets/js/f8fee0f7.90b05631.js | 1 + assets/js/fd7e7e63.09b6c6e9.js | 1 + assets/js/main.62ce6156.js | 36 + assets/js/runtime~main.751b419d.js | 1 + .../1-bc356a416dae6236d2e366a42bee2cd3.wav | Bin 0 -> 882044 bytes .../2-bc356a416dae6236d2e366a42bee2cd3.wav | Bin 0 -> 882044 bytes .../3-e8092f56b531e18a0d335c0f391b46b9.wav | Bin 0 -> 882044 bytes .../4-90047e615651067970475dc7f117aceb.wav | Bin 0 -> 882044 bytes .../5-896767515da7b5a0fe46e9a205c1130f.wav | Bin 0 -> 882044 bytes .../6-756ec27a28b4fa02181f43ed9061f0b3.wav | Bin 0 -> 882044 bytes atom.css | 75 + atom.xml | 2242 +++++++++++++++++ atom.xsl | 92 + authors/index.html | 1 + feed.xml/index.html | 1 + img/favicon.ico | Bin 0 -> 15406 bytes img/logo-dark.svg | 148 ++ img/logo.svg | 148 ++ index.html | 60 + index.md | 6 - lunr-index-1731274975527.json | 1 + lunr-index.json | 1 + page/2/index.html | 44 + page/3/index.html | 2 + page/4/index.html | 1 + rss.css | 75 + rss.xml | 2187 ++++++++++++++++ rss.xsl | 86 + search-doc-1731274975527.json | 1 + search-doc.json | 1 + sitemap.xml | 1 + 262 files changed, 8754 insertions(+), 4196 deletions(-) delete mode 100644 .devcontainer/Dockerfile delete mode 100644 .devcontainer/devcontainer.json delete mode 100644 .gitignore create mode 100644 .nojekyll delete mode 100644 .vale.ini create mode 100644 2011/11/webpack-industrial-complex/index.html create mode 100644 2015/11/autocallable/index.html create mode 100644 2015/11/welcome/index.html create mode 100644 2015/12/testing-cramer/index.html create mode 100644 2016/01/cloudy-in-seattle/index.html create mode 100644 2016/01/complaining-about-the-weather/index.html create mode 100644 2016/02/guaranteed-money-maker/index.html create mode 100644 2016/02/profitability-using-the-investment-formula/index.html create mode 100644 2016/03/predicting-santander-customer-happiness/index.html create mode 100644 2016/03/tweet-like-me/index.html create mode 100644 2016/04/tick-tock/index.html create mode 100644 2016/05/the-unfair-casino/index.html create mode 100644 2016/06/event-studies-and-earnings-releases/index.html create mode 100644 2016/10/rustic-repodcasting/index.html create mode 100644 2016/11/pca-audio-compression/index.html create mode 100644 2018/01/captains-cookbook-part-1/index.html create mode 100644 2018/01/captains-cookbook-part-2/index.html create mode 100644 2018/05/hello/index.html create mode 100644 2018/06/dateutil-parser-to-rust/index.html create mode 100644 2018/09/isomorphic-apps/index.html create mode 100644 2018/09/primitives-in-rust-are-weird/index.html create mode 100644 2018/10/case-study-optimization/index.html create mode 100644 2018/12/allocation-safety/index.html create mode 100644 2018/12/what-small-business-really-means/index.html create mode 100644 2019/02/08/compiler-optimizations/index.html create mode 100644 2019/02/a-heaping-helping/index.html create mode 100644 2019/02/stacking-up/index.html create mode 100644 2019/02/summary/index.html create mode 100644 2019/02/the-whole-world/index.html create mode 100644 2019/02/understanding-allocations-in-rust/index.html create mode 100644 2019/05/making-bread/index.html create mode 100644 2019/06/high-performance-systems/index.html create mode 100644 2019/09/binary-format-shootout/index.html create mode 100644 2019/12/release-the-gil/index.html delete mode 100644 Gemfile delete mode 100644 Gemfile.lock delete mode 100644 _config.yml delete mode 100644 _includes/footer.html delete mode 100644 _includes/head.html delete mode 100644 _includes/nav.html delete mode 100644 
_includes/page_header.html delete mode 100644 _pages/about.md delete mode 100644 _posts/2018-05-28-hello.md delete mode 100644 _posts/2018-06-25-dateutil-parser-to-rust.md delete mode 100644 _posts/2018-09-01-primitives-in-rust-are-weird.md delete mode 100644 _posts/2018-09-15-isomorphic-apps.md delete mode 100644 _posts/2018-10-08-case-study-optimization.md delete mode 100644 _posts/2018-12-04-what-small-business-really-means.md delete mode 100644 _posts/2018-12-15-allocation-safety.md delete mode 100644 _posts/2019-02-04-understanding-allocations-in-rust.md delete mode 100644 _posts/2019-02-05-the-whole-world.md delete mode 100644 _posts/2019-02-06-stacking-up.md delete mode 100644 _posts/2019-02-07-a-heaping-helping.md delete mode 100644 _posts/2019-02-08-compiler-optimizations.md delete mode 100644 _posts/2019-02-09-summary.md delete mode 100644 _posts/2019-05-03-making-bread.md delete mode 100644 _posts/2019-06-31-high-performance-systems.md delete mode 100644 _posts/2019-09-28-binary-format-shootout.md delete mode 100644 _posts/2019-12-14-release-the-gil.md delete mode 100644 _posts/2022-11-20-webpack-industrial-complex.md create mode 100644 archive/index.html delete mode 100644 assets/css/fonts.css delete mode 100644 assets/css/style.scss create mode 100644 assets/css/styles.ae6ff4a3.css delete mode 100644 assets/font/JetBrainsMono-Regular.woff delete mode 100644 assets/font/JetBrainsMono-Regular.woff2 delete mode 100644 assets/font/lato-regular-webfont.woff delete mode 100644 assets/font/lato-regular-webfont.woff2 create mode 100644 assets/images/1-0d5e8450555296218deb0517b80440f3.png create mode 100644 assets/images/1-2d6670430a11b01011e4c231ea594db1.png create mode 100644 assets/images/10-b7987a0ff93705d5045057cbdaa2ede9.png create mode 100644 assets/images/2-062e1e47a07f200ff3b1531a02812bc7.png create mode 100644 assets/images/2-46bb7cc9cf739d97050c199eedced1a7.png delete mode 100644 assets/images/2018-10-heaptrack/heaptrack-after.png delete mode 100644 assets/images/2018-10-heaptrack/heaptrack-flamegraph.xcf create mode 100644 assets/images/3-2f5c483659f81d741809de6d095bd577.png create mode 100644 assets/images/3-eea635f8cfe4a12ae649ceb6c984e0cd.png create mode 100644 assets/images/4-63dc81954b1604cfa91f4c789da144a5.png create mode 100644 assets/images/4-b4c3dbfa10b1997706bc271ca71e2ff5.png create mode 100644 assets/images/5-8f10acd82b2f025abe57cb93d435a25f.png create mode 100644 assets/images/5-ae210d26729cea1700924579adf2c44c.png create mode 100644 assets/images/6-456ca1125f48947cf3c1c13722af95a0.png create mode 100644 assets/images/6-f07e72ff0b4639453034c75b2e62faba.png create mode 100644 assets/images/7-e0793eed6c42845d8ce4e3e79c1d44d8.png create mode 100644 assets/images/8-3eb2ad63e4c40b6717ee4516223d73ed.png create mode 100644 assets/images/9-630bd32c43e654f068e3c3bea79810e5.png create mode 100644 assets/images/_notebook_11_0-1c14b9b64e0cc03bce9e40f936d85202.png create mode 100644 assets/images/_notebook_11_0-2d0fe64b876b1c32a095f2d74b128f3c.png create mode 100644 assets/images/_notebook_12_0-1106bdfe947224ae55d7227b5a631d0e.svg create mode 100644 assets/images/_notebook_12_0-78c0e39ab1b402672551d197e388d2ba.png create mode 100644 assets/images/_notebook_13_0-2d0fe64b876b1c32a095f2d74b128f3c.png create mode 100644 assets/images/_notebook_13_0-d109b38c5db52ca12be209a2a268c6eb.png create mode 100644 assets/images/_notebook_14_0-8fad23eda4377ce379465c56be3eb022.png create mode 100644 assets/images/_notebook_14_1-22c2867e2cee02c45b87635ab4d3b76a.png create mode 100644 
assets/images/_notebook_16_1-144f4c4021e22c02fe015acc38d26343.png create mode 100644 assets/images/_notebook_16_1-eaa9ebc93d4557216c77a63df7df5192.png create mode 100644 assets/images/_notebook_18_1-0c204d1f3b296db4c925816140a946f2.png create mode 100644 assets/images/_notebook_18_1-f61745c7c099b114becca8acb3175541.png create mode 100644 assets/images/_notebook_20_1-76d1356ea34f0db5122ddbeb90dc117c.png create mode 100644 assets/images/_notebook_20_1-ea40789d2365d20a0725aca866680217.png create mode 100644 assets/images/_notebook_22_1-210909ddb46467b78aa3f65c8ae519fd.png create mode 100644 assets/images/_notebook_23_1-86585ab19c818b386afb7ec00dbec595.png create mode 100644 assets/images/_notebook_25_1-0db87f90eaf0febd08b4775910528a75.png create mode 100644 assets/images/_notebook_26_1-686b3995a84cbcac983b369843d1e222.png create mode 100644 assets/images/_notebook_3_0-6ba22789c3bcc8bd99c64f3fbfa11b30.png create mode 100644 assets/images/_notebook_4_0-46068cab7ea025b2c7ee79ad91eaf317.png create mode 100644 assets/images/_notebook_5_0-34febf65365a147cd218c9266b77e4fb.png create mode 100644 assets/images/_notebook_5_0-85416fdde3eb77aa133be1e7dab17d9f.png create mode 100644 assets/images/_notebook_6_0-46e660e38189a411644eac491e9b35ad.svg create mode 100644 assets/images/_notebook_7_0-a9df30d31e6b96a01619455d5040eb8b.png create mode 100644 assets/images/_notebook_8_0-f511f25f81b5b7b1baeaef267dd1a2b4.png create mode 100644 assets/images/_notebook_9_0-1bd353a1fa4f84f07d0964b59b00be1e.png rename assets/images/{2018-05-28-bread.jpg => bread-52bb152a7c29148e837d94bdf1755e1c.jpg} (100%) rename assets/images/{2019-02-04-container-size.svg => container-size-7fd54cbb2391e3e7310b0424c5f92cc1.svg} (100%) rename assets/images/{2018-09-15-electron-percy-wasm.png => electron-percy-wasm-9ccb2be15a9bed6da44486afc266bad5.png} (100%) rename assets/images/{2019-05-03-making-bread/final-product.jpg => final-product-607f96e84dada915fa422a7e5d524ca1.jpg} (100%) rename assets/images/{2018-06-25-gravel-mound.jpg => gravel-mound-4afad8bdb1cd6b0e40dd2fd41adca36f.jpg} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-before.png => heaptrack-before-11fba190f97831448cc539ebb32fa579.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-closeup.png => heaptrack-closeup-12ae3897c033ccb3684a88dd45592e14.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-closeup-after.png => heaptrack-closeup-after-967bc4596c480bcc9e8410b0a7a64a00.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-dtparse-colorized.png => heaptrack-dtparse-colorized-e6caf224f50df2dd56981f5b02970325.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-flamegraph.png => heaptrack-flamegraph-5094664fa79faaf2664b38505c15ac1f.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-flamegraph-after.png => heaptrack-flamegraph-after-cedc4c3519313f5af538364165e92c34.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-flamegraph-default.png => heaptrack-flamegraph-default-26cc411d387f58f50cb548f8e81df1a1.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-main-colorized.png => heaptrack-main-colorized-cfe5d7d345d32cfc1a0f297580619718.png} (100%) rename assets/images/{2018-10-heaptrack/heaptrack-parseinfo-colorized.png => heaptrack-parseinfo-colorized-a1898beaf28a3997ac86810f872539b7.png} (100%) rename assets/images/{2018-09-15-incorrect-MIME-type.png => incorrect-MIME-type-a977835e8dcbfdb20fdda3c67ee4f76c.png} (100%) rename assets/images/{2019-04-24-kung-fu.webp => 
kung-fu-5715f30eef7bf3aaa26770b1247024dc.webp} (100%) rename assets/images/{2018-05-28-rocks.jpg => rocks-6b9a0c44bf45210d496e2ebe2f896e0c.jpg} (100%) rename assets/images/{2019-05-03-making-bread/shaped-loaves.jpg => shaped-loaves-cea15e9ccef6b180525abaee2d288880.jpg} (100%) rename assets/images/{2019-05-03-making-bread/shattered-glass.jpg => shattered-glass-0b56af0302f7a8c3295bf43cbab77ffe.jpg} (100%) rename assets/images/{2022-11-20-video_mp2t.png => video_mp2t-1decc5fbd88b54dadd06691ce4c629ec.png} (100%) create mode 100644 assets/images/watch-the-world-burn-630e740c91d090f5790a3f4e103f1142.webp rename assets/images/{2019-05-03-making-bread/white-dough-rising-after-fold.jpg => white-dough-rising-after-fold-d7a27f12c1d2be572807105d6d7321f3.jpg} (100%) rename assets/images/{2019-05-03-making-bread/white-dough-rising-before-fold.jpg => white-dough-rising-before-fold-c5a4424f9a5227f1f8e86b13b436782c.jpg} (100%) rename assets/images/{2019-05-03-making-bread/whole-wheat-not-rising.jpg => whole-wheat-not-rising-922d19641c91922b7634fff1d6f15e6d.jpg} (100%) create mode 100644 assets/js/0fb9ce37.ec625f5c.js create mode 100644 assets/js/130b4a4b.8e30fd10.js create mode 100644 assets/js/16c8da5a.6e786399.js create mode 100644 assets/js/1803684d.778a092f.js create mode 100644 assets/js/1806d708.22f71128.js create mode 100644 assets/js/1a1424c7.9077c1c0.js create mode 100644 assets/js/1b190668.1a035f32.js create mode 100644 assets/js/1d2da633.f724f156.js create mode 100644 assets/js/1e5192b9.9888c924.js create mode 100644 assets/js/1f1953c8.5837c39b.js create mode 100644 assets/js/1fe257c0.d2e59b40.js create mode 100644 assets/js/2061.3bbdbc04.js create mode 100644 assets/js/2062e753.bee6da1d.js create mode 100644 assets/js/2519.06ba1bd0.js create mode 100644 assets/js/319b187a.06669ed5.js create mode 100644 assets/js/33496f92.dec736d4.js create mode 100644 assets/js/35b21e3d.3e3cc712.js create mode 100644 assets/js/36994c47.0c1ebe43.js create mode 100644 assets/js/39c8d8a0.ee5aa2e0.js create mode 100644 assets/js/3a2ddf2f.21bbad76.js create mode 100644 assets/js/3aab746c.9a48ca86.js create mode 100644 assets/js/3cafba32.27890503.js create mode 100644 assets/js/3d0fb9fd.94683b2b.js create mode 100644 assets/js/3f9ae9f6.2800ab67.js create mode 100644 assets/js/4294.a7567dcb.js create mode 100644 assets/js/47f41a37.e28a1f98.js create mode 100644 assets/js/4c2b0735.497a037f.js create mode 100644 assets/js/4cf7e30f.9d25ceb5.js create mode 100644 assets/js/4dbec139.aa429c61.js create mode 100644 assets/js/522b09ee.b6dc382a.js create mode 100644 assets/js/5601.f9142a81.js create mode 100644 assets/js/5f602fa1.12465c04.js create mode 100644 assets/js/621db11d.329bb35b.js create mode 100644 assets/js/6472.40189ba2.js create mode 100644 assets/js/6fa48b14.90d2bd8d.js create mode 100644 assets/js/71d18034.0eabaf41.js create mode 100644 assets/js/724b3f70.bf6a608e.js create mode 100644 assets/js/72c73938.10945791.js create mode 100644 assets/js/761aff6b.101e026d.js create mode 100644 assets/js/76b3b3f5.c2e17148.js create mode 100644 assets/js/77bf0009.fdc32348.js create mode 100644 assets/js/78d2eb38.6ac70e04.js create mode 100644 assets/js/7ba60abf.8ef0d9fb.js create mode 100644 assets/js/814f3328.1f5daeee.js create mode 100644 assets/js/818287cf.09f82d49.js create mode 100644 assets/js/84329d6a.ef47a922.js create mode 100644 assets/js/857496c7.71360a70.js create mode 100644 assets/js/85b3a5ed.c46f1e8b.js create mode 100644 assets/js/868a7989.8d7c3544.js create mode 100644 assets/js/88eed8c4.1f2da266.js create 
mode 100644 assets/js/89fbf712.0cbe55d4.js create mode 100644 assets/js/8fedb115.e520b846.js create mode 100644 assets/js/92079dc1.4088067f.js create mode 100644 assets/js/94d32f6c.bca1abd5.js create mode 100644 assets/js/9555.2cb431fa.js create mode 100644 assets/js/962a4168.a5498250.js create mode 100644 assets/js/975a028b.b8fcc2ff.js create mode 100644 assets/js/9990.f91a94a2.js create mode 100644 assets/js/9e4087bc.7ce15d2c.js create mode 100644 assets/js/a14a666c.bfdacafa.js create mode 100644 assets/js/a6aa9e1f.1b78d77e.js create mode 100644 assets/js/a7456010.f1672167.js create mode 100644 assets/js/acecf23e.31db30f2.js create mode 100644 assets/js/aea41ef6.9227fb2a.js create mode 100644 assets/js/b08f0f32.4de38c21.js create mode 100644 assets/js/b16509ac.cc76897d.js create mode 100644 assets/js/b266de79.ae25f078.js create mode 100644 assets/js/b537349a.fd272082.js create mode 100644 assets/js/b5b60058.690e82eb.js create mode 100644 assets/js/b5d84c45.bfcf3ca8.js create mode 100644 assets/js/c32740fe.fac8281f.js create mode 100644 assets/js/c97f4488.7aeaa095.js create mode 100644 assets/js/ccc49370.f10e1762.js create mode 100644 assets/js/cd68b6a4.371cb0e3.js create mode 100644 assets/js/d085497a.41eaf6d0.js create mode 100644 assets/js/d185f613.d18d8259.js create mode 100644 assets/js/d280b035.762e2b8a.js create mode 100644 assets/js/d7ab2b33.2a9efd2a.js create mode 100644 assets/js/db76ea4b.9758d9ce.js create mode 100644 assets/js/dca2e11d.209b089a.js create mode 100644 assets/js/de854ad9.70027cd9.js create mode 100644 assets/js/de863535.9c2d8ada.js create mode 100644 assets/js/e0aaf982.98e12c9e.js create mode 100644 assets/js/e37dfb5c.04d97c35.js create mode 100644 assets/js/e62372be.2024c2a5.js create mode 100644 assets/js/ed9b7162.911627e7.js create mode 100644 assets/js/ef7aa1ca.f98a4c92.js create mode 100644 assets/js/f2eb9457.6fac4cda.js create mode 100644 assets/js/f8fee0f7.90b05631.js create mode 100644 assets/js/fd7e7e63.09b6c6e9.js create mode 100644 assets/js/main.62ce6156.js create mode 100644 assets/js/runtime~main.751b419d.js create mode 100644 assets/medias/1-bc356a416dae6236d2e366a42bee2cd3.wav create mode 100644 assets/medias/2-bc356a416dae6236d2e366a42bee2cd3.wav create mode 100644 assets/medias/3-e8092f56b531e18a0d335c0f391b46b9.wav create mode 100644 assets/medias/4-90047e615651067970475dc7f117aceb.wav create mode 100644 assets/medias/5-896767515da7b5a0fe46e9a205c1130f.wav create mode 100644 assets/medias/6-756ec27a28b4fa02181f43ed9061f0b3.wav create mode 100644 atom.css create mode 100644 atom.xml create mode 100644 atom.xsl create mode 100644 authors/index.html create mode 100644 feed.xml/index.html create mode 100644 img/favicon.ico create mode 100644 img/logo-dark.svg create mode 100644 img/logo.svg create mode 100644 index.html delete mode 100644 index.md create mode 100644 lunr-index-1731274975527.json create mode 100644 lunr-index.json create mode 100644 page/2/index.html create mode 100644 page/3/index.html create mode 100644 page/4/index.html create mode 100644 rss.css create mode 100644 rss.xml create mode 100644 rss.xsl create mode 100644 search-doc-1731274975527.json create mode 100644 search-doc.json create mode 100644 sitemap.xml diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 7159a0b..0000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM mcr.microsoft.com/vscode/devcontainers/ruby:0-2.7-bullseye - -RUN wget 
https://github.com/errata-ai/vale/releases/download/v2.21.0/vale_2.21.0_Linux_64-bit.tar.gz -O /tmp/vale.tar.gz \ - && cd /usr/local/bin \ - && tar xf /tmp/vale.tar.gz \ - && rm /tmp/vale.tar.gz \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 1d206de..0000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,13 +0,0 @@ -// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: -// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.0/containers/ruby -{ - "name": "Ruby", - "build": { - "dockerfile": "Dockerfile" - }, - "runArgs": ["--userns=keep-id"], - - "remoteUser": "vscode", - "containerUser": "vscode", - "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/${localWorkspaceFolderBasename},type=bind,Z" -} diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 508b6b2..0000000 --- a/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -_site/ -.swp -.sass-cache/ -.jekyll-metadata -.bundle/ -vendor/ -.styles/ -.vscode/ diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/.vale.ini b/.vale.ini deleted file mode 100644 index 777f626..0000000 --- a/.vale.ini +++ /dev/null @@ -1,7 +0,0 @@ -StylesPath = .styles -MinAlertLevel = suggestion -Packages = Microsoft, write-good - -[*] -BasedOnStyles = Vale, Microsoft, write-good -write-good.E-Prime = NO \ No newline at end of file diff --git a/2011/11/webpack-industrial-complex/index.html b/2011/11/webpack-industrial-complex/index.html new file mode 100644 index 0000000..64e0d9c --- /dev/null +++ b/2011/11/webpack-industrial-complex/index.html @@ -0,0 +1,32 @@ +The webpack industrial complex | The Old Speice Guy

The webpack industrial complex

· 5 min read
Bradlee Speice

This started because I wanted to build a synthesizer. Setting a goal of "digital DX7" was ambitious, but I needed something unrelated to the day job. Beyond that, working with audio seemed like a good challenge. I enjoy performance-focused code, and performance problems in audio are conspicuous. Building a web project was an obvious choice because of the web audio API documentation and independence from a large Digital Audio Workstation (DAW).

+

The project was soon derailed trying to sort out technical issues unrelated to the original purpose. Finding a resolution was a frustrating journey, and it's still not clear whether those problems were my fault. As a result, I'm writing this to try making sense of it, as a case study/reference material, and to salvage something from the process.

+

Starting strong

+

The sole starting requirement was to write everything in TypeScript. Not because of project scale, but because guardrails help with unfamiliar territory. Keeping that in mind, the first question was: how does one start a new project? All I actually needed was "compile TypeScript, show it in a browser."

+

Create React App (CRA) came to the rescue and the rest of that evening was a joy. My TypeScript/JavaScript skills were rusty, but the online documentation was helpful. I had never understood the appeal of JSX (why put a DOM in JavaScript?) until it made connecting an onEvent handler and a function easy.

+

Some quick dimensional analysis later and there was a sine wave oscillator playing A=440 through the speakers. I specifically remember thinking "modern browsers are magical."
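For reference, the whole thing takes only a few lines with the browser's audio API (a minimal sketch; the variable names are mine):

const audioContext = new AudioContext();
// A=440 is the standard concert pitch
const oscillator = new OscillatorNode(audioContext, { type: "sine", frequency: 440 });
oscillator.connect(audioContext.destination);
oscillator.start();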

+

Continuing on

+

Now comes the first mistake: I began to worry about "scale" before encountering an actual problem. Rather than rendering audio in the main thread, why not use audio worklets and render in a background thread instead?

+

The first sign something was amiss came from the TypeScript compiler errors showing the audio worklet API was missing. After searching through GitHub issues and (unsuccessfully) tweaking the tsconfig.json settings, I settled on installing a package and moving on.

+

The next problem came from actually using the API. Worklets must load from separate "modules," but it wasn't clear how to guarantee the worklet code stayed separate from the application. I saw recommendations to use new URL(<local path>, import.meta.url) and it worked! Well, kind of:

+

Browser error

+

That file has the audio processor code, so why does it get served with Content-Type: video/mp2t?
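For context, the pattern I was attempting looked roughly like this (a sketch under my assumptions; the file name and processor name are hypothetical):

// worklet-processor.ts (hypothetical): runs on the audio rendering thread
class ToneProcessor extends AudioWorkletProcessor {
  process(inputs: Float32Array[][], outputs: Float32Array[][]): boolean {
    // Fill `outputs` with samples here
    return true; // keep the processor alive
  }
}
registerProcessor("tone-processor", ToneProcessor);

// main.ts: load the worklet module, then create a node that uses it
const context = new AudioContext();
await context.audioWorklet.addModule(new URL("./worklet-processor.ts", import.meta.url));
const node = new AudioWorkletNode(context, "tone-processor");
node.connect(context.destination);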

+

Floundering about

+

Now comes the second mistake: even though I didn't understand the error, I ignored recommendations to just use JavaScript and stuck by the original TypeScript requirement.

+

I tried different project structures. Moving the worklet code to a new folder didn't help, nor did setting up a monorepo and placing it in a new package.

+

I tried three different CRA tools - react-app-rewired, craco, customize-react-app - but got the same problem. Each has varying levels of compatibility with recent CRA versions, so it wasn't clear if I had the right solution but implemented it incorrectly. After attempting to eject the application and panicking after seeing the configuration, I abandoned that as well.

+

I tried changing the webpack configuration: using new loaders, setting asset rules, even changing how webpack detects worker resources. In hindsight, entry points may have been the answer. But because CRA actively resists attempts to change its webpack configuration, and I couldn't find audio worklet examples in any other framework, I gave up.

+

I tried so many application frameworks. Next.js looked like a good candidate, but added its own bespoke webpack complexity to the existing confusion. Astro had the best "getting started" experience, but I refuse to install an IDE-specific plugin. I first used Deno while exploring Lume, but it couldn't import the audio worklet types (maybe because of module compatibility?). Each framework was unique in its own way (shout-out to SvelteKit) but I couldn't figure out how to make them work.

+

Learning and reflecting

+

I ended up using Vite and vite-plugin-react-pages to handle both "build the app" and "bundle worklets," but the specific tool choice isn't important. Instead, the focus should be on lessons learned.

+

For myself:

+
    +
  • I'm obsessed with tooling, to the point it can derail the original goal. While it comes from a good place (for example: "types are awesome"), it can get in the way of more important work
  • +
  • I tend to reach for online resources right after seeing a new problem. While finding help online is often faster, spending time understanding the problem would have been more productive than cycling through (often outdated) blog posts
  • +
+

For the tools:

+
    +
  • Resource bundling is great and solves a genuine challenge. I've heard too many horror stories of developers writing modules by hand to believe this is unnecessary complexity
  • +
  • Webpack is a build system and modern frameworks are deeply dependent on it (hence the "webpack industrial complex"). While this often saves users from unnecessary complexity, there's no path forward if something breaks
  • +
  • There's little ability to mix and match tools across frameworks. Next.js and Gatsby let users extend webpack, but because each framework adds its own modules, changes aren't portable. After spending a week looking at webpack, I had an example running with parcel in thirty minutes, but couldn't integrate it
  • +
+

In the end, learning new systems is fun, but a focus on tools that "just work" can leave users out in the cold if they break down.

\ No newline at end of file diff --git a/2015/11/autocallable/index.html b/2015/11/autocallable/index.html new file mode 100644 index 0000000..5530117 --- /dev/null +++ b/2015/11/autocallable/index.html @@ -0,0 +1,95 @@ +Autocallable Bonds | The Old Speice Guy

Autocallable Bonds

· 12 min read
Bradlee Speice

For a final project, my group was tasked with understanding three exotic derivatives: The Athena, Phoenix without memory, and Phoenix with memory autocallable products.

+

My only non-core class this semester has been in Structured Products. We've been surveying a wide variety of products, and the final project was to pick one to report on. Because these are all very similar, we decided to demonstrate all 3 products at once.

+

What follows below is a notebook demonstrating the usage of Julia for Monte-Carlo simulation of some exotic products.

+
+
using Gadfly
+

Athena/Phoenix Simulation

+

Underlying simulation

+

In order to price the autocallable bonds, we need to simulate the underlying assets. Let's go ahead and set up the simulation first, as this lays the foundation for what we're trying to do. We're going to use JNJ as the basis for our simulation. This implies the following parameters:

+
    +
  • $S_0$ = \$102.2 (as of time of writing)
  • +
  • $q$ = 2.84%
  • +
  • $r$ = [.49, .9, 1.21, 1.45, 1.69] (term structure as of time of writing, linear interpolation)
  • +
  • $\mu = r - q$ (note that this implies a negative drift because of current low rates)
  • +
  • $\sigma = \sigma_{imp}$ = 15.62% (from VIX implied volatility)
  • +
+

We additionally define some parameters for simulation:

+
    +
  • T: The number of years to simulate
  • +
  • m: The number of paths to simulate
  • +
  • n: The number of steps to simulate in a year
  • +
+
S0 = 102.2
nominal = 100
q = 2.84 / 100
σ = 15.37 / 100
term = [0, .49, .9, 1.21, 1.45, 1.69] / 100 + 1

###
# Potential: Based on PEP
# S0 = 100.6
# σ = 14.86
# q = 2.7
###

# Simulation parameters
T = 5 # Using years as the unit of time
n = 250 # simulations per year
m = 100000 # paths
num_simulations = 5; # simulation rounds per price
+

Defining the simulation

+

To make things simpler, we simulate a single year at a time. This allows us to add in a dividend policy without too much difficulty, and update the simulation every year to match the term structure. The underlying uses GBM for simulation between years.
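Each pass through the loop below applies the standard discretized GBM step; this is exactly what the two exp terms in simulate_gbm compute:

$$S_{t+\Delta t} = S_t \cdot \exp\left[\left(\mu - \frac{\sigma^2}{2}\right)\Delta t + \sigma\sqrt{\Delta t}\,Z\right], \qquad Z \sim N(0,1)$$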

+
simulate_gbm = function(S0, μ, σ, T, n)
    # Set the initial state
    m = length(S0)
    t = T / n
    motion = zeros(m, n)
    motion[:,1] = S0

    # Build out all states
    for i=1:(n-1)
        motion[:,i+1] = motion[:,i] .* exp((μ - σ^2/2)*t) .* exp(sqrt(t) * σ .* randn(m))
    end

    return motion
end

function display_motion(motion, T)
    # Given a matrix of paths, display the motion
    n = length(motion[1,:])
    m = length(motion[:,1])
    x = repmat(1:n, m)

    # Calculate the ticks we're going to use. We'd like to
    # have an xtick every month, so calculate where those
    # ticks will actually be at.
    if (T > 3)
        num_ticks = T
        xlabel = "Years"
    else
        num_ticks = T * 12
        xlabel = "Months"
    end
    tick_width = n / num_ticks
    x_ticks = []
    for i=1:round(num_ticks)
        x_ticks = vcat(x_ticks, i*tick_width)
    end

    # Use one color for each path. I'm not sure if there's
    # a better way to do this without going through DataFrames
    colors = []
    for i = 1:m
        colors = vcat(colors, ones(n)*i)
    end

    plot(x=x, y=motion', color=colors, Geom.line,
        Guide.xticks(ticks=x_ticks, label=false),
        Guide.xlabel(xlabel),
        Guide.ylabel("Value"))
end;
+

Example simulation

+

Let's go ahead and run a sample simulation to see what the functions got us!

+
initial = ones(5) * S0
# Using μ=0, T=.25 for now, we'll use the proper values later
motion = simulate_gbm(initial, 0, σ, .25, 200)

display_motion(motion, .25)
+

+

Computing the term structure

+

Now that we've got the basic motion set up, let's start making things a bit more sophisticated for the model. We're going to assume that the drift of the stock is the difference between the implied forward rate and the quarterly dividend rate.

+

We're given the yearly term structure, and need to calculate the quarterly forward rate to match this structure. The term structure is assumed to follow:

+

$$d(0, t) = d(0, t-1) \cdot f_{i-1, i}$$

+

Where $f_{i-1, i}$ is the quarterly forward rate.
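Rearranging gives the forward rate directly; this ratio is what forward_term below computes:

$$f_{i-1, i} = \frac{d(0, i)}{d(0, i-1)}$$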

+
forward_term = function(yearly_term)
    # It is assumed that we have a yearly term structure passed in, and starts at year 0
    # This implies a nominal rate above 0 for the first year!
    years = length(yearly_term)-1 # because we start at 0
    structure = [(yearly_term[i+1] / yearly_term[i]) for i=1:years]
end;
+

Illustrating the term structure

+

Now that we've got our term structure, let's validate that we're getting the correct results! If we've done this correctly, then:

+
term[2] == term[1] * structure[1]
+
# Example term structure taken from:
# http://www.treasury.gov/resource-center/data-chart-center/interest-rates/Pages/TextView.aspx?data=yield
# Linear interpolation used years in-between periods, assuming real-dollar
# interest rates
forward_yield = forward_term(term)
calculated_term2 = term[1] * forward_yield[1]

println("Actual term[2]: $(term[2]); Calculated term[2]: $(calculated_term2)")
+
    Actual term[2]: 1.0049; Calculated term[2]: 1.0049
+

The full underlying simulation

+

Now that we have the term structure set up, we can actually start doing some real simulation! Let's construct some paths through the full 5-year time frame. In order to do this, we will simulate 1 year at a time, and use the forward rates at those times to compute the drift. Thus, there will be 5 total simulations batched together.
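The drift used for year $i$ in the loop below treats the forward factor minus one as a simple rate, net of the dividend yield:

$$\mu_i = (f_{i-1, i} - 1) - q$$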

+
full_motion = ones(5) * S0
full_term = vcat(term[1], forward_yield)
for i=1:T
    μ = (full_term[i] - 1 - q)
    year_motion = simulate_gbm(full_motion[:,end], μ, σ, 1, n)
    full_motion = hcat(full_motion, year_motion)
end

display_motion(full_motion, T)
+

+

Final simulation

+

We're now going to actually build out the full motion that we'll use for computing the pricing of our autocallable products. It will be largely the same, but we will use far more sample paths for the simulation.

+
full_simulation = function(S0, T, n, m, term)
    forward = vcat(term[1], forward_term(term))

    # And an S0 to kick things off.
    final_motion = ones(m) * S0
    for i=1:T
        μ = (forward[i] - 1 - q)
        year_motion = simulate_gbm(final_motion[:,end], μ, σ, 1, n)
        final_motion = hcat(final_motion, year_motion)
    end
    return final_motion
end

tic()
full_simulation(S0, T, n, m, term)
time = toq()
@printf("Time to run simulation: %.2fs", time)
+
    Time to run simulation: 5.34s
+

Athena Simulation

+

Now that we've defined our underlying simulation, let's actually try and price an Athena note. Athena has the following characteristics:

+
    +
  • Automatically called if the underlying is above the call barrier at observation
  • +
  • Accelerated coupon paid if the underlying is above the call barrier at observation +
      +
    • The coupon paid is $c \cdot i$ with $i$ as the current year, and $c$ the coupon rate
    • +
    +
  • +
  • Principal protection up to a protection barrier at observation; all principal at risk if this barrier is not met
  • +
  • Observed yearly
  • +
+
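Putting those rules together, the payoff logic implemented below is (a compact restatement, ignoring the forward-rate growth applied after an early call; $N$ is the nominal, $c$ the coupon rate, $K$ the strike):

$$\text{payoff} = \begin{cases} N + c \cdot i & \text{called at observation } i \\ N & S_T > \text{protection barrier} \\ N \cdot S_T / K & \text{otherwise} \end{cases}$$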
call_barrier = S0
strike = S0
protection_barrier = S0 * .6
coupon = nominal * .07

price_athena = function(initial_price, year_prices, call_barrier,
                        protection_barrier, coupon, forward_structure)

    total_coupons = 0

    t = length(year_prices)

    for i=1:t
        price = year_prices[i]
        if price ≥ call_barrier
            return (nominal + coupon*i) * exp((prod(forward_structure[i:end])-1)*(t-i))
        end
    end

    # We've reached maturity, time to check capital protection
    if year_prices[end] > protection_barrier
        return nominal
    else
        put = (strike - year_prices[end]) / strike
        return nominal*(1-put)
    end
end

forward_structure = forward_term(term)
price_function = (year_prices) -> price_athena(S0, year_prices,
call_barrier, protection_barrier, coupon, forward_structure)

athena = function()
    year_indexes = [n*i for i=1:T]
    motion = full_simulation(S0, T, n, m, term)
    payoffs = [price_function(motion[i, year_indexes]) for i=1:m]
    return mean(payoffs)
end

mean_payoffs = zeros(num_simulations)
for i=1:num_simulations
    tic()
    mean_payoffs[i] = athena()
    time = toq()
    @printf("Mean of simulation %i: \$%.4f; Simulation time: %.2fs\n", i, mean_payoffs[i], time)
end

final_mean = mean(mean_payoffs)
println("Mean over $num_simulations simulations: $(mean(mean_payoffs))")
pv = final_mean * (exp(-(prod(forward_structure)-1)*T))
@printf("Present value of Athena note: \$%.2f, notional: \$%.2f", pv, nominal)
+
    Mean of simulation 1: $103.2805; Simulation time: 5.59s
Mean of simulation 2: $103.3796; Simulation time: 5.05s
Mean of simulation 3: $103.4752; Simulation time: 5.18s
Mean of simulation 4: $103.4099; Simulation time: 5.37s
Mean of simulation 5: $103.3260; Simulation time: 5.32s
Mean over 5 simulations: 103.37421610015554
Present value of Athena note: $95.00, notional: $100.00
+

Phoenix without Memory Simulation

+

Let's move into pricing a Phoenix without memory. It's very similar to the Athena product, with the exception that we introduce a coupon barrier, so coupons are paid even when the underlying is below the initial price.

+

The Phoenix product has the following characteristics (example here):

+
    +
  • Automatically called if the underlying is above the call barrier at observation
  • +
  • Coupon paid if the underlying is above a coupon barrier at observation
  • +
  • Principal protection up to a protection barrier at observation; all principal at risk if this barrier is not met
  • +
  • Observed yearly
  • +
+

Some example paths (all assume a call barrier at the current price, and a coupon barrier some level below that):

+
    +
  • At the end of year 1, the stock is above the call barrier; the note is called and you receive the value of the stock plus the coupon being paid.
  • +
  • At the end of year 1, the stock is above the coupon barrier, but not the call barrier; you receive the coupon. At the end of year 2, the stock is below the coupon barrier; you receive nothing. At the end of year 3, the stock is above the call barrier; the note is called and you receive the value of the stock plus a coupon for year 3.
  • +
+

We're going to re-use the same simulation, with the following parameters:

+
    +
  • Call barrier: 100%
  • +
  • Coupon barrier: 70%
  • +
  • Coupon: 6%
  • +
  • Capital protection until 70% (at maturity)
  • +
+
call_barrier = S0
coupon_barrier = S0 * .8
protection_barrier = S0 * .6
coupon = nominal * .06

price_phoenix_no_memory = function(initial_price, year_prices, call_barrier, coupon_barrier,
                                   protection_barrier, coupon, forward_structure)

    total_coupons = 0
    t = length(year_prices)

    for i=1:t
        price = year_prices[i]
        if price ≥ call_barrier
            return (nominal + coupon + total_coupons)*exp((prod(forward_structure[i:end])-1)*(t-i))
        elseif price ≥ coupon_barrier
            total_coupons = total_coupons * exp(forward_structure[i]-1) + coupon
        else
            total_coupons *= exp(forward_structure[i]-1)
        end
    end

    # We've reached maturity, time to check capital protection
    if year_prices[end] > protection_barrier
        return nominal + total_coupons
    else
        put = (strike - year_prices[end]) / strike
        return nominal*(1-put)
    end
end

forward_structure = forward_term(term)
price_function = (year_prices) -> price_phoenix_no_memory(S0, year_prices,
call_barrier, coupon_barrier, protection_barrier, coupon, forward_structure)

phoenix_no_memory = function()
    year_indexes = [n*i for i=1:T]
    motion = full_simulation(S0, T, n, m, term)
    payoffs = [price_function(motion[i, year_indexes]) for i=1:m]
    return mean(payoffs)
end

mean_payoffs = zeros(num_simulations)
for i=1:num_simulations
    tic()
    mean_payoffs[i] = phoenix_no_memory()
    time = toq()
    @printf("Mean of simulation %i: \$%.4f; Simulation time: %.2fs\n", i, mean_payoffs[i], time)
end

final_mean = mean(mean_payoffs)
println("Mean over $num_simulations simulations: $(mean(mean_payoffs))")
pv = final_mean * exp(-(prod(forward_structure)-1)*(T))
@printf("Present value of Phoenix without memory note: \$%.2f", pv)
+
    Mean of simulation 1: $106.0562; Simulation time: 5.72s
Mean of simulation 2: $106.0071; Simulation time: 5.85s
Mean of simulation 3: $105.9959; Simulation time: 5.87s
Mean of simulation 4: $106.0665; Simulation time: 5.93s
Mean of simulation 5: $106.0168; Simulation time: 5.81s
Mean over 5 simulations: 106.02850857209883
Present value of Phoenix without memory note: $97.44
+

Phoenix with Memory Simulation

+

The Phoenix with Memory structure is very similar to the Phoenix, but as the name implies, has a special "memory" property: It remembers any coupons that haven't been paid at prior observation times, and pays them all if the underlying crosses the coupon barrier. For example:

+
    +
  • Note issued with 100% call barrier, 70% coupon barrier. At year 1, the underlying is at 50%, so no coupons are paid. At year 2, the underlying is at 80%, so coupons for both year 1 and 2 are paid, resulting in a double coupon.
  • +
+
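Formally, when the coupon barrier is crossed at observation $i$, the payment covers every observation since the last coupon at $i_{\text{last}}$ (this is the memory_coupons line in the code below):

$$\text{coupon}_i = (i - i_{\text{last}}) \cdot c$$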

You can also find an example here.

+

Let's go ahead and set up the simulation! The parameters will be mostly the same, but we can expect that the value will go up because of the memory attribute.

+
call_barrier = S0
coupon_barrier = S0 * .8
protection_barrier = S0 * .6
coupon = nominal * .07

price_phoenix_with_memory = function(initial_price, year_prices, call_barrier,
                                     coupon_barrier, protection_barrier, coupon, forward_structure)

    last_coupon = 0
    total_coupons = 0

    t = length(year_prices)

    for i=1:t
        price = year_prices[i]
        if price > call_barrier
            return (nominal + coupon + total_coupons)*exp((prod(forward_structure[i:end])-1)*(t-i))
        elseif price > coupon_barrier
            ####################################################################
            # The only difference between with/without memory is the below lines
            memory_coupons = (i - last_coupon) * coupon
            last_coupon = i
            total_coupons = total_coupons * exp(forward_structure[i]-1) + memory_coupons
            ####################################################################
        else
            total_coupons *= exp(forward_structure[i]-1)
        end
    end

    # We've reached maturity, time to check capital protection
    if year_prices[end] > protection_barrier
        return nominal + total_coupons
    else
        put = (strike - year_prices[end]) / strike
        return nominal*(1-put)
    end
end

forward_structure = forward_term(term)
price_function = (year_prices) -> price_phoenix_with_memory(S0, year_prices,
call_barrier, coupon_barrier, protection_barrier, coupon, forward_structure)

phoenix_with_memory = function()
    year_indexes = [n*i for i=1:T]
    motion = full_simulation(S0, T, n, m, term)
    payoffs = [price_function(motion[i, year_indexes]) for i=1:m]
    return mean(payoffs)
end

mean_payoffs = zeros(num_simulations)
for i=1:num_simulations
    tic()
    mean_payoffs[i] = phoenix_with_memory()
    time = toq()
    @printf("Mean of simulation %i: \$%.4f; Simulation time: %.2fs\n",
        i, mean_payoffs[i], time)
end

final_mean = mean(mean_payoffs)
println("Mean over $num_simulations simulations: $(mean(mean_payoffs))")
pv = final_mean * exp(-(prod(forward_structure)-1)*(T))
@printf("Present value of Phoenix with memory note: \$%.2f", pv)
+
    Mean of simulation 1: $108.8612; Simulation time: 5.89s
Mean of simulation 2: $109.0226; Simulation time: 5.90s
Mean of simulation 3: $108.9175; Simulation time: 5.92s
Mean of simulation 4: $108.9426; Simulation time: 5.94s
Mean of simulation 5: $108.8087; Simulation time: 6.06s
Mean over 5 simulations: 108.91052564051816
Present value of Phoenix with memory note: $100.09
\ No newline at end of file diff --git a/2015/11/welcome/index.html b/2015/11/welcome/index.html new file mode 100644 index 0000000..486ad2d --- /dev/null +++ b/2015/11/welcome/index.html @@ -0,0 +1,47 @@ +Welcome, and an algorithm | The Old Speice Guy

Welcome, and an algorithm

· 5 min read
Bradlee Speice

Hello! Glad to meet you. I'm currently a student at Columbia University studying Financial Engineering, and want to give an overview of the projects I'm working on!

+

To start things off, Columbia has been hosting a trading competition that a partner and I are competing in. I'm including a notebook of the algorithm we're using, just to give a simple overview of a miniature algorithm.

+

The competition is scored in 3 areas:

+
  • Total return
  • +
  • Sharpe ratio
  • +
  • Maximum drawdown
  • +
+

Our algorithm uses a basic momentum strategy: in the given list of potential portfolios, pick the stocks that have been performing well in the past 30 days. Then, optimize for return subject to the drawdown being below a specific level. We didn't include the Sharpe ratio as a constraint, mostly because we were a bit late entering the competition.

+

I'll be updating this post with the results of our algorithm as they come along!

+
+

UPDATE 12/5/2015: Now that the competition has ended, I wanted to update how the algorithm performed. Unfortunately, it didn't do very well. I'm planning to make some tweaks over the coming weeks, and do another forward test in January.

+
    +
  • After week 1: Down .1%
  • +
  • After week 2: Down 1.4%
  • +
  • After week 3: Flat
  • +
+

And some statistics for all teams participating in the competition:

+
Statistic             Value
Max Return            74.1%
Min Return            -97.4%
Average Return        -.1%
Std Dev of Returns    19.6%
+
+

Trading Competition Optimization

+

Goal: Max return given maximum Sharpe and Drawdown

+
from IPython.display import display
import Quandl
from datetime import datetime, timedelta

tickers = ['XOM', 'CVX', 'CLB', 'OXY', 'SLB']
market_ticker = 'GOOG/NYSE_VOO'
lookback = 30
d_col = 'Close'

data = {tick: Quandl.get('YAHOO/{}'.format(tick))[-lookback:] for tick in tickers}
market = Quandl.get(market_ticker)
+

Calculating the Return

+

We first want to know how much each ticker returned over the prior period.

+
returns = {tick: data[tick][d_col].pct_change() for tick in tickers}

display({tick: returns[tick].mean() for tick in tickers})
+
    {'CLB': -0.0016320202164526894,
'CVX': 0.0010319531629488911,
'OXY': 0.00093418904454400551,
'SLB': 0.00098431254720448159,
'XOM': 0.00044165797556096868}
+

Calculating the Sharpe ratio

+

Sharpe: $\frac{R - R_M}{\sigma}$

+

We use the average return over the lookback period, minus the market average return, over the ticker standard deviation to calculate the Sharpe. Shorting a stock turns a negative Sharpe positive.

+
market_returns = market.pct_change()

sharpe = lambda ret: (ret.mean() - market_returns[d_col].mean()) / ret.std()
sharpes = {tick: sharpe(returns[tick]) for tick in tickers}

display(sharpes)
+
    {'CLB': -0.10578734457846127,
'CVX': 0.027303529817677398,
'OXY': 0.022622210057414487,
'SLB': 0.026950946344858676,
'XOM': -0.0053519259698605499}
+

Calculating the drawdown

+

This one is easy - what is the maximum daily change over the lookback period? That is, because we will allow short positions, we are not concerned strictly with maximum downturn, but in general, what is the largest 1-day change?
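With daily returns $r_t$ over the lookback window, the measure computed below is simply:

$$D = \max_t \lvert r_t \rvert$$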

+
drawdown = lambda ret: ret.abs().max()
drawdowns = {tick: drawdown(returns[tick]) for tick in tickers}

display(drawdowns)
+
    {'CLB': 0.043551495607375035,
'CVX': 0.044894389686214398,
'OXY': 0.051424517867144637,
'SLB': 0.034774627850375328,
'XOM': 0.035851524605672758}
+

Performing the optimization

+
$$
\begin{align*}
\max\ \ & \mu \cdot \omega \\
\text{s.t.}\ \ & \vec{1} \cdot \omega = 1 \\
& \vec{S} \cdot \omega \ge s \\
& \vec{D} \cdot \lvert \omega \rvert \le d \\
& \lvert \omega \rvert \le l
\end{align*}
$$

We want to maximize average return subject to having a full portfolio, Sharpe above a specific level, drawdown below a level, and leverage not too high - that is, don't have huge long/short positions.

+
import numpy as np
from scipy.optimize import minimize

#sharpe_limit = .1
drawdown_limit = .05
leverage = 250

# Use the map so we can guarantee we maintain the correct order

# So we can write as upper-bound
# sharpe_a = np.array(list(map(lambda tick: sharpes[tick], tickers))) * -1
dd_a = np.array(list(map(lambda tick: drawdowns[tick], tickers)))

# Because minimizing
returns_a = np.array(list(map(lambda tick: returns[tick].mean(), tickers)))

meets_sharpe = lambda x: sum(abs(x) * sharpe_a) - sharpe_limit
def meets_dd(x):
    portfolio = sum(abs(x))
    if portfolio < .1:
        # If there are no stocks in the portfolio,
        # we can accidentally induce division by 0,
        # or division by something small enough to cause infinity
        return 0

    return drawdown_limit - sum(abs(x) * dd_a) / sum(abs(x))

is_portfolio = lambda x: sum(x) - 1

def within_leverage(x):
    return leverage - sum(abs(x))

objective = lambda x: sum(x * returns_a) * -1 # Because we're minimizing
bounds = ((None, None),) * len(tickers)
x = np.zeros(len(tickers))

constraints = [
{
'type': 'eq',
'fun': is_portfolio
}, {
'type': 'ineq',
'fun': within_leverage
#}, {
# 'type': 'ineq',
# 'fun': meets_sharpe
}, {
'type': 'ineq',
'fun': meets_dd
}
]

optimal = minimize(objective, x, bounds=bounds, constraints=constraints,
options={'maxiter': 500})

# Optimization time!
display(optimal.message)

display("Holdings: {}".format(list(zip(tickers, optimal.x))))

# multiply by -100 to scale, and compensate for minimizing
expected_return = optimal.fun * -100
display("Expected Return: {:.3f}%".format(expected_return))

expected_drawdown = sum(abs(optimal.x) * dd_a) / sum(abs(optimal.x)) * 100
display("Expected Max Drawdown: {0:.2f}%".format(expected_drawdown))

# TODO: Calculate expected Sharpe

    'Optimization terminated successfully.'
    "Holdings: [('XOM', 5.8337945679814904),
                ('CVX', 42.935064321851307),
                ('CLB', -124.5),
                ('OXY', 36.790387773552119),
                ('SLB', 39.940753336615096)]"
    'Expected Return: 32.375%'
    'Expected Max Drawdown: 4.34%'
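
The TODO above is left open; as a rough sketch (an addition of mine, not part of the original analysis), one naive estimate weights each ticker's Sharpe by its position, flipping the sign for shorts - a real calculation would also account for covariance between the tickers:

# Hypothetical sketch only - assumes the `sharpes`, `tickers`, and `optimal`
# objects from the cells above, and ignores covariance between tickers
sharpe_vals = np.array(list(map(lambda tick: sharpes[tick], tickers)))
expected_sharpe = sum(optimal.x * sharpe_vals) / sum(abs(optimal.x))
display("Expected Sharpe: {:.3f}".format(expected_sharpe))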
\ No newline at end of file diff --git a/2015/12/testing-cramer/index.html b/2015/12/testing-cramer/index.html new file mode 100644 index 0000000..b3f92db --- /dev/null +++ b/2015/12/testing-cramer/index.html @@ -0,0 +1,40 @@ +Testing Cramer | The Old Speice Guy

Testing Cramer

· 9 min read
Bradlee Speice

Since starting a graduate program in Financial Engineering, I've been a fan of the Mad Money TV show featuring the bombastic Jim Cramer. One of the things he's said is that you shouldn't use the futures to predict where the stock market is going to go. But he says it often enough that I've begun to wonder: who is he trying to convince?


It makes sense that because futures on things like the S&P 500 are traded continuously, they would price in market information before the stock market opens. So is Cramer right to be convinced that strategies based on the futures are a poor idea? I wanted to test it out.

The first question is where to get the futures data. I've been part of Seeking Alpha for a bit, and they publish the Wall Street Breakfast newsletter, which contains daily futures returns as of 6:20 AM EST. I'd be interested in using that data to see if we can actually make some money.

First though, let's get the data:


Downloading Futures data from Seeking Alpha

We're going to define two HTML parsing classes - one to get the article URLs from a page, and one to get the actual data from each article.

from html.parser import HTMLParser
import requests

class ArticleListParser(HTMLParser):
    """Given a web page with articles on it, parse out the article links"""

    articles = []

    def handle_starttag(self, tag, attrs):
        # if tag == 'div' and ("id", "author_articles_wrapper") in attrs:
        #     self.fetch_links = True
        if tag == 'a' and ('class', 'dashboard_article_link') in attrs:
            href = list(filter(lambda x: x[0] == 'href', attrs))[0][1]
            self.articles.append(href)

base_url = "http://seekingalpha.com/author/wall-street-breakfast/articles"
article_page_urls = [base_url] + [base_url + '/{}'.format(i) for i in range(2, 20)]

global_articles = []
for page in article_page_urls:
    # We need to switch the user agent, as SA blocks the standard requests agent
    articles_html = requests.get(page,
                                 headers={"User-Agent": "Wget/1.13.4"})
    parser = ArticleListParser()
    parser.feed(articles_html.text)
    global_articles += (parser.articles)

from copy import copy
import numpy as np
import pandas as pd
from dateutil import parser as dtparser

class ArticleReturnParser(HTMLParser):
    "Given an article, parse out the futures returns in it"

    record_font_tags = False
    in_font_tag = False
    counter = 0
    # data = {}  # See __init__

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.data = {}

    def handle_starttag(self, tag, attrs):
        if tag == 'span' and ('itemprop', 'datePublished') in attrs:
            date_string = list(filter(lambda x: x[0] == 'content', attrs))[0][1]
            date = dtparser.parse(date_string)
            self.data['date'] = date

        self.in_font_tag = tag == 'font'

    def safe_float(self, string):
        try:
            return float(string[:-1]) / 100
        except ValueError:
            return np.NaN

    def handle_data(self, content):
        if not self.record_font_tags and "Futures at 6" in content:
            self.record_font_tags = True

        if self.record_font_tags and self.in_font_tag:
            if self.counter == 0:
                self.data['DOW'] = self.safe_float(content)
            elif self.counter == 1:
                self.data['S&P'] = self.safe_float(content)
            elif self.counter == 2:
                self.data['NASDAQ'] = self.safe_float(content)
            elif self.counter == 3:
                self.data['Crude'] = self.safe_float(content)
            elif self.counter == 4:
                self.data['Gold'] = self.safe_float(content)

            self.counter += 1

    def handle_endtag(self, tag):
        self.in_font_tag = False

def retrieve_data(url):
    sa = "http://seekingalpha.com"
    article_html = requests.get(sa + url,
                                headers={"User-Agent": "Wget/1.13.4"})
    parser = ArticleReturnParser()
    parser.feed(article_html.text)
    parser.data.update({"url": url})
    parser.data.update({"text": article_html.text})
    return parser.data

# This copy **MUST** be in place. I'm not sure why,
# as you'd think that the data being returned would already
# represent a different memory location. Even so, it blows up
# if you don't do this.
article_list = list(set(global_articles))
article_data = [copy(retrieve_data(url)) for url in article_list]
# If there's an issue downloading the article, drop it.
article_df = pd.DataFrame.from_dict(article_data).dropna()

Fetching the Returns data

Now that we have the futures data, we're going to compare it against 4 different indices - the S&P 500, Dow Jones Industrial Average, Russell 2000, and NASDAQ 100. Let's get the data off of Quandl to make things easier!

import Quandl
from dateutil.relativedelta import relativedelta

# Sort the articles by date, and start the price history one day before the earliest article
start_date = article_df.sort_values(by='date').iloc[0]['date'] - relativedelta(days=1)
SPY = Quandl.get("GOOG/NYSE_SPY", trim_start=start_date)
DJIA = Quandl.get("GOOG/AMS_DIA", trim_start=start_date)
RUSS = Quandl.get("GOOG/AMEX_IWM", trim_start=start_date)
NASDAQ = Quandl.get("GOOG/EPA_QQQ", trim_start=start_date)

Running the Comparison

There are two tests I want to run: how accurate each futures category is at predicting the index's opening change over the prior close, and how accurate it is at predicting the index's daily return.

Let's first calculate how good each future is at predicting the opening return over the previous day's close. I expect that the futures will be more than 50% accurate, since the information is recorded only 3 hours before the markets open.

from datetime import datetime

def calculate_opening_ret(frame):
    # I'm not a huge fan of the appending for loop,
    # but it's a bit verbose for a comprehension
    data = {}
    for i in range(1, len(frame)):
        date = frame.iloc[i].name
        prior_close = frame.iloc[i-1]['Close']
        open_val = frame.iloc[i]['Open']
        data[date] = (open_val - prior_close) / prior_close

    return data

SPY_open_ret = calculate_opening_ret(SPY)
DJIA_open_ret = calculate_opening_ret(DJIA)
RUSS_open_ret = calculate_opening_ret(RUSS)
NASDAQ_open_ret = calculate_opening_ret(NASDAQ)

def signs_match(list_1, list_2):
    # This is a surprisingly difficult task - we have to match
    # up the dates in order to check if opening returns actually match
    index_dict_dt = {key.to_datetime(): list_2[key] for key in list_2.keys()}

    matches = []
    for row in list_1.iterrows():
        row_dt = row[1][1]
        row_value = row[1][0]
        index_dt = datetime(row_dt.year, row_dt.month, row_dt.day)
        if index_dt in list_2:
            index_value = list_2[index_dt]
            if (row_value > 0 and index_value > 0) or \
                    (row_value < 0 and index_value < 0) or \
                    (row_value == 0 and index_value == 0):
                matches += [1]
            else:
                matches += [0]
            # print("{}".format(list_2[index_dt]))
    return matches


prediction_dict = {}
matches_dict = {}
count_dict = {}
index_dict = {"SPY": SPY_open_ret, "DJIA": DJIA_open_ret,
              "RUSS": RUSS_open_ret, "NASDAQ": NASDAQ_open_ret}
indices = ["SPY", "DJIA", "RUSS", "NASDAQ"]
futures = ["Crude", "Gold", "DOW", "NASDAQ", "S&P"]
for index in indices:
    matches_dict[index] = {future: signs_match(article_df[[future, 'date']],
                                               index_dict[index]) for future in futures}
    count_dict[index] = {future: len(matches_dict[index][future]) for future in futures}
    prediction_dict[index] = {future: np.mean(matches_dict[index][future])
                              for future in futures}
print("Articles Checked: ")
print(pd.DataFrame.from_dict(count_dict))
print()
print("Prediction Accuracy:")
print(pd.DataFrame.from_dict(prediction_dict))

    Articles Checked:
            DJIA  NASDAQ  RUSS  SPY
    Crude    268     268   271  271
    DOW      268     268   271  271
    Gold     268     268   271  271
    NASDAQ   268     268   271  271
    S&P      268     268   271  271

    Prediction Accuracy:
                DJIA    NASDAQ      RUSS       SPY
    Crude   0.544776  0.522388  0.601476  0.590406
    DOW     0.611940  0.604478  0.804428  0.841328
    Gold    0.462687  0.455224  0.464945  0.476015
    NASDAQ  0.615672  0.608209  0.797048  0.830258
    S&P     0.604478  0.597015  0.811808  0.848708

This data is very interesting. Some insights:

  • Both DOW and NASDAQ futures are pretty bad at predicting their actual market openings
  • NASDAQ and Dow are fairly unpredictable; Russell 2000 and S&P are very predictable
  • Gold is a poor predictor in general - intuitively, gold should move inversely to the market, but it appears to be about as accurate as a coin flip

All said though, it appears that futures data is important for determining market direction for both the S&P 500 and Russell 2000. Cramer is half-right: futures data isn't very helpful for the Dow and NASDAQ indices, but is great for the S&P and Russell indices.


The next step - Predicting the close

Given the code we currently have, I'd like to predict the close of the market as well. We can re-use most of the code, so let's see what happens:

def calculate_closing_ret(frame):
    # I'm not a huge fan of the appending for loop,
    # but it's a bit verbose for a comprehension
    data = {}
    for i in range(0, len(frame)):
        date = frame.iloc[i].name
        open_val = frame.iloc[i]['Open']
        close_val = frame.iloc[i]['Close']
        data[date] = (close_val - open_val) / open_val

    return data

SPY_close_ret = calculate_closing_ret(SPY)
DJIA_close_ret = calculate_closing_ret(DJIA)
RUSS_close_ret = calculate_closing_ret(RUSS)
NASDAQ_close_ret = calculate_closing_ret(NASDAQ)

def signs_match(list_1, list_2):
    # This is a surprisingly difficult task - we have to match
    # up the dates in order to check if opening returns actually match
    index_dict_dt = {key.to_datetime(): list_2[key] for key in list_2.keys()}

    matches = []
    for row in list_1.iterrows():
        row_dt = row[1][1]
        row_value = row[1][0]
        index_dt = datetime(row_dt.year, row_dt.month, row_dt.day)
        if index_dt in list_2:
            index_value = list_2[index_dt]
            if (row_value > 0 and index_value > 0) or \
                    (row_value < 0 and index_value < 0) or \
                    (row_value == 0 and index_value == 0):
                matches += [1]
            else:
                matches += [0]
            # print("{}".format(list_2[index_dt]))
    return matches


matches_dict = {}
count_dict = {}
prediction_dict = {}
index_dict = {"SPY": SPY_close_ret, "DJIA": DJIA_close_ret,
              "RUSS": RUSS_close_ret, "NASDAQ": NASDAQ_close_ret}
indices = ["SPY", "DJIA", "RUSS", "NASDAQ"]
futures = ["Crude", "Gold", "DOW", "NASDAQ", "S&P"]
for index in indices:
    matches_dict[index] = {future: signs_match(article_df[[future, 'date']],
                                               index_dict[index]) for future in futures}
    count_dict[index] = {future: len(matches_dict[index][future]) for future in futures}
    prediction_dict[index] = {future: np.mean(matches_dict[index][future])
                              for future in futures}

print("Articles Checked:")
print(pd.DataFrame.from_dict(count_dict))
print()
print("Prediction Accuracy:")
print(pd.DataFrame.from_dict(prediction_dict))

    Articles Checked:
            DJIA  NASDAQ  RUSS  SPY
    Crude    268     268   271  271
    DOW      268     268   271  271
    Gold     268     268   271  271
    NASDAQ   268     268   271  271
    S&P      268     268   271  271

    Prediction Accuracy:
                DJIA    NASDAQ      RUSS       SPY
    Crude   0.533582  0.529851  0.501845  0.542435
    DOW     0.589552  0.608209  0.535055  0.535055
    Gold    0.455224  0.451493  0.483395  0.512915
    NASDAQ  0.582090  0.626866  0.531365  0.538745
    S&P     0.585821  0.608209  0.535055  0.535055

Well, it appears that the futures data is terrible at predicting market close. NASDAQ predicting NASDAQ is the most interesting data point, but 63% accuracy isn't accurate enough to make money consistently.
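
As a quick sanity check (an addition of mine, not in the original analysis), we can ask whether ~62.7% accuracy over 268 articles is even distinguishable from a coin flip - it is, though statistical significance is not the same thing as a tradeable edge:

from scipy import stats
# Two-sided binomial test: 168 matches out of 268 (~62.7%) vs. a fair coin
print(stats.binom_test(168, 268, 0.5))  # ~3e-5, comfortably below .05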

Final sentiments

The data bears out very close to what I expected would happen:

  • Futures data is more accurate than a coin flip for predicting openings, which makes sense since it is recorded only 3 hours before the actual opening
  • Futures data is about as accurate as a coin flip for predicting closings, which means there is no money to be made in trying to predict the day's market direction from the futures data

In summary:

  • Cramer is half right: futures data is not good for predicting the market open of the Dow and NASDAQ indices. Contrary to Cramer though, it is very good for predicting the S&P and Russell indices - we can achieve an accuracy slightly over 80% for each.
  • Making money in the market is hard. We can't just go to the futures and treat them as an oracle for where the market will close.

I hope you've enjoyed this - I quite enjoyed taking a deep dive into the analytics this way. I'll be posting more soon!

\ No newline at end of file diff --git a/2016/01/cloudy-in-seattle/index.html b/2016/01/cloudy-in-seattle/index.html new file mode 100644 index 0000000..a4bd74f --- /dev/null +++ b/2016/01/cloudy-in-seattle/index.html @@ -0,0 +1,41 @@ +Cloudy in Seattle | The Old Speice Guy

Cloudy in Seattle

· 4 min read
Bradlee Speice

Building on prior analysis, is Seattle's reputation as a depressing city actually well-earned?

import pickle
import pandas as pd
import numpy as np
from bokeh.plotting import output_notebook, figure, show
from bokeh.palettes import RdBu4 as Palette
from datetime import datetime
import warnings

output_notebook()

    BokehJS successfully loaded.

Examining other cities

After taking some time to explore how the weather in North Carolina stacked up over the past years, I was interested in doing the same analysis for other cities. Growing up with family from Binghamton, NY, I was always told it was very cloudy there. And Seattle has a nasty reputation for being very depressing and cloudy. All said, the cities I want to examine are:

  • Binghamton, NY
  • Cary, NC
  • Seattle, WA
  • New York City, NY

I'd be interested to try this analysis worldwide at some point - comparing London and Seattle might be an interesting analysis. For now though, we'll stick with trying out the US data.


There will be plenty of charts. I want to know: How has average cloud cover and precipitation chance changed over the years for each city mentioned? This will hopefully tell us whether Seattle has actually earned its reputation for being a depressing city.

city_forecasts = pickle.load(open('city_forecasts.p', 'rb'))
forecasts_df = pd.DataFrame.from_dict(city_forecasts)
cities = ['binghamton', 'cary', 'nyc', 'seattle']
city_colors = {cities[i]: Palette[i] for i in range(0, 4)}

def safe_cover(frame):
    if frame and 'cloudCover' in frame:
        return frame['cloudCover']
    else:
        return np.NaN

def monthly_avg_cloudcover(city, year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    cloud_cover_vals = list(map(lambda x: safe_cover(forecasts_df[city][x]['currently']), dates))
    cloud_cover_samples = len(list(filter(lambda x: x is not np.NaN, cloud_cover_vals)))
    # Ignore an issue with nanmean having all NaN values. We'll discuss the data issues below.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return np.nanmean(cloud_cover_vals), cloud_cover_samples

years = range(1990, 2016)
def city_avg_cc(city, month):
    return [monthly_avg_cloudcover(city, y, month) for y in years]

months = [
    ('July', 7),
    ('August', 8),
    ('September', 9),
    ('October', 10),
    ('November', 11)
]

for month, month_id in months:
    month_averages = {city: city_avg_cc(city, month_id) for city in cities}
    f = figure(title="{} Average Cloud Cover".format(month),
               x_axis_label='Year',
               y_axis_label='Cloud Cover Percentage')
    for city in cities:
        f.line(years, [x[0] for x in month_averages[city]],
               legend=city, color=city_colors[city])
    show(f)

July average cloud cover chart
August average cloud cover chart
September average cloud cover chart
October average cloud cover chart
November average cloud cover chart


Well, as it so happens, it looks like there are some data issues. July's data is a bit sporadic, and 2013 seems to be missing from most months as well. I think only two things can really be confirmed here:

  • Seattle, specifically for the months of October and November, is in fact significantly more cloudy on average than the other cities
  • All cities surveyed have seen average cloud cover decline over the months studied. There are data issues, but the trend seems clear.

Let's now move from cloud cover data to looking at average rainfall chance.

def safe_precip(frame):
    if frame and 'precipProbability' in frame:
        return frame['precipProbability']
    else:
        return np.NaN

def monthly_avg_precip(city, year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    precip_vals = list(map(lambda x: safe_precip(forecasts_df[city][x]['currently']), dates))
    precip_samples = len(list(filter(lambda x: x is not np.NaN, precip_vals)))
    # Ignore an issue with nanmean having all NaN values. We'll discuss the data issues below.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return np.nanmean(precip_vals), precip_samples

def city_avg_precip(city, month):
    return [monthly_avg_precip(city, y, month) for y in years]

for month, month_id in months:
    # Average precipitation chance for each city
    month_averages = {city: city_avg_precip(city, month_id) for city in cities}
    f = figure(title="{} Average Precipitation Chance".format(month),
               x_axis_label='Year',
               y_axis_label='Precipitation Chance Percentage')
    for city in cities:
        f.line(years, [x[0] for x in month_averages[city]],
               legend=city, color=city_colors[city])
    show(f)

July average precipitation chance chart
August average precipitation chance chart
September average precipitation chance chart
October average precipitation chance chart
November average precipitation chance chart


The same data issue caveats apply here: 2013 seems to be missing some data, and July has some issues as well. However, this seems to confirm the trends we saw with cloud cover:

  • Seattle, specifically for the months of August, October, and November, has had a consistently higher chance of rain than other cities surveyed.
  • Average precipitation chance, just like average cloud cover, has been trending down over time.

Conclusion


I have to admit I was a bit surprised after doing this analysis. Seattle showed a higher average cloud cover and average precipitation chance than did the other cities surveyed. Maybe Seattle is actually an objectively more depressing city to live in.


Well that's all for weather data at the moment. It's been a great experiment, but I think this is about as far as I'll be able to get with weather data without some domain knowledge. Talk again soon!

\ No newline at end of file diff --git a/2016/01/complaining-about-the-weather/index.html b/2016/01/complaining-about-the-weather/index.html new file mode 100644 index 0000000..6953095 --- /dev/null +++ b/2016/01/complaining-about-the-weather/index.html @@ -0,0 +1,30 @@ +Complaining about the weather | The Old Speice Guy

Complaining about the weather

· 8 min read
Bradlee Speice

Figuring out whether people should be complaining about the recent weather in North Carolina.

from bokeh.plotting import figure, output_notebook, show
from bokeh.palettes import PuBuGn9 as Palette
import pandas as pd
import numpy as np
from datetime import datetime
import pickle

output_notebook()

    BokehJS successfully loaded.

I'm originally from North Carolina, and I've been hearing a lot of people talking about how often it's been raining recently. They're excited for any day that has sun.


So I got a bit curious: Has North Carolina over the past few months actually had more cloudy and rainy days recently than in previous years? This shouldn't be a particularly challenging task, but I'm interested to know if people's perceptions actually reflect reality.


The data we'll use comes from forecast.io, since they can give us a cloud cover percentage. I've gone ahead and retrieved the data to a pickle file, and included the code that was used to generate it. First up: What was the average cloud cover in North Carolina during August - November, and how many days were cloudy? We're going to assume that a "cloudy" day is defined as any day in which the cloud cover is above 50%.

city_forecasts = pickle.load(open('city_forecasts.p', 'rb'))
forecast_df = pd.DataFrame.from_dict(city_forecasts)
cary_forecast = forecast_df['cary']
years = range(1990, 2016)
months = range(7, 12)
months_str = ['July', 'August', 'September', 'October', 'November']

def safe_cover(frame):
    if frame and 'cloudCover' in frame:
        return frame['cloudCover']
    else:
        return np.NaN

def monthly_avg_cloudcover(year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    cloud_cover_vals = list(map(lambda x: safe_cover(cary_forecast[x]['currently']), dates))
    cloud_cover_samples = len(list(filter(lambda x: x is not np.NaN, cloud_cover_vals)))
    return np.nanmean(cloud_cover_vals), cloud_cover_samples


monthly_cover_vals = [[monthly_avg_cloudcover(y, m)[0] for y in years] for m in months]

f = figure(title='Monthly Average Cloud Cover',
           x_range=(1990, 2015),
           x_axis_label='Year')
for x in range(0, len(months)):
    f.line(years, monthly_cover_vals[x], legend=months_str[x], color=Palette[x])
show(f)

Monthly average cloud cover chart


As we can see from the chart above, on the whole the monthly average cloud cover has been generally trending down over time. The average cloud cover is also lower than it was last year - it seems people are mostly just complaining. There are some data issues that start in 2012 that we need to be aware of - the cloud cover percentage doesn't exist for all days. Even so, the data that we have seems to reflect the wider trend, so we'll assume for now that the missing data doesn't skew our results.


There's one more metric we want to check though - how many cloudy days were there? This is probably a better gauge of sentiment than the average monthly cover.

def monthly_cloudy_days(year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    cloud_cover_vals = list(map(lambda x: safe_cover(cary_forecast[x]['currently']), dates))
    cloud_cover_samples = len(list(filter(lambda x: x is not np.NaN, cloud_cover_vals)))
    cloudy_days = [cover > .5 for cover in cloud_cover_vals]
    return np.count_nonzero(cloudy_days), cloud_cover_samples

monthly_days_vals = [[monthly_cloudy_days(y, m)[0] for y in years] for m in months]
monthly_cover_samples = [[monthly_cloudy_days(y, m)[1] for y in years] for m in months]

f = figure(title='Monthly Cloudy Days',
           x_range=(1990, 2015),
           x_axis_label='Year')
for x in range(0, len(months)):
    f.line(years, monthly_days_vals[x], legend=months_str[x], color=Palette[x])
show(f)

f = figure(title='Monthly Cloud Cover Samples',
           x_range=(1990, 2015),
           x_axis_label='Year',
           height=300)
for x in range(0, len(months)):
    f.line(years, monthly_cover_samples[x], legend=months_str[x], color=Palette[x])
show(f)

Monthly cloudy days chart


Monthly cloud cover samples chart


On the whole, the number of cloudy days seems to reflect the trend with average cloud cover - it's actually becoming more sunny as time progresses. That said, we need to be careful in how we view this number - because there weren't as many samples in 2015 as previous years, the number of days can get thrown off. In context though, even if most days not recorded were in fact cloudy, the overall count for 2015 would still be lower than previous years.


In addition to checking cloud cover, I wanted to check precipitation data as well - what is the average precipitation chance over a month, and how many days during a month is rain likely? The thinking is that days with a high-precipitation chance will also be days in which it is cloudy or depressing.

def safe_precip(frame):
    if frame and 'precipProbability' in frame:
        return frame['precipProbability']
    else:
        return np.NaN

def monthly_avg_precip(year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    precip_vals = list(map(lambda x: safe_precip(cary_forecast[x]['currently']), dates))
    precip_samples = len(list(filter(lambda x: x is not np.NaN, precip_vals)))
    return np.nanmean(precip_vals), precip_samples

monthly_avg_precip_vals = [[monthly_avg_precip(y, m)[0] for y in years] for m in months]

f = figure(title='Monthly Average Precipitation Chance',
           x_range=(1990, 2015),
           x_axis_label='Year')
for x in range(0, len(months)):
    f.line(years, monthly_avg_precip_vals[x], legend=months_str[x], color=Palette[x])
show(f)

Monthly average precipitation chance chart


As we can see from the chart, the average chance of precipitation over a month more or less stays within a band of 0 - .1 for all months over all years. This is further evidence that the past few months are no more cloudy or rainy than previous years. Like the cloud cover though, we still want to get a count of all the rainy days, in addition to the average chance. We'll define a "rainy day" as any day in which the chance of rain is greater than 25%.

def monthly_rainy_days(year, month):
    dates = pd.DatetimeIndex(start=datetime(year, month, 1, 12),
                             end=datetime(year, month + 1, 1, 12),
                             freq='D', closed='left')
    precip_prob_vals = list(map(lambda x: safe_precip(cary_forecast[x]['currently']), dates))
    precip_prob_samples = len(list(filter(lambda x: x is not np.NaN, precip_prob_vals)))
    precip_days = [prob > .25 for prob in precip_prob_vals]
    return np.count_nonzero(precip_days), precip_prob_samples

monthly_precip_days_vals = [[monthly_rainy_days(y, m)[0] for y in years] for m in months]
monthly_precip_samples = [[monthly_rainy_days(y, m)[1] for y in years] for m in months]

f = figure(title='Monthly Rainy Days',
           x_range=(1990, 2015),
           x_axis_label='Year')
for x in range(0, len(months)):
    f.line(years, monthly_precip_days_vals[x], legend=months_str[x], color=Palette[x])
show(f)

f = figure(title='Monthly Rainy Days Samples',
           x_range=(1990, 2015),
           x_axis_label='Year',
           height=300)
for x in range(0, len(months)):
    f.line(years, monthly_precip_samples[x], legend=months_str[x], color=Palette[x])
show(f)

Monthly rainy days chart


Monthly rainy days samples chart


After trying to find the number of days that are rainy, we can see that November hit its max value for rainy days in 2015. However, that value is 6, as compared to a previous maximum of 5. While it is a new record, the value isn't actually all that different. And for other months, the values are mostly in-line with the averages.


Summary and Conclusions


After having looked at forecast data for Cary, it appears that the months of July - November this year in terms of weather were at worst on par with prior years, if not slightly more sunny. This seems to be a case of confirmation bias: someone complains about a string of cloudy or rainy days, and suddenly you start noticing them more.


While this analysis doesn't take into account other areas of North Carolina, my initial guess would be to assume that other areas also will show similar results: nothing interesting is happening. Maybe that will be for another blog post later!


Coming soon: I'll compare rain/cloud conditions in North Carolina to some other places in the U.S.!


Generating the Forecast file


The following code generates the file that was used throughout the blog post. Please note that while I'm retrieving data for other cities to use in a future blog post, only Cary data was used for this post.

import pandas as pd
from functools import reduce
import requests
from datetime import datetime

# Coordinate data from http://itouchmap.com/latlong.html
cary_loc = (35.79154, -78.781117)
nyc_loc = (40.78306, -73.971249)
seattle_loc = (47.60621, -122.332071)
binghamton_loc = (42.098687, -75.917974)
cities = {
    'cary': cary_loc,
    'nyc': nyc_loc,
    'seattle': seattle_loc,
    'binghamton': binghamton_loc
}

apikey = ''  # My super-secret API Key

def get_forecast(lat, long, date=None):
    forecast_base = "https://api.forecast.io/forecast/"
    if date is None:
        url = forecast_base + apikey + '/{},{}'.format(lat, long)
    else:
        epoch = int(date.timestamp())
        url = forecast_base + apikey + '/{},{},{}'.format(lat, long, epoch)

    return requests.get(url).json()

years = range(1990, 2016)
# For datetimes, the 12 is for getting the weather at noon.
# We're doing this over midnight because we're more concerned
# with what people see, and people don't typically see the weather
# at midnight.
dt_indices = [pd.date_range(start=datetime(year, 7, 1, 12),
                            end=datetime(year, 11, 30, 12))
              for year in years]
dt_merge = reduce(lambda x, y: x.union(y), dt_indices)

# Because we have to pay a little bit to use the API, we use for loops here
# instead of a comprehension - if something breaks, we want to preserve the
# data already retrieved
city_forecasts = {}
for city, loc in cities.items():
    print("Retrieving data for {} starting at {}".format(city,
          datetime.now().strftime("%I:%M:%S %p")))
    for dt in dt_merge:
        try:
            city_forecasts[(city, dt)] = get_forecast(*loc, dt)
        except Exception as e:
            print(e)
            city_forecasts[(city, dt)] = None
print("End forecast retrieval: {}".format(datetime.now().strftime("%I:%M:%S %p")))

import pickle
pickle.dump(city_forecasts, open('city_forecasts.p', 'wb'))

### Output:
# Retrieving data for binghamton starting at 05:13:42 PM
# Retrieving data for seattle starting at 05:30:51 PM
# Retrieving data for nyc starting at 05:48:30 PM
# Retrieving data for cary starting at 06:08:32 PM
# End forecast retrieval: 06:25:21 PM
\ No newline at end of file diff --git a/2016/02/guaranteed-money-maker/index.html b/2016/02/guaranteed-money-maker/index.html new file mode 100644 index 0000000..d4e6766 --- /dev/null +++ b/2016/02/guaranteed-money-maker/index.html @@ -0,0 +1,75 @@ +Guaranteed money maker | The Old Speice Guy

Guaranteed money maker

· 8 min read
Bradlee Speice

Developing an investment strategy based on the Martingale betting strategy


If you can see into the future, that is.


My previous class in Stochastic Calculus covered a lot of interesting topics, and the important one for today is the Gambler's Ruin problem. If you're interested in some of the theory behind it, also make sure to check out random walks. The important bit is that we studied the Martingale Betting Strategy, which describes for us a guaranteed way to eventually make money.


The strategy goes like this: You are going to toss a fair coin with a friend. If you guess heads or tails correctly, you get back double the money you bet. If you guess incorrectly, you lose money. How should you bet?


The correct answer is that you should double your bet each time you lose. Then when you finally win, you'll be guaranteed to make back everything you lost and then $1 extra! Consider the scenario:

  1. You bet $1, and guess incorrectly. You're 1 dollar in the hole.
  2. You bet $2, and guess incorrectly. You're 3 dollars in the hole now.
  3. You bet $4, and guess incorrectly. You're 7 dollars in the hole.
  4. You bet $8, and guess correctly! You now get back those 8 dollars you bet, plus 8 extra for winning, for a total profit of one dollar!

Mathematically, we can prove that as long as you have unlimited money to bet, you are guaranteed to make money.
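
To make the guarantee concrete: after losing the first n tosses, you are down \sum_{i=0}^{n-1} 2^i = 2^n - 1 dollars, and winning the next toss at a bet of 2^n pays that bet back doubled, for a net of 2^n - (2^n - 1) = 1 dollar - no matter how long the losing streak ran.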


Applying the Martingale Strategy


But we're all realistic people, and once you start talking about "unlimited money" eyebrows should be raised. Even still, this is an interesting strategy to investigate, and I want to apply it to the stock market. As long as we can guarantee there's a single day in which the stock goes up, we should be able to make money right? The question is just how much we have to invest to guarantee this.


Now it's time for the math. We'll use the following definitions:

  • o_i = the share price at the opening of day i
  • c_i = the share price at the close of day i
  • d_i = the amount of money we want to invest at the beginning of day i

With those definitions in place, I'd like to present the formula that is guaranteed to make you money. I call it Bradlee's Investment Formula:


c_n \sum_{i=1}^n \frac{d_i}{o_i} > \sum_{i=1}^{n} d_i


It might not look like much, but if you can manage to make it so that this formula holds true, you will be guaranteed to make money. The intuition behind the formula is this: The closing share price times the number of shares you have purchased ends up greater than the amount of money you invested.
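
As a quick, made-up illustration: invest d_1 = $100 at an open of o_1 = $10 and you hold 10 shares; if the day closes at c_1 = $11, the left-hand side is 11 \cdot 10 = $110, which exceeds the $100 invested.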


That is, on day n, if you know what the closing price will be, you can set up the amount of money you invest that day to guarantee you make money. I'll even teach you to figure out how much money that is! Take a look:

\begin{align*}
c_n \sum_{i=1}^{n-1} \frac{d_i}{o_i} + \frac{c_nd_n}{o_n} &> \sum_{i=1}^{n-1}d_i + d_n\\
\frac{c_nd_n}{o_n} - d_n &> \sum_{i=1}^{n-1}\left(d_i - \frac{c_nd_i}{o_i}\right)\\
d_n \left(\frac{c_n - o_n}{o_n}\right) &> \sum_{i=1}^{n-1} d_i\left(1 - \frac{c_n}{o_i}\right)\\
d_n &> \frac{o_n}{c_n - o_n} \sum_{i=1}^{n-1} d_i\left(1 - \frac{c_n}{o_i}\right)
\end{align*}

If you invest exactly d_n that day, you'll break even. But if you can make sure the money you invest is greater than that quantity on the right (which requires that you have a crystal ball telling you the stock's closing price) you are guaranteed to make money!


Interesting Implications


On a more serious note though, the formula above tells us a couple of interesting things:

  1. It's impossible to make money without the closing price at some point being greater than the opening price (or vice-versa if you are short selling) - there is no amount of money you can invest that will turn things in your favor.
  2. Close prices of the past aren't important if you're concerned about the bottom line. While chart technicians use price history to make judgment calls, in the end, the closing price on anything other than the last day is irrelevant.
  3. It's possible to make money as long as there is a single day where the closing price is greater than the opening price! You might have to invest a lot to do so, but it's possible.
  4. You must make a prediction about where the stock will close if you want to know how much to invest. That is, we can set up our investment for the day to make money if the stock goes up 1%, but if it only goes up .5% we'll still lose money.
  5. It's possible the winning move is to scale back your position. Consider the scenario:
      • You invest money and the stock closes down .5% on the day
      • You invest tomorrow expecting the stock to go up 1%
      • The winning investment to break even (assuming a 1% increase) is to scale back the position, since the shares you purchased at the beginning would then be profitable

Running the simulation


So now that we've defined our investment formula, we need to tweak a couple of things in order to make an investment strategy we can actually work with. There are two issues we need to address:

  1. The formula only tells us how much to invest if we want to break even (d_n). If we actually want to turn a profit, we need to invest more than that, which we will refer to as the bias.
  2. The formula assumes we know what the closing price will be on any given day. If we don't know this, we can still invest assuming the stock price will close at a level we choose. If the price doesn't meet this objective, we try again tomorrow! This predetermined closing price will be referred to as the expectation.

Now that we've defined our bias and expectation, we can actually build a strategy we can simulate. Much like the martingale strategy told you to bet twice your previous bet in order to make money, we've designed a system that tells us how much to bet in order to make money as well.


Now, let's get to the code!

using Quandl
api_key = ""
daily_investment = function(current_open, current_close, purchase_history, open_history)
    # We're not going to safeguard against divide by 0 - that's the user's responsibility
    t1 = current_close / current_open - 1
    t2 = sum(purchase_history - purchase_history*current_close ./ open_history)
    return t2 / t1
end;

And let's code a way to run simulations quickly:

is_profitable = function(current_price, purchase_history, open_history)
    shares = sum(purchase_history ./ open_history)
    return current_price*shares > sum(purchase_history)
end

simulate = function(name, start, init, expected, bias)
    ticker_info = quandlget(name, from=start, api_key=api_key)
    open_vals = ticker_info["Open"].values
    close_vals = ticker_info["Close"].values
    invested = [init]

    # The simulation stops once we've made a profit
    day = 1
    profitable = is_profitable(close_vals[day], invested, open_vals[1:length(invested)]) ||
        is_profitable(open_vals[day+1], invested, open_vals[1:length(invested)])
    while !profitable
        expected_close = open_vals[day+1] * expected
        todays_purchase = daily_investment(open_vals[day+1], expected_close, invested, open_vals[1:day])
        invested = [invested; todays_purchase + bias]
        # expected_profit = expected_close * sum(invested ./ open_vals[1:length(invested)]) - sum(invested)
        day += 1
        profitable = is_profitable(close_vals[day], invested, open_vals[1:length(invested)]) ||
            is_profitable(open_vals[day+1], invested, open_vals[1:length(invested)])
    end

    shares = sum(invested ./ open_vals[1:length(invested)])
    max_profit = max(close_vals[day], open_vals[day+1])
    profit = shares * max_profit - sum(invested)
    return (invested, profit)
end

sim_summary = function(investments, profit)
    leverages = [sum(investments[1:i]) for i=1:length(investments)]
    max_leverage = maximum(leverages) / investments[1]
    println("Max leverage: $(max_leverage)")
    println("Days invested: $(length(investments))")
    println("Profit: $profit")
end;

Now, let's get some data and run a simulation! Our first test:

  • We'll invest 100 dollars in LMT, and expect that the stock will close up 1% every day. We'll invest d_n + 10 dollars every day that we haven't turned a profit, and end the simulation once we've made a profit.
investments, profit = simulate("YAHOO/LMT", Date(2015, 11, 29), 100, 1.01, 10)
sim_summary(investments, profit)

    Max leverage: 5.590373200042106
    Days invested: 5
    Profit: 0.6894803101560001

The result: We need to invest 5.6x our initial position over a period of 5 days to make approximately 69¢.

  • Now let's try the same thing, but we'll assume the stock closes up 2% instead.
investments, profit = simulate("YAHOO/LMT", Date(2015, 11, 29), 100, 1.02, 10)
sim_summary(investments, profit)

    Max leverage: 1.854949900247809
    Days invested: 25
    Profit: 0.08304813163696423

In this example, we only get up to a 1.85x leveraged position, but it takes 25 days to turn a profit of 8¢


Summary


We've defined an investment strategy that can tell us how much to invest when we know what the closing position of a stock will be. We can tweak the strategy to actually make money, but plenty of work needs to be done so that we can optimize the money invested.


In the next post I'm going to post more information about some backtests and strategy tests on this strategy (unless of course this experiment actually produces a significant profit potential, and then I'm keeping it for myself).


Side note and disclaimer


The claims made in this presentation about being able to guarantee making money are intended as a joke and do not constitute investment advice of any sort.

\ No newline at end of file diff --git a/2016/02/profitability-using-the-investment-formula/index.html b/2016/02/profitability-using-the-investment-formula/index.html new file mode 100644 index 0000000..5d4150a --- /dev/null +++ b/2016/02/profitability-using-the-investment-formula/index.html @@ -0,0 +1,51 @@ +Profitability using the investment formula | The Old Speice Guy
Skip to main content

Profitability using the investment formula

· 8 min read
Bradlee Speice

After developing a formula to guide our investing, how do we actually evaluate its performance in the real world?


I've previously talked about crafting an Investment Formula that would guarantee making money if you could predict which direction the stock market was going to go. This is going to be the first in a series of posts trying to flesh out what an actual investment strategy based on this formula would look like.


But first, the formula doesn't take into account two very important things: leverage, and the number of days invested. That's why I want to set up what I'm going to call the Profitability Score.


The definition is going to be very simple:

  • p: Profit made once you exit the investment
  • i: Initial investment into the asset
  • m: Maximum investment in the asset
  • l = m / i: The maximum leverage of an investment, as the ratio of maximum invested to initial investment
  • d: The number of days it takes to turn a profit

s = \frac{1000 p}{i(l + d)} = \frac{1000 p}{m + i\cdot d}


Crazy, right? The score is simply the (normalized) profit you made divided by the leverage plus days invested. The factor of 1000 is just to turn the number into something more reasonable - people don't like hearing something with a profitability score of .001, for example.
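
As a rough worked example (using the first LMT simulation from the previous post, so the numbers are approximate): with p \approx 0.69, i = 100, l \approx 5.59 (so m \approx 559), and d = 5, the score works out to

s = \frac{1000 \cdot 0.69}{559 + 100 \cdot 5} \approx 0.65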


Theoretical Justification


The formula itself is designed to be simple in principle: I like making a profit, and I want to penalize the leverage you incur and days you have to invest. Ideally, we want to have a stock that goes up all the time. However, the investment formula takes advantage of a different case: trying to profit from highly volatile assets. If we can make money when the investment only has one day up, let's do it!


Even so, there are two potential issues: First, stocks that trend upward will have a higher profitability score - both leverage and days invested will be 1. To protect against only investing in this trend, I could do things like taking \log(d). I don't want to start biasing the scoring function until I have a practical reason to do so, so for now I'll leave it standing.


The second issue is how to penalize leverage and days invested relative to each other. As it currently stands, a leverage of 6x with only 1 day invested scores the same as leveraging 2x with 5 days invested. In the future, I'd again want to look at making the impact of days invested smaller - I can get over an extra few days in the market if it means that I don't have to incur a highly leveraged position.


So there could be things about the scoring function we change in the future, but I want to run some actual tests before we start worrying about things like that!


Running a simulation


This won't be an incredibly rigorous backtest; I just want to see some results from the work so far. Let's set up the simulation code again, and start looking into some random stocks. If you've read the last blog post, you can skip over the code. The only difference is that it's been ported to Python to make the data-wrangling easier. Julia doesn't yet support some of the multi-index things I'm trying to do.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Quandl import get as qget
%matplotlib inline
api_key = ''

profitability = lambda p, i, m, d: 1000*p / (m + i*d)

def is_profitable(current_price, purchase_history, open_history):
    shares = (purchase_history / open_history).sum()
    return current_price * shares > sum(purchase_history)

def daily_investment(current_open, current_close, purchase_history, open_history):
    t1 = current_close / current_open - 1
    t2 = (purchase_history - purchase_history * current_close / open_history).sum()
    return t2 / t1

def simulate_day(open_vals, close_vals, init, expected, bias):
    invested = np.array([init])

    day = 1
    profitable = is_profitable(close_vals[day-1], invested, open_vals[0:len(invested)]) \
        or is_profitable(open_vals[day], invested, open_vals[0:len(invested)])

    while not profitable:
        expected_close = open_vals[day] * expected
        todays_purchase = daily_investment(open_vals[day], expected_close, invested, open_vals[0:day])
        invested = np.append(invested, todays_purchase + bias)
        # expected_profit = expected_close * (invested / open_vals[0:len(invested)]).sum() - invested.sum()
        day += 1
        profitable = is_profitable(close_vals[day-1], invested, open_vals[0:len(invested)]) \
            or is_profitable(open_vals[day], invested, open_vals[0:len(invested)])

    shares = (invested / open_vals[0:len(invested)]).sum()

    # Make sure we can't see into the future - we know either today's close or tomorrow's open
    # will be profitable, but we need to check which one.
    if is_profitable(close_vals[day-1], invested, open_vals[0:len(invested)]):
        ending_price = close_vals[day-1]
    else:
        ending_price = open_vals[day]

    profit = shares * ending_price - sum(invested)
    return invested, profit

def simulate_ts(name, start, end, initial, expected, bias):
    ticker_info = qget(name, trim_start=start, api_key=api_key)
    evaluation_times = ticker_info[:end].index

    # Handle Google vs. YFinance data
    if "Adjusted Close" in ticker_info.columns:
        close_column = "Adjusted Close"
    else:
        close_column = "Close"

    sim = {d: simulate_day(ticker_info[d:]["Open"], ticker_info[d:][close_column],
                           initial, expected, bias) for d in evaluation_times}
    sim_series = pd.Series(sim)
    result = pd.DataFrame()
    result["profit"] = sim_series.apply(lambda x: x[1])
    result["max"] = sim_series.apply(lambda x: max(x[0]))
    result["days"] = sim_series.apply(lambda x: len(x[0]))
    result["score"] = sim_series.apply(lambda x: profitability(x[1], x[0][0], max(x[0]), len(x[0])))
    result["investments"] = sim_series.apply(lambda x: x[0])

    return result

def simulate_tickers(tickers):
    from datetime import datetime
    results = {}
    for ticker in tickers:
        start = datetime(2015, 1, 1)
        results_df = simulate_ts(ticker, start, datetime(2016, 1, 1), 100, 1.01, 10)
        results[ticker] = results_df

    return pd.concat(list(results.values()), keys=list(results.keys()), axis=1)

And now the interesting part


Let's start looking into the data! FANG stocks have been big over the past year, let's see how they look:

fang_df = simulate_tickers(["YAHOO/FB", "YAHOO/AAPL", "YAHOO/NFLX", "YAHOO/GOOG"])
fang_df.xs('days', axis=1, level=1).hist()
plt.gcf().set_size_inches(18, 8);
plt.gcf().suptitle("Distribution of Days Until Profitability", fontsize=18);

Distribution of days until profitability chart

fang_df.xs('score', axis=1, level=1).plot()
plt.gcf().set_size_inches(18, 6)
plt.gcf().suptitle("Profitability score over time", fontsize=18);

Profitability score over time chart


Let's think about these graphs. First, the histogram. What we like seeing is a lot of 1's - that means there were a lot of days that the stock went up and we didn't have to worry about actually implementing the strategy - we were able to close the trade at a profit.


Looking at the profitability score over time though is a bit more interesting. First off, stocks that are more volatile will tend to have a higher profitability score, no two ways about that. However, Netflix consistently outperformed on this metric. We know that 2015 was a good year for Netflix, so that's a (small) sign the strategy is performing as expected.


The final interesting note happens around the end of August 2015. Around this period, the markets were selling off in a big way due to issues in China (not unlike what's happening now). Even so, all of the FANG stocks saw an uptick in profitability around this time. This is another sign that the strategy being developed performs better during periods of volatility, rather than from riding markets up or down.


What about FANG vs. some cyclicals?

cyclic_df = simulate_tickers(["YAHOO/X", "YAHOO/CAT", "YAHOO/NFLX", "YAHOO/GOOG"])
cyclic_df.xs('days', axis=1, level=1).hist()
plt.gcf().set_size_inches(18, 8);
plt.gcf().suptitle("Distribution of Days Until Profitability", fontsize=18);

Distribution of days until profitability chart

cyclic_df.xs('score', axis=1, level=1).plot()
plt.gcf().set_size_inches(18, 6)
plt.gcf().suptitle("Profitability score over time", fontsize=18);

Profitability score over time chart


Some more interesting results come from this as well. First off, US Steel (X) has a much smoother distribution of days until profitability - it doesn't have a huge number of values at 1 and then drop off. Intuitively, we're not terribly large fans of this, we want a stock to go up! However, on the profitability score it is the only serious contender to Netflix.


Second, we see the same trend around August - the algorithm performs well in volatile markets.


For a final test, let's try some biotech and ETFs!

biotech_df = simulate_tickers(['YAHOO/REGN', 'YAHOO/CELG', 'GOOG/NASDAQ_BIB', 'GOOG/NASDAQ_IBB'])
biotech_df.xs('days', axis=1, level=1).hist()
plt.gcf().set_size_inches(18, 8);
plt.gcf().suptitle("Distribution of Days Until Profitability", fontsize=18);

Distribution of days until profitability chart

biotech_df.xs('score', axis=1, level=1).plot()
plt.gcf().set_size_inches(18, 6)
plt.gcf().suptitle("Profitability score over time", fontsize=18);

Profitability score over time chart


In this example, we don't see a whole lot of interesting things: the scores are all fairly close together with notable exceptions in late August, and mid-October.


What is interesting is that during the volatile period, the ETFs performed significantly better than the stocks did in terms of profitability. The leveraged ETF (BIB) performed far above everyone else, and it appears that indeed, it is most profitable during volatile periods. Even so, it was far more likely to take multiple days to give a return. Its count of 1-day investments trails the other ETF and both stocks by a decent margin.


And consider me an OCD freak, but I just really like Celgene's distribution - it looks nice and smooth.


Summary and plans for the next post


So far I'm really enjoying playing with this strategy - there's a lot of depth here to understand, though the preliminary results seem to indicate that it profits mostly from taking the other side of a volatile trade. I'd be interested to run results later on data from January - It's been a particularly volatile start to the year so it would be neat to see whether this strategy would work then.


For the next post, I want to start playing with some of the parameters: How do the bias and expected close influence the process? The values have been fairly conservative so far, it will be interesting to see how the simulations respond afterward.

\ No newline at end of file diff --git a/2016/03/predicting-santander-customer-happiness/index.html b/2016/03/predicting-santander-customer-happiness/index.html new file mode 100644 index 0000000..c6be51a --- /dev/null +++ b/2016/03/predicting-santander-customer-happiness/index.html @@ -0,0 +1,48 @@ +Predicting Santander customer happiness | The Old Speice Guy
Skip to main content

Predicting Santander customer happiness

· 7 min read
Bradlee Speice

My first Kaggle competition.


It's time! After embarking on a Machine Learning class this semester, and with a Saturday where I don't have much planned, I wanted to put this class and training to work with my first competition submission. I want to walk you through how I'm approaching this problem, because I thought it would be really neat. The competition is Banco Santander's Santander Customer Satisfaction competition. It seemed like an easy enough problem that I could actually make decent progress on it.


Data Exploration


First up: we need to load our data and do some exploratory work. Because we're going to be using this data for model selection prior to testing, we need to make a further split. I've already gone ahead and done this work, please see the code in the appendix below.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# Record how long it takes to run the notebook - I'm curious.
from datetime import datetime
start = datetime.now()

dataset = pd.read_csv('split_train.csv')
dataset.index = dataset.ID
X = dataset.drop(['TARGET', 'ID', 'ID.1'], 1)
y = dataset.TARGET
y.unique()

    array([0, 1], dtype=int64)

len(X.columns)

    369

Okay, so there are only two classes we're predicting: 1 for unsatisfied customers, 0 for satisfied customers. I would have preferred this to be something more like a regression, or predicting multiple classes: maybe the customer isn't the most happy, but is nowhere near closing their accounts. For now though, that's just the data we're working with.


Now, I'd like to make a scatter matrix of everything going on. Unfortunately as noted above, we have 369 different features. There's no way I can graphically make sense of that much data to start with.


We're also not told what the data actually represents: Are these survey results? Average time between contact with a customer care person? Frequency of contacting a customer care person? The idea is that I need to reduce the number of dimensions we're predicting across.


Dimensionality Reduction pt. 1 - Binary Classifiers


My first attempt to reduce the data dimensionality is to find all the binary classifiers in the dataset (i.e. 0 or 1 values) and see if any of those are good (or anti-good) predictors of the final data.

cols = X.columns
b_class = []
for c in cols:
    if len(X[c].unique()) == 2:
        b_class.append(c)

len(b_class)

    111

So there are 111 features in the dataset that are a binary label. Let's see if any of them are good at predicting the users satisfaction!

# First we need to `binarize` the data to 0-1; some of the labels are {0, 1},
# some are {0, 3}, etc.
from sklearn.preprocessing import binarize
X_bin = binarize(X[b_class])

accuracy = [np.mean(X_bin[:,i] == y) for i in range(0, len(b_class))]
acc_df = pd.DataFrame({"Accuracy": accuracy}, index=b_class)
acc_df.describe()

             Accuracy
    count  111.000000
    mean     0.905159
    std      0.180602
    min      0.043598
    25%      0.937329
    50%      0.959372
    75%      0.960837
    max      0.960837

Wow! Looks like we've got some incredibly predictive features! So much so that we should be a bit concerned. My initial guess for what's happening is that we have a sparsity issue: so many of the values are 0, and these likely happen to line up with satisfied customers.


So the question we must now answer, which I likely should have asked long before now: What exactly is the distribution of un/satisfied customers?

unsat = y[y == 1].count()
print("Satisfied customers: {}; Unsatisfied customers: {}".format(len(y) - unsat, unsat))
naive_guess = np.mean(y == np.zeros(len(y)))
print("Naive guess accuracy: {}".format(naive_guess))

    Satisfied customers: 51131; Unsatisfied customers: 2083
    Naive guess accuracy: 0.9608561656706882

This is a bit discouraging. A naive guess of "always satisfied" performs as well as our best individual binary classifier. What this tells me then, is that these data columns aren't incredibly helpful in prediction. I'd be interested in a polynomial expansion of this data-set, but for now, that's more computation than I want to take on.
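
For reference - a hypothetical sketch of mine, not something this analysis runs - the expansion would look like the following with scikit-learn. With 111 binary columns, degree-2 interactions alone produce over 6,000 features, which is exactly the computation I'm avoiding:

from sklearn.preprocessing import PolynomialFeatures
# Degree-2 interaction terms of the binary columns from above (X_bin)
X_poly = PolynomialFeatures(degree=2, interaction_only=True).fit_transform(X_bin)
print(X_poly.shape)  # (n_samples, 6217): 1 bias + 111 linear + 6105 pairwise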


Dimensionality Reduction pt. 2 - LDA


Knowing that our naive guess performs so well is a blessing and a curse:

  • Curse: The threshold for performance is incredibly high: We can only "improve" over the naive guess by 4%
  • Blessing: All the binary classification features we just discovered are worthless on their own. We can throw them out and reduce the data dimensionality from 369 to 258.

Now, in removing these features from the dataset, I'm not saying that there is no "information" contained within them. There might be. But the only way we'd know is through a polynomial expansion, and I'm not going to take that on within this post.


My initial thought for a "next guess" is to use the LDA model for dimensionality reduction. However, it can only reduce dimensions to p - 1, with p being the number of classes. Since this is a binary classification, every LDA model that I try will have dimensionality one; when I actually try this, the predictor ends up being slightly less accurate than the naive guess.


Instead, let's take a different approach to dimensionality reduction: principal component analysis. This allows us to perform the dimensionality reduction without worrying about the number of classes. Then, we'll use a Gaussian Naive Bayes model to actually do the prediction. This model is chosen simply because it doesn't take a long time to fit and compute; because PCA will take so long, I just want a prediction at the end of this. We can worry about using a more sophisticated LDA/QDA/SVM model later.

+

Now into the actual process: we're going to test out PCA dimensionality reduction from 1 to 20 dimensions, and then predict using a Gaussian Naive Bayes model. The upper limit of 20 dimensions was selected because the accuracy never improves past that point (I found this out by running it myself). Hopefully, we'll find that we can create a model better than the naive guess.

+
from sklearn.naive_bayes import GaussianNB
from sklearn.decomposition import PCA

X_no_bin = X.drop(b_class, 1)

def evaluate_gnb(dims):
    pca = PCA(n_components=dims)
    X_xform = pca.fit_transform(X_no_bin)

    gnb = GaussianNB()
    gnb.fit(X_xform, y)
    return gnb.score(X_xform, y)

dim_range = np.arange(1, 21)
plt.plot(dim_range, [evaluate_gnb(dim) for dim in dim_range], label="Gaussian NB Accuracy")
plt.axhline(naive_guess, label="Naive Guess", c='k')
plt.axhline(1 - naive_guess, label="Inverse Naive Guess", c='k')
plt.gcf().set_size_inches(12, 6)
plt.legend();
+

png

+

sigh... After all the effort and computational power, we're still at square one: we have yet to beat the naive guess threshold. With PCA in play we end up performing terribly, though not so terribly that we could profit by simply betting against our own predictions.

+

Let's try one last-ditch attempt using the entire data set:

+
def evaluate_gnb_full(dims):
    pca = PCA(n_components=dims)
    X_xform = pca.fit_transform(X)

    gnb = GaussianNB()
    gnb.fit(X_xform, y)
    return gnb.score(X_xform, y)

dim_range = np.arange(1, 21)
# Note: this needs to call the full-dataset version defined above
plt.plot(dim_range, [evaluate_gnb_full(dim) for dim in dim_range], label="Gaussian NB Accuracy")
plt.axhline(naive_guess, label="Naive Guess", c='k')
plt.axhline(1 - naive_guess, label="Inverse Naive Guess", c='k')
plt.gcf().set_size_inches(12, 6)
plt.legend();
+

png

+

Nothing. It is interesting to note that the graphs are almost exactly the same: This would imply again that the variables we removed earlier (all the binary classifiers) indeed have almost no predictive power. It seems this problem is high-dimensional, but with almost no data that can actually inform our decisions.

+

Summary for Day 1

+

After spending a couple hours with this dataset, there seems to be a fundamental issue in play: We have very high-dimensional data, and it has no bearing on our ability to actually predict customer satisfaction. This can be a huge issue: it implies that no matter what model we use, we fundamentally can't perform well. I'm sure most of this is because I'm not an experienced data scientist. Even so, we have yet to develop a strategy that can actually beat out the village idiot; so far, the bank is best off just assuming all its customers are satisfied. Hopefully more to come soon.

+
end = datetime.now()
print("Running time: {}".format(end - start))
+
    Running time: 0:00:58.715714
+

Appendix

+

Code used to split the initial training data:

+
from sklearn.cross_validation import train_test_split
data = pd.read_csv('train.csv')
data.index = data.ID

data_train, data_validate = train_test_split(
    data, train_size=.7)

data_train.to_csv('split_train.csv')
data_validate.to_csv('split_validate.csv')
\ No newline at end of file diff --git a/2016/03/tweet-like-me/index.html b/2016/03/tweet-like-me/index.html new file mode 100644 index 0000000..a1dbf92 --- /dev/null +++ b/2016/03/tweet-like-me/index.html @@ -0,0 +1,59 @@ +Tweet like me | The Old Speice Guy

Tweet like me

· 9 min read
Bradlee Speice

In which I try to create a robot that will tweet like I tweet.

+

So, I'm taking a Machine Learning course this semester in school, and one of the topics we keep coming back to is natural language processing and the 'bag of words' data structure. That is, given a sentence:

+

How much wood would a woodchuck chuck if a woodchuck could chuck wood?

+

We can represent that sentence as the following list:

+

{ How: 1, much: 1, wood: 2, would: 1, a: 2, woodchuck: 2, chuck: 2, if: 1, could: 1 }

+

Ignoring where the words happened, we're just interested in how often the words occurred. That got me thinking: I wonder what would happen if I built a robot that just imitated how often I said things? It's dangerous territory when computer scientists ask "what if," but I got curious enough I wanted to follow through.
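
As a minimal sketch of the idea (my addition, using only the standard library):

+
# A bag of words is just a count of word occurrences, ignoring order.
from collections import Counter

sentence = "How much wood would a woodchuck chuck if a woodchuck could chuck wood"
bag = Counter(sentence.split())
+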

+

The Objective

+

Given an input list of Tweets, build up the following things:

+
  1. The distribution of starting words; since there are no "prior" words to go from, we need to treat this as a special case.
  2. The distribution of words given a previous word; for example, every time I use the word woodchuck in the example sentence, there is a 50% chance it is followed by chuck and a 50% chance it is followed by could. I need this distribution for all words.
  3. The distribution of quantity of hashtags; do I most often use just one? Two? Do they follow something like a Poisson distribution?
  4. The distribution of hashtags; given a number of hashtags, what is the actual content? I'll treat hashtags as separate from the content of a tweet.
+

The Data

+

I'm using as input my tweet history. I don't really use Twitter anymore, but it seems like a fun use of the dataset. I'd like to eventually build this to a point where I can imitate anyone on Twitter using their last 100 tweets or so, but I'll start with this as example code.

+

The Algorithm

+

I'll be using the NLTK library for doing a lot of the heavy lifting. First, let's import the data:

+
import pandas as pd

tweets = pd.read_csv('tweets.csv')
text = tweets.text

# Don't include tweets in reply to or mentioning people
replies = text.str.contains('@')
text_norep = text.loc[~replies]
+

And now that we've got data, let's start crunching. First, tokenize and build out the distribution of first word:

+
from nltk.tokenize import TweetTokenizer
tknzr = TweetTokenizer()
tokens = text_norep.map(tknzr.tokenize)

first_words = tokens.map(lambda x: x[0])
first_words_alpha = first_words[first_words.str.isalpha()]
first_word_dist = first_words_alpha.value_counts() / len(first_words_alpha)
+

Next, we need to build out the conditional distributions. That is, what is the probability of the next word given that the current word is $X$? This one is a bit more involved. First, find all unique words, and then find what words follow them. This can probably be done in a more efficient manner than I'm currently doing here, but we'll ignore that for the moment.

+
from functools import reduce

# Get all possible words
all_words = reduce(lambda x, y: x+y, tokens, [])
unique_words = set(all_words)
# Filter out tokens that start with '.'
actual_words = set(x for x in unique_words if x[0] != '.')

word_dist = {}
for word in iter(actual_words):
    indices = [i for i, j in enumerate(all_words) if j == word]
    # Guard against a match at the very end of the token list
    proceeding = [all_words[i+1] for i in indices if i + 1 < len(all_words)]
    word_dist[word] = proceeding
+

Now that we've got the tweet analysis done, it's time for the fun part: hashtags! Let's count how many hashtags are in each tweet; I want to get a sense of the distribution.

+
import matplotlib.pyplot as plt
%matplotlib inline

hashtags = text_norep.str.count('#')
bins = hashtags.unique().max()
hashtags.plot(kind='hist', bins=bins)
+
    <matplotlib.axes._subplots.AxesSubplot at 0x18e59dc28d0>
+

png

+

That looks like a Poisson distribution, kind of as I expected. I'm guessing my number of hashtags per tweet is $\sim Poi(1)$, but let's actually find the maximum likelihood estimator, which in this case is just $\bar{\lambda}$:

+
mle = hashtags.mean()
mle
+
    0.870236869207003
+

Pretty close! So we can now simulate how many hashtags are in a tweet. Let's also find what hashtags are actually used:

+
hashtags = [x for x in all_words if x[0] == '#']
n_hashtags = len(hashtags)

unique_hashtags = list(set([x for x in unique_words if x[0] == '#']))
hashtag_dist = pd.DataFrame({'hashtags': unique_hashtags,
                             'prob': [all_words.count(h) / n_hashtags
                                      for h in unique_hashtags]})
len(hashtag_dist)
+
    603
+

Turns out I have used 603 different hashtags during my time on Twitter. That means I was using a unique hashtag for about every third tweet.

+

In better news though, we now have all the data we need to go about actually constructing tweets! The process will happen in a few steps:

+
  1. Randomly select what the first word will be.
  2. Randomly select the number of hashtags for this tweet, and then select the actual hashtags.
  3. Fill in the remaining space of 140 characters with random words taken from my tweets.
+

And hopefully, we won't have anything too crazy come out the other end. The way we do the selection follows a Multinomial Distribution: given a lot of different values with specific probability, pick one. Let's give a quick example:

+
x: .33
y: .5
z: .17
+

That is, I pick x with probability 33%, y with probability 50%, and so on. In the context of our sentence construction, I've built out the probabilities of specific words already - now I just need to simulate that distribution.
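
As a quick illustration (my addition, not the post's original code), a single draw over the x/y/z example looks like this:

+
# One multinomial draw over [x, y, z]; the result is a count vector
# like array([0, 1, 0]), meaning "y" was picked this time.
import numpy as np

np.random.multinomial(1, [.33, .5, .17])
+

Time for the engine to actually be developed!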

+
import numpy as np

def multinom_sim(n, vals, probs):
    occurrences = np.random.multinomial(n, probs)
    results = occurrences * vals
    return ' '.join(results[results != ''])

def sim_n_hashtags(hashtag_freq):
    return np.random.poisson(hashtag_freq)

def sim_hashtags(n, hashtag_dist):
    return multinom_sim(n, hashtag_dist.hashtags, hashtag_dist.prob)

def sim_first_word(first_word_dist):
    probs = np.float64(first_word_dist.values)
    return multinom_sim(1, first_word_dist.reset_index()['index'], probs)

def sim_next_word(current, word_dist):
    dist = pd.Series(word_dist[current])
    probs = np.ones(len(dist)) / len(dist)
    return multinom_sim(1, dist, probs)
+

Pulling it all together

+

I've now built out all the code I need to actually simulate a sentence written by me. Let's try doing an example with five words and a single hashtag:

+
first = sim_first_word(first_word_dist)
second = sim_next_word(first, word_dist)
third = sim_next_word(second, word_dist)
fourth = sim_next_word(third, word_dist)
fifth = sim_next_word(fourth, word_dist)
hashtag = sim_hashtags(1, hashtag_dist)

' '.join((first, second, third, fourth, fifth, hashtag))
+
    'My first all-nighter of friends #oldschool'
+

Let's go ahead and put everything together! We're going to simulate a first word, simulate the hashtags, and then simulate to fill the gap until we've either taken up all the space or reached a period.

+
def simulate_tweet():
    chars_remaining = 140
    first = sim_first_word(first_word_dist)
    n_hash = sim_n_hashtags(mle)
    hashtags = sim_hashtags(n_hash, hashtag_dist)

    chars_remaining -= len(first) + len(hashtags)

    tweet = first
    current = first
    while chars_remaining > len(tweet) + len(hashtags) and current[0] != '.' and current[0] != '!':
        current = sim_next_word(current, word_dist)
        tweet += ' ' + current

    # Drop the space between the last word and its trailing punctuation
    tweet = tweet[:-2] + tweet[-1]

    return ' '.join((tweet, hashtags)).strip()
+

The results

+

And now for something completely different: twenty random tweets dreamed up by my computer and my Twitter data. Here you go:

+
for i in range(0, 20):
    print(simulate_tweet())
    print()
+
    Also , I'm at 8 this morning. #thursdaysgohard #ornot

Turns out of us breathe the code will want to my undergraduate career is becoming more night trying ? Religion is now as a chane #HYPE

You know what recursion is to review the UNCC. #ornot

There are really sore 3 bonfires in my first writing the library ground floor if awesome. #realtalk #impressed

So we can make it out there's nothing but I'm not let us so hot I could think I may be good. #SwingDance

Happy Christmas , at Harris Teeter to be be godly or Roman Catholic ). #4b392b#4b392b #Isaiah26

For context , I in the most decisive factor of the same for homework. #accomplishment

Freaking done. #loveyouall

New blog post : Don't jump in a quiz in with a knife fight. #haskell #earlybirthday

God shows me legitimately want to get some food and one day.

Stormed the queen city. #mindblown

The day of a cold at least outside right before the semester ..

Finished with the way back. #winners

Waking up , OJ , I feel like Nick Jonas today.

First draft of so hard drive. #humansvszombies

Eric Whitacre is the wise creation.

Ethics paper first , music in close to everyone who just be posting up with my sin , and Jerry Springr #TheLittleThings

Love that you know enough time I've eaten at 8 PM. #deepthoughts #stillblownaway

Lead. #ThinkingTooMuch #Christmas

Aamazing conference when you married #DepartmentOfRedundancyDepartment Yep , but there's a legitimate challenge.
+

...Which all ended up being a whole lot more nonsensical than I had hoped for. There are some good ones, so I'll call that an accomplishment! I was banking on grammar not being an issue: since my tweets use impeccable grammar, the program modeled off them should have pretty good grammar as well. There are going to be some hilarious edge cases (I'm looking at you, Ethics paper first, music in close to everyone) that make no sense, and some hilarious edge cases (Waking up, OJ, I feel like Nick Jonas today) that make me feel like I should have a Twitter rap career. On the whole though, the structure came out alright.

+

Moving on from here

+

During class we also talked about an interesting idea: trying to analyze corporate documents and corporate speech. I'd be interested to know what this analysis could do when applied to something like a couple of bank press releases. In any case, the code needs some cleanup work before I get that far.

+

For further reading

+

I'm pretty confident I re-invented a couple wheels along the way - what I'm doing feels a lot like what Markov Chain Monte Carlo is intended to do. But I've never worked explicitly with that before, so more research is needed.

\ No newline at end of file diff --git a/2016/04/tick-tock/index.html b/2016/04/tick-tock/index.html new file mode 100644 index 0000000..cd1113e --- /dev/null +++ b/2016/04/tick-tock/index.html @@ -0,0 +1,83 @@ +Tick tock... | The Old Speice Guy

Tick tock...

· 11 min read
Bradlee Speice

If all we have is a finite number of heartbeats left, what about me?

+

Warning: this one is a bit creepier. But that's what you get when you come up with data science ideas as you're drifting off to sleep.

+

2.5 Billion

+

If PBS is right, that's the total number of heartbeats we get. Approximately once every second that number goes down, and down, and down again...

+
total_heartbeats = 2500000000
+

I got a Fitbit this past Christmas season, mostly because I was interested in the data and trying to work on some data science projects with it. This is going to be the first project, but there will likely be more (and not nearly as morbid). My idea was: if this is the final number that I'm running up against, how far have I come, and how far am I likely to go? I've currently had about 3 months' time to estimate what my data will look like, so let's go ahead and see: given a lifetime of 2.5 billion heartbeats, how much time do I have left?
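
As a quick back-of-the-envelope aside (mine, not part of the original analysis), one beat per second puts 2.5 billion beats right in the neighborhood of a human lifespan:

+
# At roughly one heartbeat per second, 2.5 billion beats last about 79 years.
seconds_per_year = 60 * 60 * 24 * 365.25
print(total_heartbeats / seconds_per_year)  # ~79.2
+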

+

Statistical Considerations

+

Since I'm starting to work with health data, there are a few considerations I think are important before I start digging through my data.

+
  1. The concept of 2.5 billion as an agreed-upon number is tenuous at best. I've seen anywhere from 2.21 billion to 3.4 billion, so even if I knew exactly how many times my heart had beaten so far, the ending result is suspect at best. I'm using 2.5 billion because that seems to be about the midpoint of the estimates I've seen so far.
  2. Most of the numbers I've seen so far are based on extrapolating the number of heartbeats from life expectancy. As life expectancy goes up, the number of expected heartbeats goes up too.
  3. My estimation of the number of heartbeats in my life so far is based on 3 months' worth of data, and I'm extrapolating an entire lifetime based on this.
+

So while the ending number is not useful in any medical context, it is still an interesting project to work with the data I have on hand.

+

Getting the data

+

Fitbit has an API available for people to pull their personal data off the system. It requires registering an application, authenticating with OAuth, and some other complicated things. If you're not interested in how I fetch the data, skip ahead.

+

Registering an application

+

I've already registered a personal application with Fitbit, so I can go ahead and retrieve things like the client secret from a file.

+
# Import all the OAuth secret information from a local file
from secrets import CLIENT_SECRET, CLIENT_ID, CALLBACK_URL
+

Handling OAuth 2

+

So, all the people that know what OAuth 2 is know what's coming next. For those who don't: OAuth is how people allow applications to access other data without having to know your password. Essentially the dialog goes like this:

+
Application: I've got a user here who wants to use my application, but I need their data.
Fitbit: OK, what data do you need access to, and for how long?
Application: I need all of these scopes, and for this amount of time.
Fitbit: OK, let me check with the user to make sure they really want to do this.

Fitbit: User, do you really want to let this application have your data?
User: I do! And to prove it, here's my password.
Fitbit: OK, everything checks out. I'll let the application access your data.

Fitbit: Application, you can access the user's data. Use this special value whenever you need to request data from me.
Application: Thank you, now give me all the data.
+

Effectively, this allows an application to gain access to a user's data without ever needing to know the user's password. That way, even if the other application is hacked, the user's original data remains safe. Plus, the user can let the data service know to stop providing the application access any time they want. All in all, very secure.

+

It does make handling small requests a bit challenging, but I'll go through the steps here. We'll be using the Implicit Grant workflow, as it requires fewer steps in processing.

+

First, we need to set up the URL the user would visit to authenticate:

+
import urllib

FITBIT_URI = 'https://www.fitbit.com/oauth2/authorize'
params = {
    # If we need more than one scope, must be a CSV string
    'scope': 'heartrate',
    'response_type': 'token',
    'expires_in': 86400,  # 1 day
    'redirect_uri': CALLBACK_URL,
    'client_id': CLIENT_ID
}

request_url = FITBIT_URI + '?' + urllib.parse.urlencode(params)
+

Now, here you would print out the request URL, go visit it, and get the full URL that it sends you back to. Because that is very sensitive information (specifically containing my CLIENT_ID that I'd really rather not share on the internet), I've skipped that step in the code here, but it happens in the background.

+
# The `response_url` variable contains the full URL that
# FitBit sent back to us, but most importantly,
# contains the token we need for authorization.
access_token = dict(urllib.parse.parse_qsl(response_url))['access_token']
+

Requesting the data

+

Now that we've actually set up our access via the access_token, it's time to get the actual heart rate data. I'll be using data from January 1, 2016 through March 31, 2016, and extrapolating wildly from that.

+

Fitbit only lets us fetch intraday data one day at a time, so I'll create a date range using pandas and iterate through that to pull down all the data.

+
from requests_oauthlib import OAuth2Session
import pandas as pd
from datetime import datetime

session = OAuth2Session(token={
    'access_token': access_token,
    'token_type': 'Bearer'
})

format_str = '%Y-%m-%d'
start_date = datetime(2016, 1, 1)
end_date = datetime(2016, 3, 31)
dr = pd.date_range(start_date, end_date)

url = 'https://api.fitbit.com/1/user/-/activities/heart/date/{0}/1d/1min.json'
hr_responses = [session.get(url.format(d.strftime(format_str))) for d in dr]

def record_to_df(record):
    if 'activities-heart' not in record:
        return None
    date_str = record['activities-heart'][0]['dateTime']
    df = pd.DataFrame(record['activities-heart-intraday']['dataset'])

    df.index = df['time'].apply(
        lambda x: datetime.strptime(date_str + ' ' + x, '%Y-%m-%d %H:%M:%S'))
    return df

hr_dataframes = [record_to_df(record.json()) for record in hr_responses]
hr_df_concat = pd.concat(hr_dataframes)


# There are some minutes with missing data, so we need to correct that
full_daterange = pd.date_range(hr_df_concat.index[0],
                               hr_df_concat.index[-1],
                               freq='min')
hr_df_full = hr_df_concat.reindex(full_daterange, method='nearest')

print("Heartbeats from {} to {}: {}".format(hr_df_full.index[0],
                                            hr_df_full.index[-1],
                                            hr_df_full['value'].sum()))
+
    Heartbeats from 2016-01-01 00:00:00 to 2016-03-31 23:59:00: 8139060
+

And now we've retrieved all the available heart rate data for January 1st through March 31st! Let's get to the actual analysis.

+

Wild Extrapolations from Small Data

+

A fundamental issue of this data is that it's pretty small. I'm using 3 months of data to make predictions about my entire life. But, purely as an exercise, I'll move forward.

+

How many heartbeats so far?

+

The first step is figuring out how many of the 2.5 billion heartbeats I've used so far. We're going to try and work backward from the present day to when I was born to get that number. The easy part comes first: going back to January 1st, 1992. That's because I can count how many 3-month increments there were between now and then, account for leap years, and call that section done.

+

Between January 1992 and January 2016 there were 96 quarters, and 6 leap days. The number we're looking for is:

\begin{equation*}
hr_q \cdot n - hr_d \cdot (n - m)
\end{equation*}
  • $hr_q$: Number of heartbeats per quarter
  • $hr_d$: Number of heartbeats on leap day
  • $n$: Number of quarters, in this case 96
  • $m$: Number of leap days, in this case 6
+
quarterly_count = hr_df_full['value'].sum()
leap_day_count = hr_df_full[(hr_df_full.index.month == 2) &
                            (hr_df_full.index.day == 29)]['value'].sum()
num_quarters = 96
leap_days = 6

jan_92_jan_16 = quarterly_count * num_quarters - leap_day_count * (num_quarters - leap_days)
jan_92_jan_16
+
    773609400
+

So between January 1992 and January 2016 I've used approximately 774 million heartbeats. Now, I need to go back to my exact birthday. I'm going to first find on average how many heartbeats I use in a minute, and multiply that by the number of minutes between my birthday and January 1992.

+

For privacy purposes I'll put the code here that I'm using, but without any identifying information:

+
minute_mean = hr_df_full['value'].mean()
# Don't you wish you knew?
# birthday_minutes = ???

birthday_heartbeats = birthday_minutes * minute_mean

heartbeats_until_2016 = int(birthday_heartbeats + jan_92_jan_16)
remaining_2016 = total_heartbeats - heartbeats_until_2016

print("Heartbeats so far: {}".format(heartbeats_until_2016))
print("Remaining heartbeats: {}".format(remaining_2016))
+
    Heartbeats so far: 775804660
Remaining heartbeats: 1724195340
+

It would appear that my heart has beaten 775,804,660 times between my moment of birth and January 1st 2016, and that I have 1.72 billion left.

+

How many heartbeats longer?

+

Now comes the tricky bit. I know how many heartbeats I've used so far, and how many I have remaining, so I'd like to come up with a (relatively) accurate estimate of when exactly my heart should give out. We'll do this in a few steps, increasing in granularity.

+

First step, how many heartbeats do I use in a 4-year period? I have data for a single quarter including leap day, so I want to know:

\begin{equation*}
hr_q \cdot n - hr_d \cdot (n - m)
\end{equation*}
  • $hr_q$: Heartbeats per quarter
  • $hr_d$: Heartbeats per leap day
  • $n$: Number of quarters = 16
  • $m$: Number of leap days = 1
+
heartbeats_4year = quarterly_count * 16 - leap_day_count * (16 - 1)
heartbeats_4year
+
    128934900
+

Now, I can fast-forward from 2016 by the number of 4-year periods I have left.

+
four_year_periods = remaining_2016 // heartbeats_4year
remaining_4y = remaining_2016 - four_year_periods * heartbeats_4year

print("Four year periods remaining: {}".format(four_year_periods))
print("Remaining heartbeats after 4 year periods: {}".format(remaining_4y))
+
    Four year periods remaining: 13
Remaining heartbeats after 4 year periods: 48041640
+

Given that there are 13 four-year periods left, I can move from 2016 all the way to 2068, and find that I will have 48 million heartbeats left. Let's drop down to figuring out how many quarters that is. I know that 2068 will have a leap day (unless someone finally decides to get rid of them), so I'll subtract that out first. Then, I'm left to figure out how many quarters exactly are left.

+
remaining_leap = remaining_4y - leap_day_count
# Ignore leap day in the data set
heartbeats_quarter = hr_df_full[~((hr_df_full.index.month == 2) &
                                  (hr_df_full.index.day == 29))]['value'].sum()
quarters_left = remaining_leap // heartbeats_quarter
remaining_year = remaining_leap - quarters_left * heartbeats_quarter

print("Quarters left starting 2068: {}".format(quarters_left))
print("Remaining heartbeats after that: {}".format(remaining_year))
+
    Quarters left starting 2068: 8
Remaining heartbeats after that: 4760716
+

So, that analysis gets me through until January 1st 2070. Final step, using that minute estimate to figure out how many minutes past that I'm predicted to have:

+
from datetime import timedelta

base = datetime(2070, 1, 1)
minutes_left = remaining_year // minute_mean

kaput = timedelta(minutes=minutes_left)
base + kaput
+
    datetime.datetime(2070, 2, 23, 5, 28)
+

According to this, I've got until February 23rd, 2070 at 5:28 in the morning before my heart gives out.

+

Summary

+

Well, that's kind of a creepy date to know. As I said at the top though, this number is totally useless in any medical context. It ignores the rate at which we continue to get better at making people live longer, and extrapolates the rest of my life from 3 months' worth of data. Additionally, throughout my time developing this post I made many minor mistakes. I think they're all fixed now, but it's easy to mix up a number here or there and throw the analysis off by a couple of years.

+

Even still, I think philosophically humans have a desire to know how much time we have left in the world. Man is but a breath, and it's scary to think just how quickly that date may be coming up. This analysis asks an important question though: what are you going to do with the time you have left?

+

Thanks for sticking with me on this one, I promise it will be much less depressing next time!

\ No newline at end of file diff --git a/2016/05/the-unfair-casino/index.html b/2016/05/the-unfair-casino/index.html new file mode 100644 index 0000000..741d3c7 --- /dev/null +++ b/2016/05/the-unfair-casino/index.html @@ -0,0 +1,180 @@ +The unfair casino | The Old Speice Guy

The unfair casino

· 15 min read
Bradlee Speice

Trying to figure out how exactly two dice are loaded in a cheating casino.

+

In the ongoing eternal cycle of mathematicians asking generally useless questions about probability, I dreamt up another one. The scenario is as follows:

+

You're playing a game with two dice, and you do not get to see what the outcome of the dice is on each roll. All you get to see is their sum. Given an arbitrarily long list of the sums of two rolls, can you determine if one or both dice are loaded, and what those loadings are?

+

Proving we can detect cheating

+

My first question is simply: is this possible? There are a lot of trivial cases that make it obvious that there's cheating going on. But there are some edge cases that might give us more difficulty. First though, let's get a picture of what the fair distribution looks like. In principle, we can only detect cheating if the distribution of the fair dice differs from the distribution of the loaded dice.

+
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

fair_1 = np.random.randint(1, 7, 10000)
fair_2 = np.random.randint(1, 7, 10000)

pd.Series(fair_1 + fair_2).plot(kind='hist', bins=11);
plt.title('Fair Distribution');
+

png

+

This distribution makes sense: there are many ways to make a 7 (the most frequent observed value) and very few ways to make a 12 or 2; an important symmetry. As a special note, you can notice that the sum of two fair dice is a discrete case of the Triangle Distribution, which is itself a special case of the Irwin-Hall Distribution.

+

The Edge Cases

+

Given that we understand how the results of two fair dice are distributed, let's see some of the interesting edge cases that come up. This will give us assurance that when a casino is cheating, it is detectable (given sufficient data). To make this as hard as possible, we will think of scenarios where the expected value of the sum of loaded dice is the same as the expected value of the sum of fair dice.

+

Edge Case 1

+

What happens when one die is biased low, and one die is biased high? That is, where:

\begin{align*}
\begin{array}{cc}
D_1 = \left\{
\begin{array}{lr}
1 & w.p. 1/3\\
2 & w.p. 1/3\\
3 & w.p. 1/12\\
4 & w.p. 1/12\\
5 & w.p. 1/12\\
6 & w.p. 1/12
\end{array}
\right. &
D_2 = \left\{
\begin{array}{lr}
1 & w.p. 1/12\\
2 & w.p. 1/12\\
3 & w.p. 1/12\\
4 & w.p. 1/12\\
5 & w.p. 1/3\\
6 & w.p. 1/3
\end{array}
\right. \\
\mathbb{E}[D_1] = 2.5 & \mathbb{E}[D_2] = 4.5
\end{array}\\
\mathbb{E}[D_1 + D_2] = 7 = \mathbb{E}[D_{fair} + D_{fair}]
\end{align*}
def unfair_die(p_vals, n):
    x = np.random.multinomial(1, p_vals, n)
    return x.nonzero()[1] + 1

d1 = [1/3, 1/3, 1/12, 1/12, 1/12, 1/12]
d2 = [1/12, 1/12, 1/12, 1/12, 1/3, 1/3]

x1 = unfair_die(d1, 10000)
x2 = unfair_die(d2, 10000)

pd.Series(x1 + x2).plot(kind='hist', bins=11);
plt.title('$D_1$ biased low, $D_2$ biased high');
+

png

+

We can see that while the 7 value remains the most likely (as expected), the distribution is not so nicely shaped any more.

+

Edge Case 2

+

When one die is loaded low, and one is loaded high, we've seen how we can detect it. How about when two dice are loaded both low and high? That is, we have the following distribution:

\begin{align*}
\begin{array}{cc}
D_1 = \left\{
\begin{array}{lr}
1 & w.p. 1/3\\
2 & w.p. 1/12\\
3 & w.p. 1/12\\
4 & w.p. 1/12\\
5 & w.p. 1/12\\
6 & w.p. 1/3
\end{array}
\right. &
D_2 = \left\{
\begin{array}{lr}
1 & w.p. 1/3\\
2 & w.p. 1/12\\
3 & w.p. 1/12\\
4 & w.p. 1/12\\
5 & w.p. 1/12\\
6 & w.p. 1/3
\end{array}
\right. \\
\mathbb{E}[D_1] = 3.5 & \mathbb{E}[D_2] = 3.5
\end{array}\\
\mathbb{E}[D_1 + D_2] = 7 = \mathbb{E}[D_{fair} + D_{fair}]
\end{align*}

We can see that even the expected value of each individual die is the same as that of the fair die! However, the distribution (if we are doing this correctly) should still be skewed:

+
d1 = [1/3, 1/12, 1/12, 1/12, 1/12, 1/3]
d2 = d1

x1 = unfair_die(d1, 10000)
x2 = unfair_die(d2, 10000)

pd.Series(x1 + x2).plot(kind='hist', bins=11)
plt.title("$D_1$ and $D_2$ biased to 1 and 6");
+

png

+

In a very un-subtle way, we have of course made the values 2 and 12 far more likely.

+

Detection Conclusion

+

There are some trivial examples of cheating that are easy to detect: whenever the expected value of the observed sums deviates from the expected value for the sum of two fair dice, we can immediately conclude that there is cheating going on.

+

The interesting edge cases occur when the expected value of the sum of loaded dice matches the expected value of the sum of fair dice. Considering the above examples (and a couple more I ran through in developing this), we have seen that in every circumstance having two unfair dice leads to a distribution of results different from the fair results.

+

We can thus finally state: just by looking at the distribution of results from this game, we can immediately conclude whether there is cheating.

+

Simulated Annealing

+

What we really would like to do though, is see if there is any way to determine how exactly the dice are loaded. This is significantly more complicated, but we can borrow some algorithms from Machine Learning to figure out exactly how to perform this process. I'm using the Simulated Annealing algorithm, and I discuss why this works and why I chose it over some of the alternatives in the justification. If you don't care about how I set up the model and just want to see the code, check out the actual code.

+

Simulated Annealing is a variation of the Metropolis-Hastings Algorithm, but the important thing for us is: Simulated Annealing allows us to quickly optimize high-dimensional problems. But what exactly are we trying to optimize? Ideally, we want a function that can tell us whether one distribution for the dice better explains the results than another distribution. This is known as the likelihood function.

+

Deriving the Likelihood function

+

To derive our likelihood function, we want to know: what is the probability of seeing a specific result given those hidden parameters? This is actually a surprisingly difficult problem. While we can do a lot of calculations by hand, we need a more general solution since we will be working with some very interesting die distributions.

+

We first note that the sum of two dice can take on 11 different values - 2 through 12. This implies that each individual sum follows a Categorical distribution. That is:

\begin{align*}
\mathcal{L}(x) = \left\{
\begin{array}{lr}
p_2 & x = 2\\
p_3 & x = 3\\
\ldots & \\
p_{11} & x = 11\\
p_{12} & x = 12
\end{array}
\right.
\end{align*}

Where each $p_i$ is the probability of seeing that specific result. However, we need to calculate what each probability is! I'll save you the details, but this author explains how to do it.

+

Now, we would like to know the likelihood of our entire data-set. This is trivial:

\begin{align*}
\mathcal{L}(\mathbf{X}) &= \prod_{i=1}^n \mathcal{L}(x_i)
\end{align*}

However, it's typically much easier to work with the $\log(\mathcal{L})$ function instead. This is critically important from a computational perspective: when you multiply so many small numbers together (i.e. the product of $\mathcal{L}(x_i)$ terms) the computer suffers from rounding error; if we don't control for this, we will find that no matter the distributions we choose for each die, the "likelihood" will be close to zero because the computer is not precise enough.

\begin{align*}
\log(\mathcal{L}) &= \sum_{i=1}^n \log(\mathcal{L}(x_i))
\end{align*}
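
A quick numerical illustration of the rounding problem (my sketch, not part of the original post):

+
# Multiplying 400 probabilities of .1 each underflows to exactly 0.0,
# while the equivalent sum of logs stays perfectly representable.
import numpy as np

probs = np.repeat(.1, 400)
print(np.prod(probs))         # 0.0
print(np.sum(np.log(probs)))  # -921.03...
+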

The process of Simulated Annealing

+

The means by which we optimize our likelihood function is the simulated annealing algorithm. The way it works is as follows:

+
  1. Start with a random guess for the parameters we are trying to optimize. In our case we are trying to guess the distribution of two dice, and so we "optimize" until we have a distribution that matches the data.
  2. For each iteration of the algorithm:
    1. Generate a new "proposed" set of parameters based on the current parameters - i.e. slightly modify the current parameters to get a new set of parameters.
    2. Calculate the value of $\log(\mathcal{L})$ for each set of parameters. If the function value for the proposed parameter set is higher than for the current, automatically switch to the new parameter set and continue the next iteration.
    3. Given the new parameter set performs worse, determine a probability of switching to the new parameter set anyways: $\mathcal{P}(p_{current}, p_{proposed})$
    4. Switch to the new parameter set with probability $\mathcal{P}$. If you fail to switch, begin the next iteration.
  3. The algorithm is complete after we fail to make a transition $n$ times in a row.
+

If everything goes according to plan, we will have a value that is close to the true distribution of each die.

+

The actual code

+

We start by defining the score function. This will tell us how well the proposed die densities actually explain the results.

+
import numpy as np
from numpy import polynomial

def density_coef(d1_density, d2_density):
    # Calculating the probabilities of each outcome was taken
    # from this author: http://math.stackexchange.com/a/1710392/320784
    d1_p = polynomial.Polynomial(d1_density)
    d2_p = polynomial.Polynomial(d2_density)
    coefs = (d1_p * d2_p).coef
    return coefs

def score(x, d1_density, d2_density):
    # We've now got the probabilities of each event, but we need
    # to shift the array a bit so we can use the x values to actually
    # index into it. This will allow us to do all the calculations
    # incredibly quickly
    coefs = density_coef(d1_density, d2_density)
    coefs = np.hstack((0, 0, coefs))
    return np.log(coefs[x]).sum()
+

Afterward, we need to write something to permute the proposal densities. We make random modifications, and eventually the best one survives.

+
def permute(d1_density, d2_density):
    # To ensure we have legitimate densities, we will randomly
    # increase one die face probability by `change`,
    # and decrease one by `change`.
    # This means there are something less than (1/`change`)^12 possibilities
    # we are trying to search over.
    change = .01

    d1_index1, d1_index2 = np.random.randint(0, 6, 2)
    d2_index1, d2_index2 = np.random.randint(0, 6, 2)

    # Also make sure to copy. I've had some weird aliasing issues
    # in the past that made everything blow up.
    new_d1 = np.float64(np.copy(d1_density))
    new_d2 = np.float64(np.copy(d2_density))

    # While this doesn't account for the possibility that some
    # values go negative, in practice this never happens
    new_d1[d1_index1] += change
    new_d1[d1_index2] -= change
    new_d2[d2_index1] += change
    new_d2[d2_index2] -= change

    return new_d1, new_d2
+

Now for the main algorithm code. This is what brings all the pieces together.

+
def optimize(data, conv_count=10, max_iter=1e4):
    switch_failures = 0
    iter_count = 0

    # Start with guessing fair dice
    cur_d1 = np.repeat(1/6, 6)
    cur_d2 = np.repeat(1/6, 6)
    cur_score = score(data, cur_d1, cur_d2)

    # Keep track of our best guesses - may not be
    # what we end with
    max_score = cur_score
    max_d1 = cur_d1
    max_d2 = cur_d2

    # Optimization stops when we have failed to switch `conv_count`
    # times (presumably because we have a great guess), or we reach
    # the maximum number of iterations.
    while switch_failures < conv_count and iter_count < max_iter:
        iter_count += 1
        if iter_count % (max_iter / 10) == 0:
            print('Iteration: {}; Current score (higher is better): {}'.format(
                iter_count, cur_score))

        new_d1, new_d2 = permute(cur_d1, cur_d2)
        new_score = score(data, new_d1, new_d2)

        if new_score > max_score:
            max_score = new_score
            max_d1 = new_d1
            max_d2 = new_d2

        if new_score > cur_score:
            # If the new permutation beats the old one,
            # automatically select it.
            cur_score = new_score
            cur_d1 = new_d1
            cur_d2 = new_d2
            switch_failures = 0
        else:
            # We didn't beat the current score, but allow
            # for possibly switching anyways.
            accept_prob = np.exp(new_score - cur_score)
            coin_toss = np.random.rand()
            if coin_toss < accept_prob:
                # We randomly switch to the new distribution
                cur_score = new_score
                cur_d1 = new_d1
                cur_d2 = new_d2
                switch_failures = 0
            else:
                switch_failures += 1

    # Return both our best guess, and the ending guess
    return max_d1, max_d2, cur_d1, cur_d2
+

And now we have finished the hard work!

+

Catching the Casino

+

Let's go through a couple of scenarios and see if we can catch the casino cheating with some loaded dice. In every scenario we start with an assumption of fair dice, and then try our hand at figuring out what the actual distribution was.

+

Attempt 1

+

The casino is using two dice that are both biased low. How well can we recover the distribution?

+
import time

def simulate_casino(d1_dist, d2_dist, n=10000):
    d1_vals = unfair_die(d1_dist, n)
    d2_vals = unfair_die(d2_dist, n)

    start = time.perf_counter()
    max_d1, max_d2, final_d1, final_d2 = optimize(d1_vals + d2_vals)
    end = time.perf_counter()
    print("Simulated Annealing time: {:.02f}s".format(end - start))

    coef_range = np.arange(2, 13) - .5
    plt.subplot(221)
    plt.bar(coef_range, density_coef(d1_dist, d2_dist), width=1)
    plt.title('True Distribution')

    plt.subplot(222)
    plt.hist(d1_vals + d2_vals, bins=11)
    plt.title('Empirical Distribution')

    plt.subplot(223)
    plt.bar(coef_range, density_coef(max_d1, max_d2), width=1)
    plt.title('Recovered Distribution')

    plt.gcf().set_size_inches(10, 10)


simulate_casino([2/9, 2/9, 2/9, 1/9, 1/9, 1/9],
                [2/9, 2/9, 2/9, 1/9, 1/9, 1/9])
+
    Iteration: 1000; Current score (higher is better): -22147.004400281654
Simulated Annealing time: 0.30s
+

png

+

Attempt 2

+

The casino now uses dice that are both biased towards 1 and 6.

+
simulate_casino([1/3, 1/12, 1/12, 1/12, 1/12, 1/3],
                [1/3, 1/12, 1/12, 1/12, 1/12, 1/3])
+
    Simulated Annealing time: 0.08s
+

png

+

Attempt 3

+

The casino will now use one die biased towards 1 and 6, and one die towards 3 and 4.

+
simulate_casino([1/3, 1/12, 1/12, 1/12, 1/12, 1/3],
                [1/12, 1/12, 1/3, 1/3, 1/12, 1/12])
+
    Simulated Annealing time: 0.09s
+

png

+

Attempt 4

+

We'll now finally go to a fair casino to make sure that we can still recognize a positive result.

+
simulate_casino(np.repeat(1/6, 6), np.repeat(1/6, 6))
+
    Simulated Annealing time: 0.02s
+

png

+

Attempt 5

+

We've so far been working with a large amount of data - 10,000 data points. Can we now scale things back to only 250 throws? We'll start with two dice biased high.

+
simulate_casino([1/9, 1/9, 1/9, 2/9, 2/9, 2/9],
                [1/9, 1/9, 1/9, 2/9, 2/9, 2/9],
                n=250)
+
    Iteration: 1000; Current score (higher is better): -551.6995384525453
Iteration: 2000; Current score (higher is better): -547.7803673440676
Iteration: 3000; Current score (higher is better): -547.9805613193807
Iteration: 4000; Current score (higher is better): -546.7574874775273
Iteration: 5000; Current score (higher is better): -549.5798007672656
Iteration: 6000; Current score (higher is better): -545.0354060154496
Iteration: 7000; Current score (higher is better): -550.1134504086606
Iteration: 8000; Current score (higher is better): -549.9306537114975
Iteration: 9000; Current score (higher is better): -550.7075182119111
Iteration: 10000; Current score (higher is better): -549.400679551826
Simulated Annealing time: 1.94s
+

png

+

The results are surprisingly good. While the actual optimization process took much longer to finish than in the other examples, we still have a very good guess. As a caveat though: the recovered distribution tends to overfit the data. That is, if the sample doesn't reflect the underlying distribution well, the recovered distribution will be off as well.

+

Conclusion

+

Given the results above, we can see that we have indeed come up with a very good algorithm to determine the distribution of two dice given their results. As a benefit, we have even seen that results come back very quickly; it's not uncommon for the optimization to converge within a tenth of a second.

+

Additionally, we have seen that the algorithm can intuit the distribution even when there is not much data. While the final example shows that we can 'overfit' on the dataset, we can still get valuable information from a relatively small dataset.

+

We can declare at long last: the mathematicians have again triumphed over the casino.

+
+

Justification of Simulated Annealing

+

Why Simulated Annealing?

+

So why even use an algorithm with a fancy title like Simulated Annealing? First of all, because the title is sexy. Second of all, because this is a reasonably complicated problem to try and solve. We have a parameter space where each value $p_{ij} \in (0, 1)$, with $i \in \{1, 2\}$ indexing the die and $j \in \{1, \ldots, 6\}$ the face, for a total of 12 different variables we are trying to optimize over. Additionally, given a 12-dimensional function we are trying to optimize, simulated annealing helps make sure that we don't get stuck in a local maximum.

+

Why not something else?

+

This is a fair question. There are two classes of algorithms that can also be used to solve this problem: non-linear optimization methods, and the EM algorithm.

+
  1. I chose not to use non-linear optimization simply because I'm a bit concerned that it would trap me in a local maximum. Instead of running multiple different optimizations from different starting points, I can just use simulated annealing to take that into account. In addition, throughout the course of testing, the simulated annealing code converged incredibly quickly - far more quickly than any non-linear solver would be able to.
  2. The EM Algorithm was originally what I intended to write this blog post with. Indeed, the post was inspired by the crooked casino example, which uses the EM algorithm to solve it. However, after modeling the likelihood function I realized that the algebra would very quickly get out of hand. Computing all the polynomial terms needed to actually optimize each parameter would not be fun. So while the EM algorithm would likely be much faster in raw speed terms, the amount of time needed to program and verify it meant that I was far better off using a different method for optimization.
\ No newline at end of file diff --git a/2016/06/event-studies-and-earnings-releases/index.html b/2016/06/event-studies-and-earnings-releases/index.html new file mode 100644 index 0000000..cbf34c0 --- /dev/null +++ b/2016/06/event-studies-and-earnings-releases/index.html @@ -0,0 +1,74 @@ +Event studies and earnings releases | The Old Speice Guy

Event studies and earnings releases

· 17 min read
Bradlee Speice

Or, being suspicious of market insiders.

+

The Market Just Knew

+

I recently saw two examples of stock charts that have kept me thinking for a while. And now that the semester is complete, I finally have enough time to really look at them and give them the treatment they deserve. The first is good old Apple:

+
Code
from secrets import QUANDL_KEY
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from matplotlib.finance import candlestick_ohlc
from matplotlib.dates import DateFormatter, WeekdayLocator,\
    DayLocator, MONDAY
import quandl
from datetime import datetime
import pandas as pd
%matplotlib inline

def fetch_ticker(ticker, start, end):
    # Quandl is currently giving me issues with returning
    # the entire dataset and not slicing server-side.
    # So instead, we'll do it client-side!
    q_format = '%Y-%m-%d'
    ticker_data = quandl.get('YAHOO/' + ticker,
                             start_date=start.strftime(q_format),
                             end_date=end.strftime(q_format),
                             authtoken=QUANDL_KEY)
    return ticker_data

def ohlc_dataframe(data, ax=None):
    # Much of this code re-used from:
    # http://matplotlib.org/examples/pylab_examples/finance_demo.html
    if ax is None:
        f, ax = plt.subplots()

    vals = [(date2num(date), *(data.loc[date]))
            for date in data.index]
    candlestick_ohlc(ax, vals)

    mondays = WeekdayLocator(MONDAY)
    alldays = DayLocator()
    weekFormatter = DateFormatter('%b %d')
    ax.xaxis.set_major_locator(mondays)
    ax.xaxis.set_minor_locator(alldays)
    ax.xaxis.set_major_formatter(weekFormatter)
    return ax
+
AAPL = fetch_ticker('AAPL', datetime(2016, 3, 1), datetime(2016, 5, 1))
ax = ohlc_dataframe(AAPL)
plt.vlines(date2num(datetime(2016, 4, 26, 12)),
           ax.get_ylim()[0], ax.get_ylim()[1],
           color='b',
           label='Earnings Release')
plt.legend(loc=3)
plt.title("Apple Price 3/1/2016 - 5/1/2016");
+

png

+

The second chart is from Facebook:

+
FB = fetch_ticker('FB', datetime(2016, 3, 1), datetime(2016, 5, 5))
ax = ohlc_dataframe(FB)
plt.vlines(date2num(datetime(2016, 4, 27, 12)),
           ax.get_ylim()[0], ax.get_ylim()[1],
           color='b', label='Earnings Release')
plt.title('Facebook Price 3/5/2016 - 5/5/2016')
plt.legend(loc=2);
+

png

+

These two charts demonstrate two very specific phenomena: how the market prepares for earnings releases. Let's look at those charts again, but with some extra information. As we're about to see, the market "knew" in advance that Apple was going to perform poorly. The market expected that Facebook was going to perform poorly as well, but Facebook instead shot the lights out. Let's see that trend in action:

+
Code
def plot_hilo(ax, start, end, data):
    ax.plot([date2num(start), date2num(end)],
            [data.loc[start]['High'], data.loc[end]['High']],
            color='b')
    ax.plot([date2num(start), date2num(end)],
            [data.loc[start]['Low'], data.loc[end]['Low']],
            color='b')

f, axarr = plt.subplots(1, 2)

ax_aapl = axarr[0]
ax_fb = axarr[1]

# Plot the AAPL trend up and down
ohlc_dataframe(AAPL, ax=ax_aapl)
plot_hilo(ax_aapl, datetime(2016, 3, 1), datetime(2016, 4, 15), AAPL)
plot_hilo(ax_aapl, datetime(2016, 4, 18), datetime(2016, 4, 26), AAPL)
ax_aapl.vlines(date2num(datetime(2016, 4, 26, 12)),
               ax_aapl.get_ylim()[0], ax_aapl.get_ylim()[1],
               color='g', label='Earnings Release')
ax_aapl.legend(loc=2)
ax_aapl.set_title('AAPL Price History')

# Plot the FB trend down and up
ohlc_dataframe(FB, ax=ax_fb)
plot_hilo(ax_fb, datetime(2016, 3, 30), datetime(2016, 4, 27), FB)
plot_hilo(ax_fb, datetime(2016, 4, 28), datetime(2016, 5, 5), FB)
ax_fb.vlines(date2num(datetime(2016, 4, 27, 12)),
             ax_fb.get_ylim()[0], ax_fb.get_ylim()[1],
             color='g', label='Earnings Release')
ax_fb.legend(loc=2)
ax_fb.set_title('FB Price History')

f.set_size_inches(18, 6)
+

png

+

As we can see above, the market broke Apple's prevailing trend in order to move down, ultimately predicting the earnings release. For Facebook, the opposite happened. While the trend was down, the earnings were fantastic and the market corrected itself much higher.

+

Formulating the Question

+

While these are two specific examples, there are plenty of other examples you could cite one way or another. Even if the preponderance of evidence shows that the market correctly predicts earnings releases, we need not accuse people of collusion; for a company like Apple with many suppliers we can generally forecast how Apple has done based on those same suppliers.

+

The question then, is this: how well does the market predict earnings releases? It's an incredibly broad question that I want to dissect in a couple of different ways:

+
  1. Given a stock that has been trending down over the N days before an earnings release, how likely is it to continue downward after the release?
  2. Given a stock trending up, how likely is it to continue up?
  3. Is there a difference in accuracy between large- and small-cap stocks?
  4. How often, and for how long, do markets trend before an earnings release?
+

I want to especially thank Alejandro Saltiel for helping me retrieve the data. He's great. And now for all of the interesting bits.

+

Event Studies

+

Before we go too much further, I want to introduce the actual event study. Each chart intends to capture a lot of information and present an easy-to-understand pattern:

+
Code
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from datetime import datetime, timedelta

# If you remove rules, it removes them from *all* calendars
# To ensure we don't pop rules we don't want to, first make
# sure to fully copy the object
trade_calendar = USFederalHolidayCalendar()
trade_calendar.rules.pop(6) # Remove Columbus day
trade_calendar.rules.pop(7) # Remove Veteran's day
TradeDay = lambda days: CustomBusinessDay(days, calendar=trade_calendar)

def plot_study(array):
    # Given a 2-d array, we assume the event happens at index `lookback`,
    # and create all of our summary statistics from there.
    lookback = int((array.shape[1] - 1) / 2)
    norm_factor = np.repeat(array[:,lookback].reshape(-1, 1), array.shape[1], axis=1)
    centered_data = array / norm_factor - 1
    lookforward = centered_data.shape[1] - lookback
    means = centered_data.mean(axis=0)
    lookforward_data = centered_data[:,lookforward:]
    std_dev = np.hstack([0, lookforward_data.std(axis=0)])
    maxes = lookforward_data.max(axis=0)
    mins = lookforward_data.min(axis=0)

    f, axarr = plt.subplots(1, 2)
    range_begin = -lookback
    range_end = lookforward
    axarr[0].plot(range(range_begin, range_end), means)
    axarr[1].plot(range(range_begin, range_end), means)
    axarr[0].fill_between(range(0, range_end),
                          means[-lookforward:] + std_dev,
                          means[-lookforward:] - std_dev,
                          alpha=.5, label="$\pm$ 1 s.d.")
    axarr[1].fill_between(range(0, range_end),
                          means[-lookforward:] + std_dev,
                          means[-lookforward:] - std_dev,
                          alpha=.5, label="$\pm$ 1 s.d.")

    max_err = maxes - means[-lookforward+1:]
    min_err = means[-lookforward+1:] - mins
    axarr[0].errorbar(range(1, range_end),
                      means[-lookforward+1:],
                      yerr=[min_err, max_err], label='Max & Min')
    axarr[0].legend(loc=2)
    axarr[1].legend(loc=2)

    axarr[0].set_xlim((-lookback-1, lookback+1))
    axarr[1].set_xlim((-lookback-1, lookback+1))

def plot_study_small(array):
    # Given a 2-d array, we assume the event happens at index `lookback`,
    # and create all of our summary statistics from there.
    lookback = int((array.shape[1] - 1) / 2)
    norm_factor = np.repeat(array[:,lookback].reshape(-1, 1), array.shape[1], axis=1)
    centered_data = array / norm_factor - 1
    lookforward = centered_data.shape[1] - lookback
    means = centered_data.mean(axis=0)
    lookforward_data = centered_data[:,lookforward:]
    std_dev = np.hstack([0, lookforward_data.std(axis=0)])
    maxes = lookforward_data.max(axis=0)
    mins = lookforward_data.min(axis=0)

    range_begin = -lookback
    range_end = lookforward
    plt.plot(range(range_begin, range_end), means)
    plt.fill_between(range(0, range_end),
                     means[-lookforward:] + std_dev,
                     means[-lookforward:] - std_dev,
                     alpha=.5, label="$\pm$ 1 s.d.")

    max_err = maxes - means[-lookforward+1:]
    min_err = means[-lookforward+1:] - mins
    plt.errorbar(range(1, range_end),
                 means[-lookforward+1:],
                 yerr=[min_err, max_err], label='Max & Min')
    plt.legend(loc=2)
    plt.xlim((-lookback-1, lookback+1))

def fetch_event_data(ticker, events, horizon=5):
    # Use horizon+1 to account for including the day of the event,
    # and half-open interval - that is, for a horizon of 5,
    # we should be including 11 events. Additionally, using the
    # CustomBusinessDay means we automatically handle issues if
    # for example a company reports Friday afternoon - the date
    # calculator will turn this into a "Saturday" release, but
    # we effectively shift that to Monday with the logic below.
    td_back = TradeDay(horizon+1)
    td_forward = TradeDay(horizon+1)

    start_date = min(events) - td_back
    end_date = max(events) + td_forward
    total_data = fetch_ticker(ticker, start_date, end_date)
    event_data = [total_data.ix[event-td_back:event+td_forward]\
                      [0:horizon*2+1]\
                      ['Adjusted Close']
                  for event in events]
    return np.array(event_data)
+
# Generate a couple of random events

event_dates = [datetime(2016, 5, 27) - timedelta(days=1) - TradeDay(x*20) for x in range(1, 40)]
data = fetch_event_data('CELG', event_dates)
plot_study_small(data)
plt.legend(loc=3)
plt.gcf().set_size_inches(12, 6);


plt.annotate('Mean price for days leading up to each event',
             (-5, -.01), (-4.5, .025),
             arrowprops=dict(facecolor='black', shrink=0.05))
plt.annotate('', (-.1, .005), (-.5, .02),
             arrowprops={'facecolor': 'black', 'shrink': .05})
plt.annotate('$\pm$ 1 std. dev. each day', (5, .055), (2.5, .085),
             arrowprops={'facecolor': 'black', 'shrink': .05})
plt.annotate('Min/Max each day', (.9, -.07), (-1, -.1),
             arrowprops={'facecolor': 'black', 'shrink': .05});
+

png

+

And as a quick textual explanation as well:

+
  • The blue line represents the mean price for each day, represented as a percentage of the price on the '0-day'. For example, if we defined an 'event' as whenever the stock price dropped for three days, we would see a decreasing blue line to the left of the 0-day.
  • The blue shaded area represents one standard deviation above and below the mean price for each day following an event. This is intended to give us an idea of what the stock price does in general following an event.
  • The green bars are the minimum and maximum price for each day following an event. This instructs us as to how much it's possible for the stock to move.
+

The first type of event I want to study is how stocks perform when they've been trending down over the past couple of days prior to a release. However, we need to clarify what exactly is meant by "trending down." To do so, we'll use the following metric: the midpoint between each day's opening and closing price goes down over a period of N days.
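
In code, that check is simple; here's a sketch (mine, assuming a hypothetical `prices` OHLC DataFrame covering the window) of the same metric the study code below implements:

+
# Downtrend check: the open/close midpoint must never increase
# from one day to the next over the window.
midpoint = prices['Open'] / 2 + prices['Close'] / 2
trending_down = (midpoint.diff().dropna() <= 0).all()
+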

+

It's probably helpful to have an example:

+
Code
f, axarr = plt.subplots(1, 2)
f.set_size_inches(18, 6)

FB_plot = axarr[0]
ohlc_dataframe(FB[datetime(2016, 4, 18):], FB_plot)

FB_truncated = FB[datetime(2016, 4, 18):datetime(2016, 4, 27)]
midpoint = FB_truncated['Open']/2 + FB_truncated['Close']/2
FB_plot.plot(FB_truncated.index, midpoint, label='Midpoint')
FB_plot.vlines(date2num(datetime(2016, 4, 27, 12)),
FB_plot.get_ylim()[0], FB_plot.get_ylim()[1],
color='g', label='Earnings Release')
FB_plot.legend(loc=2)
FB_plot.set_title('FB Midpoint Plot')

AAPL_plot = axarr[1]
ohlc_dataframe(AAPL[datetime(2016, 4, 10):], AAPL_plot)
AAPL_truncated = AAPL[datetime(2016, 4, 10):datetime(2016, 4, 26)]
midpoint = AAPL_truncated['Open']/2 + AAPL_truncated['Close']/2
AAPL_plot.plot(AAPL_truncated.index, midpoint, label='Midpoint')
AAPL_plot.vlines(date2num(datetime(2016, 4, 26, 12)),
AAPL_plot.get_ylim()[0], AAPL_plot.get_ylim()[1],
color='g', label='Earnings Release')
AAPL_plot.legend(loc=3)
AAPL_plot.set_title('AAPL Midpoint Plot');
+

png

+

Given these charts, we can see that FB was trending down for the four days preceding the earnings release, and AAPL was trending down for a whopping 8 days (we don't count the peak day). This will define the methodology that we will use for the study.

+

So what are the results? For a given horizon, how well does the market actually perform?

+
Code
# Read in the events for each stock;
# The file was created using the first code block in the Appendix
import yaml
from dateutil.parser import parse
from progressbar import ProgressBar

data_str = open('earnings_dates.yaml', 'r').read()
# Need to remove invalid lines
filtered = filter(lambda x: '{' not in x, data_str.split('\n'))
earnings_data = yaml.safe_load('\n'.join(filtered))

# Convert our earnings data into a list of (ticker, date) pairs
# to make it easy to work with.
# This is horribly inefficient, but should get us what we need
ticker_dates = []
for ticker, date_list in earnings_data.items():
for iso_str in date_list:
ticker_dates.append((ticker, parse(iso_str)))

def does_trend_down(ticker, event, horizon):
# Figure out if the `event` has a downtrend for
# the `horizon` days preceding it
# As an interpretation note: it is assumed that
# the closing price of day `event` is the reference
# point, and we want `horizon` days before that.
# The price_data.hdf was created in the second appendix code block
try:
ticker_data = pd.read_hdf('price_data.hdf', ticker)
data = ticker_data[event-TradeDay(horizon):event]
midpoints = data['Open']/2 + data['Close']/2

# Shift dates one forward into the future and subtract
# Effectively: do we trend down over all days?
elems = midpoints - midpoints.shift(1)
return len(elems)-1 == len(elems.dropna()[elems <= 0])
except KeyError:
# If the stock doesn't exist, it doesn't qualify as trending down
# Mostly this is here to make sure the entire analysis doesn't
# blow up if there were issues in data retrieval
return False

def study_trend(horizon, trend_function):
five_day_events = np.zeros((1, horizon*2 + 1))
invalid_events = []
for ticker, event in ProgressBar()(ticker_dates):
if trend_function(ticker, event, horizon):
ticker_data = pd.read_hdf('price_data.hdf', ticker)
event_data = ticker_data[event-TradeDay(horizon):event+TradeDay(horizon)]['Close']

try:
five_day_events = np.vstack([five_day_events, event_data])
except ValueError:
# Sometimes we don't get exactly the right number of values due to calendar
# issues. I've fixed most everything I can, and the few issues that are left
# I assume don't systemically bias the results (i.e. data could be missing
# because it doesn't exist, etc.). After running through, ~1% of events get
# discarded this way
invalid_events.append((ticker, event))


# Remove our initial zero row
five_day_events = five_day_events[1:,:]
plot_study(five_day_events)
plt.gcf().suptitle('Action over {} days: {} events'
.format(horizon, five_day_events.shape[0]))
plt.gcf().set_size_inches(18, 6)

# Start with a 5 day study
study_trend(5, does_trend_down)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:21:38 Time: 0:21:38
+

png

+

When a stock has been trending down for 5 days, once the earnings are announced it really doesn't move on average. However, the variability is incredible. This implies two important things:

+
  1. The market is just as often wrong about an earnings announcement before it happens as it is correct.
  2. The incredible width of the min/max bars and standard deviation area tells us that the market reacts violently after the earnings are released.
+

Let's repeat the same study, but over time horizons of 8 days and 3 days. Presumably, if a stock has been going down for 8 straight days before the earnings, the market should be more accurate.

+
Code
# 8 day study next
study_trend(8, does_trend_down)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:20:29 Time: 0:20:29
+

png

+

However, looking only at stocks that trended down for 8 days prior to a release, the same pattern emerges: on average, the stock doesn't move, but the market reaction is often incredibly violent.

+
Code
# 3 day study after that
study_trend(3, does_trend_down)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:26:26 Time: 0:26:26
+

png

+

Finally, when we look at a 3-day horizon, we start getting some incredible outliers. Stocks can move up by roughly 300%, and the standard deviation width is, again, incredible. Even so, the results for a 3-day horizon follow the same pattern we've seen in the 5- and 8-day horizons.

+

We're now going to repeat the analysis, but do it for uptrends instead. That is, instead of looking at stocks that have been trending down over the past number of days, we focus only on stocks that have been trending up.

+
Code
def does_trend_up(ticker, event, horizon):
# Figure out if the `event` has an uptrend for
# the `horizon` days preceding it
# As an interpretation note: it is assumed that
# the closing price of day `event` is the reference
# point, and we want `horizon` days before that.
# The price_data.hdf was created in the second appendix code block
try:
ticker_data = pd.read_hdf('price_data.hdf', ticker)
data = ticker_data[event-TradeDay(horizon):event]
midpoints = data['Open']/2 + data['Close']/2

# Shift dates one forward into the future and subtract
# Effectively: do we trend up over all days?
elems = midpoints - midpoints.shift(1)
return len(elems)-1 == len(elems.dropna()[elems >= 0])
except KeyError:
# If the stock doesn't exist, it doesn't qualify as trending up
# Mostly this is here to make sure the entire analysis doesn't
# blow up if there were issues in data retrieval
return False

study_trend(5, does_trend_up)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:22:51 Time: 0:22:51
+

png

+

The patterns here are very similar. Aside from noting that stocks can move nearly 400% after an earnings announcement (most likely a case that included a takeover announcement or similar), we still see large min/max bars and a wide standard deviation of returns.

+

We'll repeat the pattern for stocks going up for both 8 and 3 days straight, but at this point, the results should be very predictable:

+
Code
study_trend(8, does_trend_up)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:20:51 Time: 0:20:51
+

png

+
Code
study_trend(3, does_trend_up)
    100% (47578 of 47578) |###########################################################| Elapsed Time: 0:26:56 Time: 0:26:56
+

png

+

Conclusion and Summary

+

I guess the most important takeaway is this: looking at the entire market, a stock's performance prior to an earnings release has no bearing on how it performs afterward. Honestly, given the huge variability of returns after an earnings release, even when the stock has been trending for a long time, you're best off divesting before an earnings release and letting the market sort itself out.

+

However, there is a big caveat. These results are taken when we look at the entire market. So while we can say that the market as a whole knows nothing and just reacts violently, I want to take a closer look into this data. Does the market typically perform poorly on large-cap/high liquidity stocks? Do smaller companies have investors that know them better and can thus predict performance better? Are specific market sectors better at prediction? Presumably technology stocks are more volatile than the industrials.

+

So there are some more interesting questions I still want to ask with this data. Knowing that the hard work of data processing is largely already done, it should be fairly simple to continue this analysis and get much more refined with it. Until next time.

+

Appendix

+

Export event data for Russell 3000 companies:

+
Code
import pandas as pd
from html.parser import HTMLParser
from datetime import datetime, timedelta
import requests
import re
from dateutil import parser
import progressbar
from concurrent import futures
import yaml

class EarningsParser(HTMLParser):
store_dates = False
earnings_offset = None
dates = []

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dates = []

def handle_starttag(self, tag, attrs):
if tag == 'table':
self.store_dates = True

def handle_data(self, data):
if self.store_dates:
match = re.match(r'\d+/\d+/\d+', data)
if match:
self.dates.append(match.group(0))

# If a company reports before the bell, record the earnings date
# being at midnight the day before. Ex: WMT reports 5/19/2016,
# but we want the reference point to be the closing price on 5/18/2016
if 'After Close' in data:
self.earnings_offset = timedelta(days=0)
elif 'Before Open' in data:
self.earnings_offset = timedelta(days=-1)

def handle_endtag(self, tag):
if tag == 'table':
self.store_dates = False

def earnings_releases(ticker):
#print("Looking up ticker {}".format(ticker))
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) '\
'Gecko/20100101 Firefox/46.0'
headers = {'user-agent': user_agent}
base_url = 'http://www.streetinsider.com/ec_earnings.php?q={}'\
.format(ticker)
e = EarningsParser()
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=0)
s.mount('http://', a)
e.feed(str(s.get(base_url, headers=headers).content))

if e.earnings_offset is not None:
dates = map(lambda x: parser.parse(x) + e.earnings_offset, e.dates)
past = filter(lambda x: x < datetime.now(), dates)
return list(map(lambda d: d.isoformat(), past))

# Use a Russell-3000 ETF tracker (ticker IWV) to get a list of holdings
r3000 = pd.read_csv('https://www.ishares.com/us/products/239714/'
'ishares-russell-3000-etf/1449138789749.ajax?'
'fileType=csv&fileName=IWV_holdings&dataType=fund',
header=10)
r3000_equities = r3000[(r3000['Exchange'] == 'NASDAQ') |
(r3000['Exchange'] == 'New York Stock Exchange Inc.')]

dates_file = open('earnings_dates.yaml', 'w')

with futures.ThreadPoolExecutor(max_workers=8) as pool:
fs = {pool.submit(earnings_releases, r3000_equities.ix[t]['Ticker']): t
for t in r3000_equities.index}
pbar = progressbar.ProgressBar(term_width=80,
max_value=r3000_equities.index.max())

for future in futures.as_completed(fs):
i = fs[future]
pbar.update(i)
dates_file.write(yaml.dump({r3000_equities.ix[i]['Ticker']:
future.result()}))
+

Downloading stock price data needed for the event studies:

+
Code
from secrets import QUANDL_KEY
import pandas as pd
import yaml
from dateutil.parser import parse
from datetime import timedelta
import quandl
from progressbar import ProgressBar

def fetch_ticker(ticker, start, end):
# Quandl is currently giving me issues with returning
# the entire dataset and not slicing server-side.
# So instead, we'll do it client-side!
q_format = '%Y-%m-%d'
ticker_data = quandl.get('YAHOO/' + ticker,
start_date=start.strftime(q_format),
end_date=end.strftime(q_format),
authtoken=QUANDL_KEY)
return ticker_data

data_str = open('earnings_dates.yaml', 'r').read()
# Need to remove invalid lines
filtered = filter(lambda x: '{' not in x, data_str.split('\n'))
earnings_data = yaml.safe_load('\n'.join(filtered))

# Get the first 1500 keys - split up into two statements
# because of Quandl rate limits
tickers = list(earnings_data.keys())

price_dict = {}
invalid_tickers = []
for ticker in ProgressBar()(tickers[0:1500]):
try:
# Replace '.' with '-' in name for some tickers
fixed = ticker.replace('.', '-')
event_strs = earnings_data[ticker]
events = [parse(event) for event in event_strs]
td = timedelta(days=20)
price_dict[ticker] = fetch_ticker(fixed,
min(events)-td, max(events)+td)
except quandl.NotFoundError:
invalid_tickers.append(ticker)

# Execute this after 10 minutes have passed
for ticker in ProgressBar()(tickers[1500:]):
try:
# Replace '.' with '-' in name for some tickers
fixed = ticker.replace('.', '-')
event_strs = earnings_data[ticker]
events = [parse(event) for event in event_strs]
td = timedelta(days=20)
price_dict[ticker] = fetch_ticker(fixed,
min(events)-td, max(events)+td)
except quandl.NotFoundError:
invalid_tickers.append(ticker)

prices_store = pd.HDFStore('price_data.hdf')
for ticker, prices in price_dict.items():
prices_store[ticker] = prices
\ No newline at end of file diff --git a/2016/10/rustic-repodcasting/index.html b/2016/10/rustic-repodcasting/index.html new file mode 100644 index 0000000..569bb30 --- /dev/null +++ b/2016/10/rustic-repodcasting/index.html @@ -0,0 +1,187 @@ +A Rustic re-podcasting server | The Old Speice Guy
Skip to main content

A Rustic re-podcasting server

· 11 min read
Bradlee Speice

Learning Rust by fire (it sounds better than learning by corrosion)

+

I listen to a lot of Drum and Bass music, because it's beautiful music. And there's a particular site, Bassdrive.com, that hosts a lot of great content. Specifically, the archives section of the site has a list of the past shows that you can download and listen to. The issue is, it's just a giant list of links to download. I'd really like this in a podcast format to take with me on the road, etc.

+

So I wrote the elektricity web application to actually accomplish all that. Whenever you request a feed, it goes out to Bassdrive, processes all the links on a page, and serves up some fresh, tasty RSS to satisfy your ears. I hosted it on Heroku using the free tier because it's really not resource-intensive at all.

+

The issue so far is that I keep running out of free tier hours during a month because my podcasting application likes to have a server scan for new episodes constantly. Not sure why it's doing that, but I don't have a whole lot of control over it. It's a phenomenal application otherwise.

+

My (over-engineered) solution: Re-write the application using the Rust programming language. I'd like to run this on a small hacker board I own, and doing this in Rust would allow me to easily cross-compile it. Plus, I've been very interested in the Rust language for a while and this would be a great opportunity to really learn it well. The code is available here as development progresses.

+

The Setup

+

We'll be using the iron library to handle the server, and hyper to fetch the data we need from elsewhere on the interwebs. HTML5Ever allows us to ingest the content that will be coming from Bassdrive, and finally, output is done with handlebars-rust.

+

It will ultimately be interesting to see how much more work it takes to get this working in Rust compared with a language like Python. Coming from a dynamic mindset, it's super easy to just chain stuff together, ship it out, and call it a day. I think I'm going to end up getting much dirtier trying to write all of this out.

+

Issue 1: Strings

+

Strings in Rust are hard. I acknowledge Python can get away with some things that make strings super easy (and Python 3 has gotten better at cracking down on some bad cases, str <-> bytes specifically), but Rust is hard.

+

Let's take, for example, the 404 error handler I'm trying to write. The result should be incredibly simple: all I want is to echo back Didn't find URL: <url>. Shouldn't be that hard, right? In Python I'd just do something like:

+
def echo_handler(request):
return "You're visiting: {}".format(request.uri)
+

And we'd call it a day. Rust isn't so simple. Let's start with the trivial examples people post online:

+
fn hello_world(req: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "You found the server!")))
}
+

Doesn't look too bad, right? In fact, it's essentially the same as the Python version! All we need to do is send back a string of some form. So, we look up the documentation for Request and see a url field that will contain what we want. Let's try the first iteration:

+
fn hello_world(req: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "You found the URL: " + req.url)))
}
+

Which yields the error:

+
    error[E0369]: binary operation `+` cannot be applied to type `&'static str`
+

OK, what's going on here? Time to start Googling for "concatenate strings in Rust". That's what we want to do, right? Concatenate a static string and the URL.

+

After Googling, we come across a helpful concat! macro that looks really nice! Let's try that one:

+
fn hello_world(req: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, concat!("You found the URL: ", req.url))))
}
+

And the error:

+
    error: expected a literal
+

Turns out Rust actually blows up because the concat! macro expects us to know at compile time what req.url is. Which, in my outsider opinion, is a bit strange. println! and format!, etc., all handle values they don't know at compile time. Why can't concat!? In any case, we need a new plan of attack. How about we try formatting strings?

+
fn hello_world(req: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, format!("You found the URL: {}", req.url))))
}
+

And at long last, it works. Onwards!

+

Issue 2: Fighting with the borrow checker

+

Rust's single coolest feature is how the compiler can guarantee safety in your program. As long as you don't use unsafe pointers in Rust, you're guaranteed safety. And not having truly manual memory management is really cool; I'm totally OK with never having to write malloc() again.

+

That said, even the Rust documentation makes a specific note:

+
+

Many new users to Rust experience something we like to call 'fighting with the borrow checker', where the Rust compiler refuses to compile a program that the author thinks is valid.

+
+

If you have to put it in the documentation, it's not a helpful note: it's hazing.

+

So now that we have a handler which works with information from the request, we want to start making something that looks like an actual web application. The router provided by iron isn't terribly difficult so I won't cover it. Instead, the thing that had me stumped for a couple hours was trying to dynamically create routes.

+

The unfortunate thing with Rust (in my limited experience at the moment) is that there is a severe lack of non-trivial examples. Using the router is easy when you want to give an example of a static function. But how do you start working on things that are a bit more complex?

+

We're going to cover that here. Our first try: creating a function which returns other functions. This is a principle called currying. We set up a function that allows us to keep some data in scope for another function to come later.

+
fn build_handler(message: String) -> Fn(&mut Request) -> IronResult<Response> {
move |_: &mut Request| {
Ok(Response::with((status::Ok, message)))
}
}
+

We've simply set up a function that returns another anonymous function with the message parameter scoped in. If you compile this, you get not 1, not 2, but 5 new errors. 4 of them are the same though:

+
    error[E0277]: the trait bound `for<'r, 'r, 'r> std::ops::Fn(&'r mut iron::Request<'r, 'r>) -> std::result::Result<iron::Response, iron::IronError> + 'static: std::marker::Sized` is not satisfied
+

...oookay. I, for one, am not going to spend time trying to figure out what's going on there.

+
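
(For what it's worth: the errors boil down to the return type being a bare Fn trait, which doesn't have a size known at compile time. In today's Rust, the conventional fix is to box the closure. A minimal sketch of that idea, kept outside of iron so it stands alone:)

+
// Returning a boxed closure gives the return type a known size
fn build_handler(message: String) -> Box<dyn Fn(&str) -> String> {
    Box::new(move |url| format!("{}: {}", message, url))
}

fn main() {
    let handler = build_handler("You found the URL".to_string());
    assert_eq!(handler("/"), "You found the URL: /");
}
+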

And it is here that I will save the audience many hours of frustrated effort. At this point, I decided to switch from iron to pure hyper, since using hyper would give me a much simpler API. All I would have to do is build a function that took two parameters as input, and we're done. That said, it ultimately posed many more issues, because I started getting into a weird fight with the 'static lifetime, and being a Rust newbie, I just gave up on trying to understand it.

+

So we will abandon (mostly) the curried function attempt, and instead take advantage of something Rust actually intends us to use: struct and trait.

+

Remember when I talked about a lack of non-trivial examples on the Internet? This is what I was talking about. I could only find one example of this available online, and it was incredibly complex and contained code we honestly don't need or care about. There was no documentation of how to build routes that didn't use static functions, etc. But, I'm assuming you don't really care about my whining, so let's get to it.

+

The iron documentation mentions the Handler trait as being something we can implement. Does the function signature for that handle() method look familiar? It's what we've been working with so far.

+
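
For reference, here's roughly the shape of that trait (simplified; iron's real definition also carries supertraits like Send and Sync):

+
pub trait Handler {
    fn handle(&self, req: &mut Request) -> IronResult<Response>;
}
+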

The principle is that we need to define a new struct to hold our data, then implement that handle() method to return the result. Something that looks like this might do:

+
struct EchoHandler {
message: String
}

impl Handler for EchoHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, self.message)))
}
}

// Later in the code when we set up the router...
let echo = EchoHandler {
message: "Is it working yet?"
}
router.get("/", echo.handle, "index");
+

We attempt to build a struct, and give its handle method off to the router so the router knows what to do.

+

You guessed it, more errors:

+
    error: attempted to take value of method `handle` on type `EchoHandler`
+

Now, the Rust compiler is actually a really nice fellow, and offers us help:

+
    help: maybe a `()` to call it is missing? If not, try an anonymous function
+

We definitely don't want to call that function, so maybe try an anonymous function as it recommends?

+
router.get("/", |req: &mut Request| echo.handle(req), "index");
+

Another error:

+
    error[E0373]: closure may outlive the current function, but it borrows `echo`, which is owned by the current function
+

Another helpful message:

+
    help: to force the closure to take ownership of `echo` (and any other referenced variables), use the `move` keyword
+

We're getting closer though! Let's implement this change:

+
router.get("/", move |req: &mut Request| echo.handle(req), "index");
+

And here's where things get strange:

+
    error[E0507]: cannot move out of borrowed content
--> src/main.rs:18:40
|
18 | Ok(Response::with((status::Ok, self.message)))
| ^^^^ cannot move out of borrowed content
+

Now, this took me another couple hours to figure out. I'm going to explain it, but keep this in mind: a value in Rust has exactly one owner at a time (exceptions apply, of course).

+

When we attempt to use self.message as it was created in the earlier struct, we are essentially trying to give it away to another piece of code. Rust's semantics then state that we may no longer access it unless it is returned to us (which iron's code does not do). There are two ways to fix this:

+
  1. Only give away references (i.e. &self.message instead of self.message) instead of transferring ownership
  2. Make a copy of the underlying value which will be safe to give away
+
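
In miniature, totally outside of iron, the two options look like this (a contrived sketch):

+
struct Echo {
    message: String,
}

impl Echo {
    // This version would fail with the same E0507:
    // fn take(&self) -> String { self.message }

    // Option 1: only give away a reference
    fn message_ref(&self) -> &str {
        &self.message
    }

    // Option 2: give away a copy of the underlying value
    fn message_copy(&self) -> String {
        self.message.clone()
    }
}

fn main() {
    let echo = Echo { message: "hello".to_string() };
    assert_eq!(echo.message_ref(), "hello");
    assert_eq!(echo.message_copy(), "hello");
}
+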

I didn't know these were the two options originally, so I hope this helps the audience out. Because iron won't accept a reference, we are forced into the second option: making a copy. To do so, we just need to change the function to look like this:

+
Ok(Response::with((status::Ok, self.message.clone())))
+

Not so bad, huh? My only complaint is that it took so long to figure out exactly what was going on.

+

And now we have a small server that we can configure dynamically. At long last.

+
+

Final sidenote: You can actually do this without anonymous functions. Just change the router line to router.get("/", echo, "index");

+

Rust's type system seems to figure out that we want to use the handle() method.
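
Putting it all together, the final setup looks roughly like this (a sketch against the iron and router APIs used in this post; error handling elided):

+
// Assumes the usual `extern crate iron; extern crate router;` imports
fn main() {
    let echo = EchoHandler {
        message: "Is it working yet?".to_string(),
    };

    let mut router = Router::new();
    // The Handler implementation is picked up directly - no closure needed
    router.get("/", echo, "index");

    Iron::new(router).http("localhost:3000").unwrap();
}
+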

+
+

Conclusion

+

After a good long day's work, we now have the routing functionality set up on our application. We should be able to scale this pretty well in the future: the RSS content we need to deliver can be treated as a string, so the building blocks are in place.

+

There are two important things I learned starting with Rust today:

+
  1. Rust is a new language, and while the code is high-quality, the mindshare is coming.
  2. I'm a terrible programmer.
+

Number 1 is pretty obvious and not surprising to anyone. Number 2 caught me off guard. I've gotten used to having either a garbage collector (Java, Python, etc.) or playing a little fast and loose with scoping rules (C, C++). You don't have to worry about object lifetimes there. Rust is forcing me to fully understand and manage the memory in my applications. With the final mistake I fixed (using .clone()), I would have been fine in C++ just giving away that reference and never using it again. I wouldn't have run into a "use-after-free" error, but I would have potentially been leaking memory. Rust forced me to be incredibly precise about how I use memory.

+

All said, I'm excited to keep using Rust. I think it's super cool; it's just going to take me a lot longer to do this than I originally thought.

\ No newline at end of file diff --git a/2016/11/pca-audio-compression/index.html b/2016/11/pca-audio-compression/index.html new file mode 100644 index 0000000..31f3c9c --- /dev/null +++ b/2016/11/pca-audio-compression/index.html @@ -0,0 +1,66 @@ +PCA audio compression | The Old Speice Guy
Skip to main content

PCA audio compression

· 11 min read
Bradlee Speice

In which I apply Machine Learning techniques to Digital Signal Processing to astounding failure.

+

Towards a new (and pretty poor) compression scheme

+

I'm going to be working with some audio data for a while as I get prepared for a term project this semester. I'll be working (with a partner) to design a system for separating voices from music. Given my total lack of experience with Digital Signal Processing I figured that now was as good a time as ever to work on a couple of fun projects that would get me back up to speed.

+

The first project I want to work on: Designing a new compression scheme for audio data.

+

A Brief Introduction to Audio Compression

+

Audio files, when uncompressed (files ending with .wav), are huge. Like, 10.5 megabytes per minute huge (we'll sanity-check that number right after this list). Storage is cheap these days, but that's still an incredible amount of data that we don't really need. Instead, we'd like to compress that data so that it's not taking up so much space. There are broadly two ways to accomplish this:

+
  1. Lossless compression - Formats like FLAC, ALAC, and Monkey's Audio (.ape) all go down this route. The idea is that when you compress and uncompress a file, you get exactly the same as what you started with.
  2. Lossy compression - Formats like MP3, Ogg, and AAC (.m4a) are far more popular, but make a crucial tradeoff: we can reduce the file size even more during compression, but the decompressed file won't be the same.
+
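
Where does that 10.5 megabytes per minute figure come from? A quick sanity check, assuming CD-quality audio (44.1 kHz sample rate, 16-bit samples, 2 channels):

+
samples_per_second = 44100
bytes_per_sample = 2   # 16-bit PCM
channels = 2           # stereo
seconds = 60

total_bytes = samples_per_second * bytes_per_sample * channels * seconds
print(total_bytes / 10**6)  # ~10.58 megabytes per minute
+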

There is a fundamental tradeoff at stake: Using lossy compression sacrifices some of the integrity of the resulting file to save on storage space. Most people (I personally believe it's everybody) can't hear the difference, so this is an acceptable tradeoff. You have files that take up a 10th of the space, and nobody can tell there's a difference in audio quality.

+

A PCA-based Compression Scheme

+

What I want to try out is a PCA approach to encoding audio. The PCA technique comes from Machine Learning, where it is used for a process called Dimensionality Reduction. Put simply, the idea is the same as lossy compression: if we can find a way that represents the data well enough, we can save on space. There are a lot of theoretical concerns that lead me to believe this compression style will not end well, but I'm interested to try it nonetheless.

+

PCA works as follows: Given a dataset with a number of features, I find a way to approximate those original features using some "new features" that are statistically as close as possible to the original ones. This is comparable to a scheme like MP3: Given an original signal, I want to find a way of representing it that gets approximately close to what the original was. The difference is that PCA is designed for statistical data, and not signal data. But we won't let that stop us.

+

The idea is as follows: Given a signal, reshape it into 1024 columns by however many rows are needed (zero-padded if necessary). Run the PCA algorithm, and do dimensionality reduction with a couple different settings. The number of components I choose determines the quality: If I use 1024 components, I will essentially be using the original signal. If I use a smaller number of components, I start losing some of the data that was in the original file. This will give me an idea of whether it's possible to actually build an encoding scheme off of this, or whether I'm wasting my time.

+

Running the Algorithm

+

The audio I will be using comes from the song Tabulasa, by Broke for Free. I'll be loading in the audio signal to Python and using Scikit-Learn to actually run the PCA algorithm.

+

We first need to convert the FLAC file I have to a WAV:

+
!ffmpeg -hide_banner -loglevel panic -i "Broke For Free/XXVII/01 Tabulasa.flac" "Tabulasa.wav"
+

Then, let's go ahead and load a small sample so you can hear what is going on.

+
from IPython.display import Audio
from scipy.io import wavfile

samplerate, tabulasa = wavfile.read('Tabulasa.wav')

start = samplerate * 14 # 14 seconds in
end = start + samplerate * 10 # 10 second duration
Audio(data=tabulasa[start:end, 0], rate=samplerate)
+

Next, we'll define the code we will be using to do PCA. It's very short, as the PCA algorithm is very simple.

+
from sklearn.decomposition import PCA
import numpy as np

def pca_reduce(signal, n_components, block_size=1024):

# First, zero-pad the signal so that it is divisible by the block_size
samples = len(signal)
hanging = block_size - np.mod(samples, block_size)
padded = np.lib.pad(signal, (0, hanging), 'constant', constant_values=0)

# Reshape the signal into rows of block_size samples each
reshaped = padded.reshape((len(padded) // block_size, block_size))

# Second, do the actual PCA process
pca = PCA(n_components=n_components)
pca.fit(reshaped)

transformed = pca.transform(reshaped)
reconstructed = pca.inverse_transform(transformed).reshape((len(padded)))
return pca, transformed, reconstructed
+

Now that we've got our functions set up, let's try actually running something. First, we'll use n_components == block_size, which implies that we should end up with the same signal we started with.

+
tabulasa_left = tabulasa[:,0]

_, _, reconstructed = pca_reduce(tabulasa_left, 1024, 1024)

Audio(data=reconstructed[start:end], rate=samplerate)
+

OK, that does indeed sound like what we originally had. Let's drastically cut down the number of components we're doing this with as a sanity check: the audio quality should become incredibly poor.

+
_, _, reconstructed = pca_reduce(tabulasa_left, 32, 1024)

Audio(data=reconstructed[start:end], rate=samplerate)
+

As expected, our reconstructed audio does sound incredibly poor! But there's something else very interesting going on here under the hood. Did you notice that the bassline comes across very well, but that there's no midrange or treble? The drums are almost entirely gone.

+

Drop the (Treble)

+

It will help to have a fuller understanding of PCA when reading this part, but I'll do my best to break it down. PCA tries to find a way to best represent the dataset using "components." Think of each "component" as containing some of the information you need in order to reconstruct the full audio. For example, you might have a "low frequency" component that contains all the information you need in order to hear the bassline. There might be other components that explain the high frequency things like singers, or melodies, that you also need.

+

What makes PCA interesting is that it attempts to find the "most important" components in explaining the signal. In a signal processing world, this means that PCA is trying to find the signal amongst the noise in your data. In our case, this means that PCA, when forced to work with small numbers of components, will chuck out the noisy components first. It's doing its best to reconstruct the signal, but it has to make sacrifices somewhere.

+

So I've mentioned that PCA identifies the "noisy" components in our dataset. This is equivalent to saying that PCA removes the "high frequency" components in this case: it's very easy to represent a low-frequency signal like a bassline. It's far more difficult to represent a high-frequency signal because it's changing all the time. When you force PCA to make a tradeoff by using a small number of components, the best it can hope to do is replicate the low-frequency sections and skip the high-frequency things.

+
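
If you'd like to verify the low-pass claim rather than trust my ears, comparing magnitude spectra makes it visible. A quick sketch, reusing tabulasa_left, reconstructed, start, end, and samplerate from the blocks above:

+
import numpy as np
import matplotlib.pyplot as plt

def plot_spectrum(signal, samplerate, label):
    # Magnitude spectrum of a real-valued signal
    spectrum = np.abs(np.fft.rfft(signal))
    freqs = np.fft.rfftfreq(len(signal), d=1.0/samplerate)
    plt.semilogy(freqs, spectrum, label=label)

plot_spectrum(tabulasa_left[start:end], samplerate, 'Original')
plot_spectrum(reconstructed[start:end], samplerate, '32 components')
plt.xlabel('Frequency (Hz)')
plt.legend();
+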

This is a very interesting insight, and it also has echoes (pardon the pun) of how humans understand music in general. Other encoding schemes (like MP3, etc.) typically chop off a lot of the high-frequency range as well. There is typically a lot of high-frequency noise in audio that is nearly impossible to hear, so it's easy to remove it without anyone noticing. PCA ends up doing something similar, and while that certainly wasn't the intention, it is an interesting effect.

+

A More Realistic Example

+

So we've seen the edge cases so far: Using a large number of components results in audio very close to the original, and using a small number of components acts as a low-pass filter. How about we develop something that sounds "good enough" in practice, that we can use as a benchmark for size? We'll use ourselves as judges of audio quality, and build another function to help us estimate how much space we need to store everything in.

+
from bz2 import compress
import pandas as pd

def raw_estimate(transformed, pca):
# We assume that we'll be storing things as 16-bit WAV,
# meaning two bytes per sample
signal_bytes = transformed.tobytes()
# PCA stores the components as floating point, we'll assume
# that means 32-bit floats, so 4 bytes per element
component_bytes = pca.components_.tobytes()

# Return a result in megabytes
return (len(signal_bytes) + len(component_bytes)) / (2**20)

# Do an estimate for lossless compression applied on top of our
# PCA reduction
def bz2_estimate(transformed, pca):
bytestring = transformed.tobytes() + b';' + pca.components_.tobytes()
compressed = compress(bytestring)
return len(compressed) / (2**20)

compression_attempts = [
(1, 1),
(1, 2),
(1, 4),
(4, 32),
(16, 256),
(32, 256),
(64, 256),
(128, 1024),
(256, 1024),
(512, 1024),
(128, 2048),
(256, 2048),
(512, 2048),
(1024, 2048)
]

def build_estimates(signal, n_components, block_size):
pca, transformed, recon = pca_reduce(signal, n_components, block_size)
raw_pca_estimate = raw_estimate(transformed, pca)
bz2_pca_estimate = bz2_estimate(transformed, pca)
raw_size = len(recon.tobytes()) / (2**20)
return raw_size, raw_pca_estimate, bz2_pca_estimate

pca_compression_results = pd.DataFrame([
build_estimates(tabulasa_left, n, bs)
for n, bs in compression_attempts
])

pca_compression_results.columns = ["Raw", "PCA", "PCA w/ BZ2"]
pca_compression_results.index = compression_attempts
pca_compression_results
+
| (n_components, block_size) | Raw (MB) | PCA (MB) | PCA w/ BZ2 (MB) |
|---|---|---|---|
| (1, 1) | 69.054298 | 138.108597 | 16.431797 |
| (1, 2) | 69.054306 | 69.054306 | 32.981380 |
| (1, 4) | 69.054321 | 34.527161 | 16.715032 |
| (4, 32) | 69.054443 | 17.263611 | 8.481735 |
| (16, 256) | 69.054688 | 8.631836 | 4.274846 |
| (32, 256) | 69.054688 | 17.263672 | 8.542909 |
| (64, 256) | 69.054688 | 34.527344 | 17.097543 |
| (128, 1024) | 69.054688 | 17.263672 | 9.430644 |
| (256, 1024) | 69.054688 | 34.527344 | 18.870387 |
| (512, 1024) | 69.054688 | 69.054688 | 37.800940 |
| (128, 2048) | 69.062500 | 8.632812 | 6.185015 |
| (256, 2048) | 69.062500 | 17.265625 | 12.366942 |
| (512, 2048) | 69.062500 | 34.531250 | 24.736506 |
| (1024, 2048) | 69.062500 | 69.062500 | 49.517493 |
+

As we can see, there are a couple of instances where we do nearly 20 times better on storage space than the uncompressed file. Let's hear what that sounds like:

+
_, _, reconstructed = pca_reduce(tabulasa_left, 16, 256)
Audio(data=reconstructed[start:end], rate=samplerate)
+

It sounds incredibly poor though. Let's try something that's a bit more realistic:

+
_, _, reconstructed = pca_reduce(tabulasa_left, 1, 4)
Audio(data=reconstructed[start:end], rate=samplerate)
+

And just out of curiosity, we can try something that has the same ratio of components to block size. This should be close to an apples-to-apples comparison.

+
_, _, reconstructed = pca_reduce(tabulasa_left, 64, 256)
Audio(data=reconstructed[start:end], rate=samplerate)
+

The smaller block size definitely has better high-end response, but I personally think the larger block size sounds better overall.

+

Conclusions

+

So, what do I think about audio compression using PCA?

+

Strangely enough, it actually works pretty well relative to what I expected. That said, it's a terrible idea in general.

+

First off, you don't really save any space. The component matrix needed to actually run the PCA algorithm takes up a lot of space on its own, so it's very difficult to save space without sacrificing a huge amount of audio quality. And even then, codecs like AAC sound very nice even at bitrates that this PCA method could only dream of.

+

Second, there's the issue of audio streaming. PCA relies on two pieces: the data stream, and a matrix used to reconstruct the original signal. While it is easy to stream the data, you can't stream that matrix. And even if you divided the stream up into small blocks to give you a small matrix, you must guarantee that the matrix arrives; if you don't have that matrix, the data stream will make no sense whatsoever.

+

All said, this was an interesting experiment. It's really cool seeing PCA used for signal analysis where I haven't seen it applied before, but I don't think it will lead to any practical results. Look forward to more signal processing stuff in the future!

\ No newline at end of file diff --git a/2018/01/captains-cookbook-part-1/index.html b/2018/01/captains-cookbook-part-1/index.html new file mode 100644 index 0000000..0e8e3cd --- /dev/null +++ b/2018/01/captains-cookbook-part-1/index.html @@ -0,0 +1,88 @@ +Captain's Cookbook: Project setup | The Old Speice Guy
Skip to main content

Captain's Cookbook: Project setup

· 8 min read
Bradlee Speice

A basic introduction to getting started with Cap'N Proto.

+

I've been working a lot with Cap'N Proto recently with Rust, but there's a real dearth of information on how to set up and get going quickly. In the interest of trying to get more people using this (because I think it's fantastic), I'm going to work through a couple of examples detailing what exactly should be done to get going.

+

So, what is Cap'N Proto? It's a data serialization library. It has contemporaries in Protobuf and FlatBuffers, but is better compared with FlatBuffers. The whole point behind it is to define a schema language and serialization format such that:

+
  1. Applications that do not share the same base programming language can communicate
  2. The data and schema you use can naturally evolve over time as your needs change
+

Accompanying this are typically code generators that take the schemas you define for your application and give you back code for different languages to get data to and from that schema.

+

Now, what makes Cap'N Proto different from, say, Protobuf, is that there is no serialization/deserialization step the same way as is implemented with Protobuf. Instead, the idea is that the message itself can be loaded in memory and used directly there.

+

We're going to take a look at a series of progressively more complex projects that use Cap'N Proto in an effort to provide some examples of what idiomatic usage looks like, and shorten the startup time needed to make use of this library in Rust projects. If you want to follow along, feel free. If not, I've posted the final result for reference.

+

Step 1: Installing capnp

+

The capnp binary itself is needed for taking the schema files you write and turning them into a format that can be used by the code generation libraries. Don't ask me what that actually means, I just know that you need to make sure this is installed.

+

I'll refer you to Cap'N Proto's installation instructions here. As a quick TLDR though:

+
  • Linux users will likely have a binary shipped by their package manager - On Ubuntu, apt install capnproto is enough
  • OS X users can use Homebrew as an easy install path. Just brew install capnp
  • Windows users are a bit more complicated. If you're using Chocolatey, there's a package available. If that doesn't work however, you need to download a release zip and make sure that the capnp.exe binary is in your %PATH% environment variable
+

The way you know you're done with this step is if the following command works in your shell:

+
capnp id
+

Step 2: Starting a Cap'N Proto Rust project

+

After the capnp binary is set up, it's time to actually create our Rust project. Nothing terribly complex here, just a simple

+
mkdir capnp_cookbook_1
cd capnp_cookbook_1
cargo init --bin
+

We'll put the following content into Cargo.toml:

+
[package]
name = "capnp_cookbook_1"
version = "0.1.0"
authors = ["Bradlee Speice <bspeice@kcg.com>"]

[build-dependencies]
capnpc = "0.8" # 1

[dependencies]
capnp = "0.8" # 2
+

This sets up:

+
  1. The Rust code generator (capnpc)
  2. The Cap'N Proto runtime library (capnp)
+

We've now got everything we need to start writing a Cap'N Proto project.

+

Step 3: Writing a basic schema

+

We're going to start with writing a pretty trivial data schema that we can extend later. This is just intended to make sure you get familiar with how to start from a basic project.

+

First, we're going to create a top-level directory for storing the schema files in:

+
# Assuming we're starting from the `capnp_cookbook_1` directory created earlier

mkdir schema
cd schema
+

Now, we're going to put the following content in point.capnp:

+
@0xab555145c708dad2;

struct Point {
x @0 :Int32;
y @1 :Int32;
}
+

Pretty easy, we've now got structure for an object we'll be able to quickly encode in a binary format.

+

Step 4: Setting up the build process

+

Now it's time to actually set up the build process to make sure that Cap'N Proto generates the Rust code we'll eventually be using. This is typically done through a build.rs file to invoke the schema compiler.

+

In the same folder as your Cargo.toml file, please put the following content in build.rs:

+
extern crate capnpc;

fn main() {
::capnpc::CompilerCommand::new()
.src_prefix("schema") // 1
.file("schema/point.capnp") // 2
.run().expect("compiling schema");
}
+

This sets up the protocol compiler (capnpc from earlier) to compile the schema we've built so far.

  1. Because Cap'N Proto schema files can re-use types specified in other files, the src_prefix() tells the compiler where to look for those extra files.
  2. We specify the schema file we're including by hand. In a much larger project, you could presumably build the CompilerCommand dynamically, but we won't worry too much about that one for now.
  4. +
+

Step 5: Running the build

+

If you've done everything correctly so far, you should be able to actually build the project and see the auto-generated code. Run a cargo build command, and if you don't see cargo complaining, you're doing just fine!

+

So where exactly does the generated code go to? I think it's critically important for people to be able to see what the generated code looks like, because you need to understand what you're actually programming against. The short answer is: the generated code lives somewhere in the target/ directory.

+

The long answer is that you're best off running a find command to get the actual file path:

+
# Assuming we're running from the capnp_cookbook_1 project folder
find . -name point_capnp.rs
+

Alternately, if the find command isn't available, the path will look something like:

+
./target/debug/build/capnp_cookbook_1-c6e2990393c32fe6/out/point_capnp.rs
+

See if there are any paths in your target directory that look similar.

+

Now, the file content looks pretty nasty. I've included an example here if you aren't following along at home. There are a couple things I'll try and point out though so you can get an idea of how the schema we wrote for the "Point" message is tied to the generated code.

+

First, the Cap'N Proto library splits things up into Builder and Reader structs. These are best thought of in the same way Rust separates mut from non-mut code. Builders are mut versions of your message, and Readers are immutable versions.

+

For example, the Builder impl for point defines get_x(), set_x(), get_y(), and set_y() methods. In comparison, the Reader impl only defines get_x() and get_y() methods.

+

So now we know that there are some get and set methods available for our x and y coordinates; but what do we actually do with those?

+

Step 6: Making a point

+

So we've installed Cap'N Proto, gotten a project set up, and can generate schema code now. It's time to actually start building Cap'N Proto messages! I'm going to put the code you need here because it's small, and put some extra long comments inline. This code should go in src/main.rs:

+
// Note that we use `capnp` here, NOT `capnpc`
extern crate capnp;

// We create a module here to define how we are to access the code
// being included.
pub mod point_capnp {
// The environment variable OUT_DIR is set by Cargo, and
// is the location of all the code that was built as part
// of the codegen step.
// point_capnp.rs is the actual file to include
include!(concat!(env!("OUT_DIR"), "/point_capnp.rs"));
}

fn main() {

// The process of building a Cap'N Proto message is a bit tedious.
// We start by creating a generic Builder; it acts as the message
// container that we'll later be filling with content of our `Point`
let mut builder = capnp::message::Builder::new_default();

// Because we need a mutable reference to the `builder` later,
// we fence off this part of the code to allow sequential mutable
// borrows. As I understand it, non-lexical lifetimes:
// https://github.com/rust-lang/rust-roadmap/issues/16
// will make this no longer necessary
{
// And now we can set up the actual message we're trying to create
let mut point_msg = builder.init_root::<point_capnp::point::Builder>();

// Stuff our message with some content
point_msg.set_x(12);

point_msg.set_y(14);
}

// It's now time to serialize our message to binary. Let's set up a buffer for that:
let mut buffer = Vec::new();

// And actually fill that buffer with our data
capnp::serialize::write_message(&mut buffer, &builder).unwrap();

// Finally, let's deserialize the data
let deserialized = capnp::serialize::read_message(
&mut buffer.as_slice(),
capnp::message::ReaderOptions::new()
).unwrap();

// `deserialized` is currently a generic reader; it understands
// the content of the message we gave it (i.e. that there are two
// int32 values) but doesn't really know what they represent (the Point).
// This is where we map the generic data back into our schema.
let point_reader = deserialized.get_root::<point_capnp::point::Reader>().unwrap();

// We can now get our x and y values back, and make sure they match
assert_eq!(point_reader.get_x(), 12);
assert_eq!(point_reader.get_y(), 14);
}
+

And with that, we've now got a functioning project. Here's the content I'm planning to go over next as we build up some practical examples of Cap'N Proto in action:

\ No newline at end of file diff --git a/2018/01/captains-cookbook-part-2/index.html b/2018/01/captains-cookbook-part-2/index.html new file mode 100644 index 0000000..a54b9dc --- /dev/null +++ b/2018/01/captains-cookbook-part-2/index.html @@ -0,0 +1,75 @@ +Captain's Cookbook: Practical usage | The Old Speice Guy
Skip to main content

Captain's Cookbook: Practical usage

· 7 min read
Bradlee Speice

A look at more practical usages of Cap'N Proto

+

Part 1 of this series took a look at a basic starting project with Cap'N Proto. In this section, we're going to take the (admittedly basic) schema and look at how we can add a pretty basic feature - sending Cap'N Proto messages between threads. It's nothing complex, but I want to make sure that there's some documentation surrounding practical usage of the library.

+

As a quick refresher, we build a Cap'N Proto message and go through the serialization/deserialization steps here. Our current example is going to build on the code we wrote there; after the deserialization step, we'll try and send the point_reader to a separate thread for verification.

+

I'm going to walk through the attempts as I made them and my thinking throughout. If you want to skip to the final project, check out the code available here.

+

Attempt 1: Move the reference

+

As a first attempt, we're going to try and let Rust move the reference. Our code will look something like:

+
fn main() {

// ...assume that we own a `buffer: Vec<u8>` containing the binary message content from
// somewhere else

let deserialized = capnp::serialize::read_message(
&mut buffer.as_slice(),
capnp::message::ReaderOptions::new()
).unwrap();

let point_reader = deserialized.get_root::<point_capnp::point::Reader>().unwrap();

// By using `point_reader` inside the new thread, we're hoping that Rust can
// safely move the reference and invalidate the original thread's usage.
// Since the original thread doesn't use `point_reader` again, this should
// be safe, right?
let handle = std::thread::spawn(move || {

assert_eq!(point_reader.get_x(), 12);

assert_eq!(point_reader.get_y(), 14);
});

handle.join().unwrap()
}
+

Well, the Rust compiler doesn't really like this. We get four distinct errors back:

+
error[E0277]: the trait bound `*const u8: std::marker::Send` is not satisfied in `[closure@src/main.rs:31:37: 36:6 point_reader:point_capnp::point::Reader<'_>]`                                                                                                                
--> src/main.rs:31:18
|
31 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const u8` cannot be sent between threads safely
|

error[E0277]: the trait bound `*const capnp::private::layout::WirePointer: std::marker::Send` is not satisfied in `[closure@src/main.rs:31:37: 36:6 point_reader:point_capnp::point::Reader<'_>]`
--> src/main.rs:31:18
|
31 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const capnp::private::layout::WirePointer` cannot be sent between threads safely
|

error[E0277]: the trait bound `capnp::private::arena::ReaderArena: std::marker::Sync` is not satisfied
--> src/main.rs:31:18
|
31 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `capnp::private::arena::ReaderArena` cannot be shared between threads safely
|

error[E0277]: the trait bound `*const std::vec::Vec<std::option::Option<std::boxed::Box<capnp::private::capability::ClientHook + 'static>>>: std::marker::Send` is not satisfied in `[closure@src/main.rs:31:37: 36:6 point_reader:point_capnp::point::Reader<'_>]`
--> src/main.rs:31:18
|
31 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const std::vec::Vec<std::option::Option<std::boxed::Box<capnp::private::capability::ClientHook + 'static>>>` cannot be sent between threads safely
|

error: aborting due to 4 previous errors
+

Note: I've removed the help text for brevity, but suffice it to say that these errors are intimidating. Pay attention to the text that keeps getting repeated though: XYZ cannot be sent between threads safely.

+

This is a bit frustrating: we own the buffer from which all the content was derived, and we don't have any unsafe accesses in our code. We guarantee that we wait for the child thread to stop first, so there's no possibility of the pointer becoming invalid because the original thread exits before the child thread does. So why is Rust preventing us from doing something that really should be legal?

+

This is what is known as fighting the borrow checker. Let our crusade begin.

+

Attempt 2: Put the Reader in a Box

+

The Box type allows us to convert a pointer we have (in our case the point_reader) into an "owned" value, which should be easier to send across threads. Our next attempt looks something like this:

+
fn main() {

// ...assume that we own a `buffer: Vec<u8>` containing the binary message content
// from somewhere else

let deserialized = capnp::serialize::read_message(
&mut buffer.as_slice(),
capnp::message::ReaderOptions::new()
).unwrap();

let point_reader = deserialized.get_root::<point_capnp::point::Reader>().unwrap();

let boxed_reader = Box::new(point_reader);

// Now that the reader is `Box`ed, we've proven ownership, and Rust can
// move the ownership to the new thread, right?
let handle = std::thread::spawn(move || {

assert_eq!(boxed_reader.get_x(), 12);

assert_eq!(boxed_reader.get_y(), 14);
});

handle.join().unwrap();
}
+

Spoiler alert: still doesn't work. Same errors still show up.

+
error[E0277]: the trait bound `*const u8: std::marker::Send` is not satisfied in `point_capnp::point::Reader<'_>`                       
--> src/main.rs:33:18
|
33 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const u8` cannot be sent between threads safely
|

error[E0277]: the trait bound `*const capnp::private::layout::WirePointer: std::marker::Send` is not satisfied in `point_capnp::point::Reader<'_>`
--> src/main.rs:33:18
|
33 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const capnp::private::layout::WirePointer` cannot be sent between threads safely
|

error[E0277]: the trait bound `capnp::private::arena::ReaderArena: std::marker::Sync` is not satisfied
--> src/main.rs:33:18
|
33 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `capnp::private::arena::ReaderArena` cannot be shared between threads safely
|

error[E0277]: the trait bound `*const std::vec::Vec<std::option::Option<std::boxed::Box<capnp::private::capability::ClientHook + 'static>>>: std::marker::Send` is not satisfied in `point_capnp::point::Reader<'_>`
--> src/main.rs:33:18
|
33 | let handle = std::thread::spawn(move || {
| ^^^^^^^^^^^^^^^^^^ `*const std::vec::Vec<std::option::Option<std::boxed::Box<capnp::private::capability::ClientHook + 'static>>>` cannot be sent between threads safely
|

error: aborting due to 4 previous errors
+

Let's be a little bit smarter about the errors this time though. What is that std::marker::Send thing the compiler keeps telling us about?

+

The documentation is pretty clear; Send is used to denote:

+
+

Types that can be transferred across thread boundaries.

+
+

In our case, we are seeing the error messages for two reasons:

+
  1. Pointers (*const u8) are not safe to send across thread boundaries. While we're nice in our code, making sure that we wait on the child thread to finish before closing down, the Rust compiler can't make that assumption, and so complains that we're not using this in a safe manner.
  2. The point_capnp::point::Reader type is itself not safe to send across threads because it doesn't implement the Send trait. Which is to say, the things that make up a Reader are themselves not thread-safe, so the Reader is also not thread-safe.
+

So, how are we to actually transfer a parsed Cap'N Proto message between threads?

+

Attempt 3: The TypedReader

+

The TypedReader is a new API implemented in the Cap'N Proto Rust code. We're interested in it here for two reasons:

+
  1. It allows us to define an object where the object owns the underlying data. In previous attempts, the current context owned the data, but the Reader itself had no such control.
  2. We can compose the TypedReader using objects that are safe to Send across threads, guaranteeing that we can transfer parsed messages across threads.
+

The actual type info for the TypedReader is a bit complex. And to be honest, I'm still really not sure what the whole point of the PhantomData thing is either. My impression is that it lets us enforce type safety when we know what the underlying Cap'N Proto message represents. That is, technically the only thing we're storing is the untyped binary message; PhantomData just enforces the principle that the binary represents some specific object that has been parsed.
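
To make that idea concrete, here's a stripped-down sketch of my mental model (my own illustration, not capnp's actual definition): the struct stores only untyped bytes, and PhantomData records which schema type those bytes were parsed as.

use std::marker::PhantomData;

// Stand-in for a generated schema type like point_capnp::point::Owned
struct Point;

// Only raw bytes are stored; the marker takes up no space at runtime
struct Typed<T> {
    bytes: Vec<u8>,
    _marker: PhantomData<T>,
}

impl<T> Typed<T> {
    fn new(bytes: Vec<u8>) -> Self {
        Typed { bytes, _marker: PhantomData }
    }
}

fn main() {
    // The compiler now distinguishes Typed<Point> from any other Typed<U>,
    // even though both store plain bytes
    let point: Typed<Point> = Typed::new(vec![0, 1, 2, 3]);
    assert_eq!(point.bytes.len(), 4);
}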

+

Either way, we can carefully construct something which is safe to move between threads:

+
fn main() {

// ...assume that we own a `buffer: Vec<u8>` containing the binary message content from somewhere else

let deserialized = capnp::serialize::read_message(
&mut buffer.as_slice(),
capnp::message::ReaderOptions::new()
).unwrap();

let point_reader: capnp::message::TypedReader<capnp::serialize::OwnedSegments, point_capnp::point::Owned> =
capnp::message::TypedReader::new(deserialized);

// Because the point_reader is now working with OwnedSegments (which are owned vectors) and an Owned message
// (which is 'static lifetime), this is now safe
let handle = std::thread::spawn(move || {

// The point_reader owns its data, and we use .get() to retrieve the actual point_capnp::point::Reader
// object from it
let point_root = point_reader.get().unwrap();

assert_eq!(point_root.get_x(), 12);

assert_eq!(point_root.get_y(), 14);
});

handle.join().unwrap();
}
+

And while we've left Rust to do the dirty work of actually moving the point_reader into the new thread, we could also use things like mpsc channels to achieve a similar effect.
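
Here's a quick sketch of the channel version, using a plain Vec<u8> as a stand-in for the TypedReader; anything that implements Send can travel through the channel the same way.

use std::sync::mpsc;
use std::thread;

fn main() {
    // The channel can carry any type that implements `Send`
    let (tx, rx) = mpsc::channel::<Vec<u8>>();

    let handle = thread::spawn(move || {
        let message = rx.recv().unwrap();
        assert_eq!(message, vec![12, 14]);
    });

    tx.send(vec![12, 14]).unwrap();
    handle.join().unwrap();
}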

+

So now we're able to define basic Cap'N Proto messages, and send them all around our programs.


What I learned porting dateutil to Rust

· 7 min read
Bradlee Speice

I've mostly been a lurker in Rust for a while, making a couple small contributions here and there. So launching dtparse feels like a nice step towards becoming a functioning member of society. But not too much, because then you know people start asking you to pay bills, and ain't nobody got time for that.

+

But I built dtparse, and you can read about my thoughts on the process. Or don't. I won't tell you what to do with your life (but you should totally keep reading).

+

Slow down, what?

+

OK, fine, I guess I should start with why someone would do this.

+

Dateutil is a Python library for handling dates. The standard library support for time in Python is kinda dope, but there are a lot of extras that go into making it useful beyond just the datetime module. dateutil.parser specifically is code to take all the super-weird time formats people come up with and turn them into something actually useful.

+

Date/time parsing, it turns out, is just like everything else involving computers and time: it feels like it shouldn't be that difficult to do, until you try to do it, and you realize that people suck and this is why we can't have nice things. But alas, we'll try and make contemporary art out of the rubble and give it a pretentious name like Time.

+

A gravel mound

+
+

Time

+
+

What makes dateutil.parser great is that there's a single function with a single argument that drives what programmers interact with: parse(timestr). It takes in the time as a string, and gives you back a reasonable "look, this is the best anyone can possibly do to make sense of your input" value. It doesn't expect much of you.

+

And now it's in Rust.
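
For reference, a minimal usage sketch (assuming dtparse's top-level parse function, which returns a NaiveDateTime along with an optional fixed offset):

use dtparse::parse;

fn main() {
    // One function, one string argument, just like dateutil.parser
    let (datetime, offset) = parse("January 1, 2018 10:00am").unwrap();
    println!("{} (offset: {:?})", datetime, offset);
}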

+

Lost in Translation

+

Having worked at a bulge-bracket bank watching Java programmers try to be Python programmers, I'm admittedly hesitant to publish Python code that's trying to be Rust. Interestingly, Rust code can actually do a great job of mimicking Python. It's certainly not idiomatic Rust, but I've had better experiences than this guy who attempted the same thing for D. These are the actual take-aways:

+

When transcribing code, stay as close to the original library as possible. I'm talking about using the same variable names, same access patterns, the whole shebang. It's way too easy to make a couple of typos, and all of a sudden your code blows up in new and exciting ways. Having a reference manual for verbatim what your code should be means that you don't spend long debugging complicated logic; you're mostly just looking for typos.

+

Also, don't use nice Rust things like enums. While one time it worked out OK for me, I also managed to shoot myself in the foot a couple times because dateutil stores AM/PM as a boolean and I mixed up which was true, and which was false (side note: AM is false, PM is true). In general, writing nice code should not be a first-pass priority when you're just trying to recreate the same functionality.
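
As a hypothetical illustration (my own, not dtparse's actual code), an enum makes the convention impossible to mix up, where the boolean encoding has to be memorized:

// dateutil's convention: AM is false, PM is true
enum Meridiem {
    AM,
    PM,
}

fn from_dateutil_flag(pm: bool) -> Meridiem {
    if pm { Meridiem::PM } else { Meridiem::AM }
}

fn main() {
    // The name at the call site now says what the value means
    let _morning = from_dateutil_flag(false); // Meridiem::AM
    let _evening = from_dateutil_flag(true); // Meridiem::PM
}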

+

Exceptions are a pain. Make peace with it. Python code is just allowed to skip stack frames. So when a co-worker told me "Rust is getting try-catch syntax" I properly freaked out. Turns out he's not quite right, and I'm OK with that. And while dateutil is pretty well-behaved about not skipping multiple stack frames, 130-line try-catch blocks take a while to verify.

+

As another Python quirk, be very careful about long nested if-elif-else blocks. I used to think that Python's whitespace was just there to get you to format your code correctly. I think that no longer. It's way too easy to close a block too early and have incredibly weird issues in the logic. Make sure you use an editor that displays indentation levels so you can keep things straight.

+

Rust macros are not free. I originally had the main test body wrapped up in a macro using pyo3. It took two minutes to compile. After moving things to a function, compile times dropped down to ~5 seconds. Turns out 150 lines * 100 tests = a lot of redundant code to be compiled. My new rule of thumb is that any macros longer than 10-15 lines are actually functions that need to be liberated, man.
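
To sketch why this matters (a toy example, not the real test suite): the macro body is re-expanded and re-compiled at every call site, while the function body is compiled exactly once.

// Stand-in for the real parser under test
fn parse(input: &str) -> String {
    input.trim().to_lowercase()
}

// Imagine ~150 lines expanded here, separately, for every test
macro_rules! check_parse {
    ($input:expr, $expected:expr) => {
        assert_eq!(parse($input), $expected);
    };
}

// The same logic compiled once and merely called from each test
fn check_parse_fn(input: &str, expected: &str) {
    assert_eq!(parse(input), expected);
}

fn main() {
    check_parse!(" May 2018 ", "may 2018");
    check_parse_fn(" May 2018 ", "may 2018");
}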

+

Finally, I really miss list comprehensions and dictionary comprehensions. As a quick comparison, see this dateutil code and the implementation in Rust. I probably wrote it wrong, and I'm sorry. Ultimately though, I hope that these comprehensions can be added through macros or syntax extensions. Either way, they're expressive, save typing, and are super-readable. Let's get more of that.
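
For anyone unfamiliar with the gap being described, here's a rough analogue (my own example, unrelated to dateutil): Python's [n.upper() for n in names if n.startswith("a")] written with Rust's iterator adapters.

fn main() {
    let names = ["april", "august", "monday"];

    // filter + map + collect stands in for the comprehension
    let result: Vec<String> = names
        .iter()
        .filter(|n| n.starts_with('a'))
        .map(|n| n.to_uppercase())
        .collect();

    assert_eq!(result, vec!["APRIL", "AUGUST"]);
}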

+

Using a young language

+

Now, Rust is exciting and new, which means that there's opportunity to make a substantive impact. On more than one occasion though, I've had issues navigating the Rust ecosystem.

+

What I'll call the "canonical library" is still being built. In Python, if you need datetime parsing, you use dateutil. If you want decimal types, it's already in the standard library. While I might've gotten away with f64, dateutil uses decimals, and I wanted to follow the principle of staying as close to the original library as possible. Thus began my quest to find a decimal library in Rust. What I quickly found was summarized in a comment:

+
+

Writing a BigDecimal is easy. Writing a good BigDecimal is hard.

+

-cmr

+
+

In practice, this means that there are at least 4 different implementations available. And that's a lot of decisions to worry about when all I'm thinking is "why can't calendar reform be a thing," and I'm forced to dig through a couple different threads to figure out if the library I'm looking at is dead or just stable.

+

And even when the "canonical library" exists, there are no guarantees that it will be well-maintained. Chrono is the de facto date/time library in Rust, and just released version 0.4.4 like two days ago. Meanwhile, chrono-tz appears to be dead in the water even though there are people happy to help maintain it. I know relatively little about it, but it appears that most of the release process is automated; keeping that up to date should be a no-brainer.

+

Trial Maintenance Policy

+

Specifically, given that "maintenance" is an oft-discussed issue, I'm going to try out the following policy to keep things moving on dtparse:

+
  1. Issues/PRs needing maintainer feedback will be updated at least weekly. I want to make sure nobody's blocking on me.

  2. To keep issues/PRs needing contributor feedback moving, I'm going to (kindly) ask the contributor to check in after two weeks, and close the issue without resolution if I hear nothing back after a month.

The second point, I think, has the potential to be a bit controversial, so I'm happy to receive feedback on that. And if a contributor responds with "hey, still working on it, had a kid and I'm running on 30 seconds of sleep a night," then first: congratulations on sustaining human life. And second: I don't mind keeping those requests going indefinitely. I just want to try and balance keeping things moving with giving people the necessary time they need.

+

I should also note that I'm still getting some best practices in place - CONTRIBUTING and CONTRIBUTORS files need to be added, as well as issue/PR templates. In progress. None of us are perfect.

+

Roadmap and Conclusion

+

So if I've now built a dateutil-compatible parser, we're done, right? Of course not! That's not nearly ambitious enough.

+

Ultimately, I'd love to have a library that's capable of parsing everything the Linux date command can do (and not date on OSX, because seriously, BSD coreutils are the worst). I know Rust has a coreutils rewrite going on, and dtparse would potentially be an interesting candidate since it doesn't bring in a lot of extra dependencies. humantime could help pick up some of the (current) slack in dtparse, so maybe we can share and care with each other?

+

All in all, I'm mostly hoping that nobody's already done this and I haven't spent a bit over a month on redundant code. So if it exists, tell me. I need to know, but be nice about it, because I'm going to take it hard.

+

And in the meantime, I'm looking forward to building more. Onwards.


Isomorphic desktop apps with Rust

· 10 min read
Bradlee Speice

I both despise Javascript and am stunned by its success doing some really cool things. It's this duality that's led me to a couple of (very) late nights over the past weeks trying to reconcile myself as I bootstrap a simple desktop application.

+

See, as much as Webassembly isn't trying to replace Javascript, I want Javascript gone. There are plenty of people who don't share my views, and they are probably nicer and more fun at parties. But I cringe every time "Webpack" is mentioned, and I think it's hilarious that the language specification dramatically outpaces anyone's actual implementation. The answer to this conundrum is of course to recompile code from newer versions of the language to older versions of the same language before running. At least Babel is a nice tongue-in-cheek reference.

+

Yet for as much hate as Electron receives, it does a stunningly good job at solving a really hard problem: how the hell do I put a button on the screen and react when the user clicks it? GUI programming is hard, straight up. But if browsers are already able to run everywhere, why don't we take advantage of someone else solving the hard problems for us? I don't like that I have to use Javascript for it, but I really don't feel inclined to whip out good ol' wxWidgets.

+

Now there are other native solutions (libui-rs, conrod, oh hey wxWidgets again!), but those also have their own issues with distribution, styling, etc. With Electron, I can yarn create electron-app my-app and just get going, knowing that packaging/upgrades/etc. are built in.

+

My question is: given recent innovations with WASM, are we Electron yet?

+

No, not really.

+

Instead, what would it take to get to a point where we can skip Javascript in Electron apps?

+

Truth is, WASM/Webassembly is a pretty new technology and I'm a total beginner in this area. There may already be solutions to the issues I discuss, but I'm totally unaware of them, so I'm going to try and organize what I did manage to discover.

+

I should also mention that the content and things I'm talking about here are not intended to be prescriptive, but more "if someone else is interested, what do we already know doesn't work?" I expect everything in this post to be obsolete within two months. Even over the course of writing this, a separate blog post had to be modified because upstream changes broke a Rust tool the post tried to use. The post ultimately got updated, but all this happened within the span of a week. Things are moving quickly.

+

I'll also note that we're going to skip asm.js and emscripten. Truth be told, I couldn't get either of these to output anything, and so I'm just going to say here be dragons. Everything I'm discussing here uses the wasm32-unknown-unknown target.

+

The code that I did get running is available over here. Feel free to use it as a starting point, but I'm mostly including the link as a reference for the things that were attempted.

+

An Example Running Application

+

So, I did technically get a running application:

+

Electron app using WASM

+

...which you can also try out if you want:

+
git clone https://github.com/speice-io/isomorphic-rust.git
cd isomorphic_rust/percy
yarn install && yarn start
+

...but I wouldn't really call it a "high quality" starting point to base future work on. It's mostly there to prove this is possible in the first place. And that's something to be proud of! There's a huge amount of engineering that went into showing a window with the text "It's alive!".

+

There are also a lot of usability issues that prevent me from recommending anyone try Electron and WASM apps at the moment, and I think that's the more important thing to discuss.

+

Issue the First: Complicated Toolchains

+

I quickly established that wasm-bindgen was necessary to "link" my Rust code to Javascript. At that point you've got an Electron app that starts an HTML page which ultimately fetches your WASM blob. To keep things simple, the goal was to package everything using webpack so that I could just load a bundle.js file on the page. That decision was to be the last thing that kinda worked in this process.

+

The first issue I ran into while attempting to bundle everything via webpack is a detail in the WASM spec:

+
+

This function accepts a Response object, or a promise for one, and ... [if it] does not match the application/wasm MIME type, the returned promise will be rejected with a TypeError;

+

WebAssembly - Additional Web Embedding API

+
+

Specifically, if you try and load a WASM blob without the MIME type set, you'll get an error. On the web this isn't a huge issue, as the server can set MIME types when delivering the blob. With Electron, you're resolving things with a file:// URL and thus can't control the MIME type:

+

TypeError: Incorrect response MIME type. Expected 'application/wasm'.

+

There are a couple of solutions depending on how far into the deep end you care to venture:

+
  • Embed a static file server in your Electron application
  • Use a custom protocol and custom protocol handler
  • Host your WASM blob on a website that you resolve at runtime

But all these are pretty bad solutions and defeat the purpose of using WASM in the first place. Instead, my workaround was to open a PR with webpack and use regex to remove calls to instantiateStreaming in the build script:

+
cargo +nightly build --target=wasm32-unknown-unknown && \
wasm-bindgen "$WASM_DIR/debug/$WASM_NAME.wasm" --out-dir "$APP_DIR" --no-typescript && \
# Have to use --mode=development so we can patch out the call to instantiateStreaming
"$DIR/node_modules/webpack-cli/bin/cli.js" --mode=development "$APP_DIR/app_loader.js" -o "$APP_DIR/bundle.js" && \
sed -i 's/.*instantiateStreaming.*//g' "$APP_DIR/bundle.js"
+

Once that lands, the build process becomes much simpler:

+

cargo +nightly build --target=wasm32-unknown-unknown && \
wasm-bindgen "$WASM_DIR/debug/$WASM_NAME.wasm" --out-dir "$APP_DIR" --no-typescript && \
"$DIR/node_modules/webpack-cli/bin/cli.js" --mode=production "$APP_DIR/app_loader.js" -o "$APP_DIR/bundle.js"
+

But we're not done yet! After we compile Rust into WASM and link WASM to Javascript (via wasm-bindgen and webpack), we still have to make an Electron app. For this purpose I used a starter app from Electron Forge, and then a prestart script to actually handle starting the application.

+

The final toolchain looks something like this:

+
  • yarn start triggers the prestart script
  • prestart checks for missing tools (wasm-bindgen-cli, etc.) and then:
    • Uses cargo to compile the Rust code into WASM
    • Uses wasm-bindgen to link the WASM blob into a Javascript file with exported symbols
    • Uses webpack to bundle the page start script with the Javascript we just generated
      • Uses babel under the hood to compile the wasm-bindgen code down from ES6 into something browser-compatible
  • The start script runs an Electron Forge handler to do some sanity checks
  • Electron actually starts

...which is complicated. I think more work needs to be done to either build a high-quality starter app that can manage these steps, or another tool that "just handles" the complexity of linking a compiled WASM file into something the Electron browser can run.

+

Issue the Second: WASM tools in Rust

+

For as much as I didn't enjoy the Javascript tooling needed to interface with Rust, the Rust-only bits aren't any better at the moment. I get it, a lot of projects are just starting off, and that leads to a fragmented ecosystem. Here's what I can recommend as a starting point:

+

Don't check in your Cargo.lock files to version control. If there's a disagreement between the version of wasm-bindgen-cli you have installed and the wasm-bindgen you're compiling with in Cargo.lock, you get a nasty error:

+
it looks like the Rust project used to create this wasm file was linked against
a different version of wasm-bindgen than this binary:

rust wasm file: 0.2.21
this binary: 0.2.17

Currently the bindgen format is unstable enough that these two version must
exactly match, so it's required that these two version are kept in sync by
either updating the wasm-bindgen dependency or this binary.
+

Not that I ever managed to run into this myself (coughs nervously).

+

There are two projects attempting to be "application frameworks": percy and yew. Between those, I managed to get two examples running using percy, but was unable to get an example running with yew because of issues with "missing modules" during the webpack step:

+
ERROR in ./dist/electron_yew_wasm_bg.wasm
Module not found: Error: Can't resolve 'env' in '/home/bspeice/Development/isomorphic_rust/yew/dist'
@ ./dist/electron_yew_wasm_bg.wasm
@ ./dist/electron_yew_wasm.js
@ ./dist/app.js
@ ./dist/app_loader.js
+

If you want to work with the browser APIs directly, your choices are percy-webapis or stdweb (or eventually web-sys). See above for my percy examples, but when I tried an example with stdweb, I was unable to get it running:

+
ERROR in ./dist/stdweb_electron_bg.wasm
Module not found: Error: Can't resolve 'env' in '/home/bspeice/Development/isomorphic_rust/stdweb/dist'
@ ./dist/stdweb_electron_bg.wasm
@ ./dist/stdweb_electron.js
@ ./dist/app_loader.js
+

At this point I'm pretty convinced that stdweb is causing issues for yew as well, but can't prove it.

+

I did also get a minimal example running that doesn't depend on any tools besides wasm-bindgen. However, it requires manually writing "extern C" blocks for everything you need from the browser. Es no bueno.

+

Finally, from a tools and platform view, there are two up-and-coming packages that should be mentioned: js-sys and web-sys. Their purpose is to be fundamental building blocks that expose the browser's APIs to Rust. If you're interested in building an app framework from scratch, these should give you the most flexibility. I didn't touch either in my research, though I expect them to be essential long-term.

+

So there's a lot in play from the Rust side of things, and it's just going to take some time to figure out what works and what doesn't.

+

Issue the Third: Known Unknowns

+

Alright, so after I managed to get an application started, I stopped there. It was a good deal of effort to chain together even a proof of concept, and at this point I'd rather learn Typescript than keep trying to maintain an incredibly brittle pipeline. Blasphemy, I know...

+

The important point I want to make is that there's a lot unknown about how any of this holds up outside proofs of concept. Things I didn't attempt:

+
  • Testing
  • Packaging
  • Updates
  • Literally anything related to why I wanted to use Electron in the first place

What it Would Take

+

Much as I don't like Javascript, the tools are too shaky for me to recommend mixing Electron and WASM at the moment. There's a lot of innovation happening, so who knows? Someone might have an application in production a couple months from now. But at the moment, I'm personally going to stay away.

+

Let's finish with a wishlist then - here are the things that I think need to happen before Electron/WASM/Rust can become a thing:

+
  • Webpack still needs some updates. The necessary work is in progress, but hasn't landed yet (#7983)
  • Browser API libraries (web-sys and stdweb) need to make sure they can support running in Electron (see module error above)
  • Projects need to stabilize. There's talk of stdweb being turned into a Rust API on top of web-sys, and percy moving to web-sys, both of which are big changes
  • wasm-bindgen is great, but still in the "move fast and break things" phase
  • A good "boilerplate" app would dramatically simplify the start-up costs; electron-react-boilerplate comes to mind as a good project to imitate
  • More blog posts/contributors! I think Electron + Rust could be cool, but I have no idea what I'm doing

Primitives in Rust are weird (and cool)

· 7 min read
Bradlee Speice

I wrote a really small Rust program a while back because I was curious. I was 100% convinced it couldn't possibly run:

+
fn main() {
println!("{}", 8.to_string())
}
+

And to my complete befuddlement, it compiled, ran, and produced a completely sensible output.

+

The reason I was so surprised has to do with how Rust treats a special category of things I'm going to call primitives. In the current version of the Rust book, you'll see them referred to as scalars, and in older versions they'll be called primitives, but we're going to stick with the name primitive for the time being. Explaining why this program is so cool requires talking about a number of other programming languages, and keeping a consistent terminology makes things easier.

+

You've been warned: this is going to be a tedious post about a relatively minor issue that involves Java, Python, C, and x86 Assembly. And also me pretending like I know what I'm talking about with assembly.

+

Defining primitives (Java)

+

The reason I'm using the name primitive comes from how much of my life is Java right now. For the most part I like Java, but I digress. In Java, there's a special name for some specific types of values:

+
+
boolean    char    byte
short      int     long
float      double
+
+

They are referred to as primitives. And relative to the other bits of Java, they have two unique features. First, they don't have to worry about the billion-dollar mistake; primitives in Java can never be null. Second: they can't have instance methods. Remember that Rust program from earlier? Java has no idea what to do with it:

+
class Main {
public static void main(String[] args) {
int x = 8;
System.out.println(x.toString()); // Triggers a compiler error
}
}
+

The error is:

+
Main.java:5: error: int cannot be dereferenced
System.out.println(x.toString());
^
1 error
+

Specifically, Java's Object and things that inherit from it are pointers under the hood, and we have to dereference them before the fields and methods they define can be used. In contrast, primitive types are just values - there's nothing to be dereferenced. In memory, they're just a sequence of bits.

+

If we really want, we can turn the int into an Integer and then dereference it, but it's a bit wasteful:

+
class Main {
public static void main(String[] args) {
int x = 8;
Integer y = Integer.valueOf(x);
System.out.println(y.toString());
}
}
+

This creates the variable y of type Integer (which inherits Object), and at run time we dereference y to locate the toString() function and call it. Rust obviously handles things a bit differently, but we have to dig into the low-level details to see it in action.

+

Low Level Handling of Primitives (C)

+

We first need to build a foundation for reading and understanding the assembly code the final answer requires. Let's begin with showing how the C language (and your computer) thinks about "primitive" values in memory:

+
void my_function(int num) {}

int main() {
int x = 8;
my_function(x);
}
+

The compiler explorer gives us an easy way of showing off the assembly-level code that's generated (output lightly edited):

+
main:
push rbp
mov rbp, rsp
sub rsp, 16

; We assign the value `8` to `x` here
mov DWORD PTR [rbp-4], 8

; And copy the bits making up `x` to a location
; `my_function` can access (`edi`)
mov eax, DWORD PTR [rbp-4]
mov edi, eax

; Call `my_function` and give it control
call my_function

mov eax, 0
leave
ret

my_function:
push rbp
mov rbp, rsp

; Copy the bits out of the pre-determined location (`edi`)
; to somewhere we can use
mov DWORD PTR [rbp-4], edi
nop

pop rbp
ret
+

At a really low level of memory, we're copying bits around using the mov instruction; nothing crazy. But to show how similar Rust is, let's take a look at our program translated from C to Rust:

+
fn my_function(x: i32) {}

fn main() {
let x = 8;
my_function(x)
}
+

And the assembly generated when we stick it in the compiler explorer (again, lightly edited):

+
example::main:
push rax

; Look familiar? We're copying bits to a location for `my_function`
; The compiler just optimizes out holding `x` in memory
mov edi, 8

; Call `my_function` and give it control
call example::my_function

pop rax
ret

example::my_function:
sub rsp, 4

; And copying those bits again, just like in C
mov dword ptr [rsp], edi

add rsp, 4
ret
+

The generated Rust assembly is functionally pretty close to the C assembly: when working with primitives, we're just dealing with bits in memory.

+

In Java we have to dereference a pointer to call its functions; in Rust, there's no pointer to dereference. So what exactly is going on with this .to_string() function call?

+

impl primitive (and Python)

+

Now it's time to reveal my trap card (er, show the revelation) that tied all this together: Rust has implementations for its primitive types. That's right, impl blocks aren't only for structs and traits, primitives get them too. Don't believe me? Check out u32, f64 and char as examples.

+

But the really interesting bit is how Rust turns those impl blocks into assembly. Let's break out the compiler explorer once again:

+
pub fn main() {
8.to_string()
}
+

And the interesting bits in the assembly (heavily trimmed down):

+
example::main:
sub rsp, 24
mov rdi, rsp
lea rax, [rip + .Lbyte_str.u]
mov rsi, rax

; Cool stuff right here
call <T as alloc::string::ToString>::to_string@PLT

mov rdi, rsp
call core::ptr::drop_in_place
add rsp, 24
ret
+

Now, this assembly is a bit more complicated, but here's the big revelation: we're calling to_string() as a function that exists all on its own, and giving it the instance of 8. Instead of thinking of the value 8 as an instance of u32 and then peeking in to find the location of the function we want to call (like Java), we have a function that exists outside of the instance and just give that function the value 8.

+

This is an incredibly technical detail, but the interesting idea I had was this: if to_string() is a static function, can I refer to the unbound function and give it an instance?

+

Better explained in code (and a compiler explorer link because I seriously love this thing):

+
struct MyVal {
x: u32
}

impl MyVal {
fn to_string(&self) -> String {
self.x.to_string()
}
}

pub fn main() {
let my_val = MyVal { x: 8 };

// THESE ARE THE SAME
my_val.to_string();
MyVal::to_string(&my_val);
}
+

Rust is totally fine with "binding" the function call to the instance, and also with calling it as a static function.

+

MIND == BLOWN.

+

Python does the same thing, where I can both call functions bound to their instances and also call them as unbound functions where I give them the instance:

+
class MyClass():
x = 24

def my_function(self):
print(self.x)

m = MyClass()

m.my_function()
MyClass.my_function(m)
+

And Python tries to make you think that primitives can have instance methods...

+
>>> dir(8)
['__abs__', '__add__', '__and__', '__class__', '__cmp__', '__coerce__',
'__delattr__', '__div__', '__divmod__', '__doc__', '__float__', '__floordiv__',
...
'__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__',
...]

>>> # Theoretically `8.__str__()` should exist, but:

>>> 8.__str__()
File "<stdin>", line 1
8.__str__()
^
SyntaxError: invalid syntax

>>> # It will run if we assign it first though:
>>> x = 8
>>> x.__str__()
'8'
+

...but in practice it's a bit complicated.

+

So while Python handles binding instance methods in a way similar to Rust, it's still not able to run the example we started with.

+

Conclusion

+

This was a super-roundabout way of demonstrating it, but the way Rust handles incredibly minor details like primitives leads to really cool effects. Primitives are optimized like C in how they have a space-efficient memory layout, yet the language still has a lot of features I enjoy in Python (like both instance and late binding).

+

And when you put it together, there are areas where Rust does cool things nobody else can; as a quirky feature of Rust's type system, 8.to_string() is actually valid code.

+

Now go forth and fool your friends into thinking you know assembly. This is all I've got.


A case study in heaptrack

· 5 min read
Bradlee Speice

I remember early in my career someone joking that:

+
+

Programmers have it too easy these days. They should learn to develop in low memory environments and be more efficient.

+
+

...though it's not like the first code I wrote was for a graphing calculator packing a whole 24KB of RAM.

+

But the principle remains: be efficient with the resources you have, because what Intel giveth, Microsoft taketh away.

+

My professional work is focused on this kind of efficiency; low-latency financial markets demand that you understand at a deep level exactly what your code is doing. As I continue experimenting with Rust for personal projects, it's exciting to bring a utilitarian mindset with me: there's flexibility for the times I pretend to have a garbage collector, and flexibility for the times that I really care about how memory is used.

+

This post is a (small) case study in how I went from the former to the latter. And ultimately, it's intended to be a starting toolkit to empower analysis of your own code.

+

Curiosity

+

When I first started building the dtparse crate, my intention was to mirror as closely as possible the equivalent Python library. Python, as you may know, is garbage collected. Very rarely is memory usage considered in Python, and I likewise wasn't paying too much attention when dtparse was first being built.

+

This lackadaisical approach to memory works well enough, and I'm not planning on making dtparse hyper-efficient. But every so often, I've wondered: "what exactly is going on in memory?" With the advent of Rust 1.28 and the Global Allocator trait, I had a really great idea: build a custom allocator that allows you to track your own allocations. That way, you can do things like writing tests for both correct results and correct memory usage. I gave it a shot, but learned very quickly: never write your own allocator. It went from "fun weekend project" to "I have literally no idea what my computer is doing" at breakneck speed.

+

Instead, I'll highlight a separate path I took to make sense of my memory usage: heaptrack.

+

Turning on the System Allocator

+

This is the hardest part of the post. Because Rust uses its own allocator by default, heaptrack is unable to properly record unmodified Rust code. To remedy this, we'll make use of the #[global_allocator] attribute.

+

Specifically, in lib.rs or main.rs, add this:

+
use std::alloc::System;

#[global_allocator]
static GLOBAL: System = System;
+

...and that's it. Everything else comes essentially for free.

+

Running heaptrack

+

Assuming you've installed heaptrack (Homebrew on Mac, package manager on Linux, ??? on Windows), all that's left is to fire up your application:

+
heaptrack my_application
+

It's that easy. After the program finishes, you'll see a file in your local directory with a name like heaptrack.my_application.XXXX.gz. If you load that up in heaptrack_gui, you'll see something like this:

+

heaptrack

+
+

And even these pretty colors:

+

pretty colors

+

Reading Flamegraphs

+

To make sense of our memory usage, we're going to focus on that last picture - it's called a "flamegraph". These charts are typically used to show how much time your program spends executing each function, but they're used here to show how much memory was allocated during those functions instead.

+

For example, we can see that all allocations happened during the main function:

+

allocations in main

+

...and within that, all allocations happened during dtparse::parse:

+

allocations in dtparse

+

...and within that, allocations happened in two different places:

+

allocations in parseinfo

+

Now I apologize that it's hard to see, but there's one area specifically that stuck out as an issue: what the heck is the Default thing doing?

+

pretty colors

+

Optimizing dtparse

+

See, I knew that there were some allocations during calls to dtparse::parse, but I was totally wrong about where the bulk of allocations occurred in my program. Let me post the code and see if you can spot the mistake:

+
/// Main entry point for using `dtparse`.
pub fn parse(timestr: &str) -> ParseResult<(NaiveDateTime, Option<FixedOffset>)> {
let res = Parser::default().parse(
timestr, None, None, false, false,
None, false,
&HashMap::new(),
)?;

Ok((res.0, res.1))
}
+
+

dtparse

+
+
+

Because Parser::parse requires a mutable reference to itself, I have to create a new Parser::default every time it receives a string. This is excessive! We'd rather have an immutable parser that can be re-used, and avoid allocating memory in the first place.
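
A rough sketch of the shape of the fix (names here are stand-ins; the real change lives in the dtparse repo): once parse takes &self instead of &mut self, a single parser can be constructed once and shared across calls.

// Stand-in for dtparse::Parser
struct Parser;

impl Parser {
    // `&self` instead of `&mut self`: no per-call Parser::default() needed
    fn parse(&self, timestr: &str) -> usize {
        timestr.len() // placeholder for the real parsing logic
    }
}

fn main() {
    let parser = Parser; // constructed once...

    let inputs = ["2018-10-08", "October 8, 2018"];
    for input in inputs.iter() {
        // ...and re-used for every input
        let _ = parser.parse(input);
    }
}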

+

Armed with that information, I put some time in to make the parser immutable. Now that I can re-use the same parser over and over, the allocations disappear:

+

allocations cleaned up

+

In total, we went from requiring 2 MB of memory in version 1.0.2:

+

memory before

+

All the way down to 300KB in version 1.0.3:

+

memory after

+

Conclusion

+

In the end, you don't need to write a custom allocator to be efficient with memory; great tools already exist to help you understand what your program is doing.

+

Use them.

+

Given that Moore's Law is dead, we've all got to do our part to take back what Microsoft stole.


QADAPT - debug_assert! for allocations

· 5 min read
Bradlee Speice

I think it's part of the human condition to ignore perfectly good advice when it comes our way. A bit over a month ago, I was dispensing sage wisdom for the ages:

+
+

I had a really great idea: build a custom allocator that allows you to track your own allocations. I gave it a shot, but learned very quickly: never write your own allocator.

+

-- me

+
+

I proceeded to ignore it, because we never really learn from our mistakes.

+

There's another part of the human condition that derives joy from seeing things explode.

+

Explosions

+

And that's the part I'm going to focus on.

+

Why an Allocator?

+

So why, after complaining about allocators, would I still want to write one? There are three reasons for that:

+
  1. Allocation/dropping is slow
  2. It's difficult to know exactly when Rust will allocate or drop, especially when using code that you did not write
  3. I want automated tools to verify behavior, instead of inspecting by hand

When I say "slow," it's important to define the terms. If you're writing web applications, you'll spend orders of magnitude more time waiting for the database than you will the allocator. However, there's still plenty of code where micro- or nano-seconds matter; think finance, real-time audio, self-driving cars, and networking. In these situations it's simply unacceptable for you to spend time doing things that are not your program, and waiting on the allocator is not cool.

+

As I continue to learn Rust, it's difficult for me to predict where exactly allocations will happen. So, I propose we play a quick trivia game: Does this code invoke the allocator?

+

Example 1

+
fn my_function() {
let v: Vec<u8> = Vec::new();
}
+

No: Rust knows how big the Vec type is, and reserves a fixed amount of memory on the stack for the v vector. However, if we wanted to reserve extra space (using Vec::with_capacity), the allocator would get invoked.

+

Example 2

+
fn my_function() {
let v: Box<Vec<u8>> = Box::new(Vec::new());
}
+

Yes: Because Boxes allow us to work with things that are of unknown size, it has to allocate on the heap. While the Box is unnecessary in this snippet (release builds will optimize out the allocation), reserving heap space more generally is needed to pass a dynamically sized type to another function.

+

Example 3

+
fn my_function(mut v: Vec<u8>) {
    v.push(5);
}
+

Maybe: Depending on whether the Vector we were given has space available, we may or may not allocate. Especially when dealing with code that you did not author, it's difficult to verify that things behave as you expect them to.
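
To see why the answer depends entirely on the caller (my own sketch, reusing the function above):

fn my_function(mut v: Vec<u8>) {
    v.push(5);
}

fn main() {
    // No allocation inside my_function: space was already reserved
    my_function(Vec::with_capacity(1));

    // Here push must allocate: the vector starts with zero capacity
    my_function(Vec::new());
}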

+

Blowing Things Up

+

So, how exactly does QADAPT solve these problems? Whenever an allocation or drop occurs in code marked allocation-safe, QADAPT triggers a thread panic. We don't want to let the program continue as if nothing strange happened, we want things to explode.

+

However, you don't want code to panic in production because of circumstances you didn't predict. Just like debug_assert!, QADAPT will strip out its own code when building in release mode to guarantee no panics and no performance impact.

+

Finally, there are three ways to have QADAPT check that your code will not invoke the allocator:

+

Using a procedural macro

+

The easiest method: watch an entire function for allocator invocation:

+
use qadapt::no_alloc;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

#[no_alloc]
fn push_vec(v: &mut Vec<u8>) {
// This triggers a panic if v.len() == v.capacity()
v.push(5);
}

fn main() {
    let mut v = Vec::with_capacity(1);

    // This will *not* trigger a panic
    push_vec(&mut v);

    // This *will* trigger a panic
    push_vec(&mut v);
}
+

Using a regular macro

+

For times when you need more precision:

+
use qadapt::assert_no_alloc;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

fn main() {
let mut v = Vec::with_capacity(1);

// No allocations here, we already have space reserved
assert_no_alloc!(v.push(5));

// Even though we remove an item, it doesn't trigger a drop
// because it's a scalar. If it were a `Box<_>` type,
// a drop would trigger.
assert_no_alloc!({
v.pop().unwrap();
});
}
+

Using function calls

+

Both the most precise and most tedious:

+
use qadapt::enter_protected;
use qadapt::exit_protected;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

fn main() {
// This triggers an allocation (on non-release builds)
let mut v = Vec::with_capacity(1);

enter_protected();
// This does not trigger an allocation because we've reserved size
v.push(0);
exit_protected();

// This triggers an allocation because we ran out of size,
// but doesn't panic because we're no longer protected.
v.push(1);
}
+

Caveats

+

It's important to point out that QADAPT code is synchronous, so please be careful when mixing in asynchronous functions:

+
use futures::future::Future;
use futures::future::ok;
use qadapt::assert_no_alloc;
use qadapt::no_alloc;

#[no_alloc]
fn async_capacity() -> impl Future<Item=Vec<u8>, Error=()> {
ok(12).and_then(|e| Ok(Vec::with_capacity(e)))
}

fn main() {
// This doesn't trigger a panic because the `and_then` closure
// wasn't run during the function call.
async_capacity();

// Still no panic
assert_no_alloc!(async_capacity());

// This will panic because the allocation happens during `unwrap`
// in the `assert_no_alloc!` macro
assert_no_alloc!(async_capacity().poll().unwrap());
}
+

Conclusion

+

While there's a lot more to writing high-performance code than managing your usage of the allocator, it's critical that you do use the allocator correctly. QADAPT will verify that your code is doing what you expect. It's usable even on stable Rust from version 1.31 onward, which isn't the case for most allocators. Version 1.0 was released today, and you can check it out over at crates.io or on github.

+

I'm hoping to write more about high-performance Rust in the future, and I expect that QADAPT will help guide that. If there are topics you're interested in, let me know in the comments below!


More "what companies really mean"

· 2 min read
Bradlee Speice

I recently stumbled across a phenomenal small article entitled What Startups Really Mean By "Why Should We Hire You?". Having been interviewed by smaller companies (though not exactly startups), the questions and subtexts are the same. There's often a question behind the question that you're actually trying to answer, and I wish I had spotted the nuance earlier in my career.

+

Let me also make note of one more question/euphemism I've come across:

+

How do you feel about production support?

+

Translation: We're a fairly small team, and when things break on an evening/weekend/Christmas Day, can we call on you to be there?

+

I've met decidedly few people in my life who truly enjoy the "ops" side of "devops". They're incredibly good at taking an impossible problem, pre-existing knowledge of arcane arts, and turning that into a functioning system at the end. And if they all left for lunch, we probably wouldn't make it out the door before the zombie apocalypse.

+

Larger organizations (in my experience, 500+ person organizations) have the luxury of hiring people who either enjoy that, or play along nicely enough that our systems keep working.

+

Small teams have no such luck. If you're interviewing at a small company, especially as a "data scientist" or other somesuch position, be aware that systems can and do spontaneously combust at the most inopportune moments.

+

Terrible-but-popular answers include: "It's a part of the job, and I'm happy to contribute."


Allocations in Rust: Compiler optimizations

· 4 min read

Up to this point, we've been discussing memory usage in the Rust language by focusing on simple rules that are mostly right for small chunks of code. We've spent time showing how those rules work themselves out in practice, and become familiar with reading the assembly code needed to see each memory type (global, stack, heap) in action.

+

Throughout the series so far, we've put a handicap on the code. In the name of consistent and understandable results, we've asked the compiler to pretty please leave the training wheels on. Now is the time where we throw out all the rules and take off the kid gloves. As it turns out, both the Rust compiler and the LLVM optimizers are incredibly sophisticated, and we'll step back and let them do their job.

+

Similar to "What Has My Compiler Done For Me Lately?", we're focusing on interesting things the Rust language (and LLVM!) can do with memory management. We'll still be looking at assembly code to understand what's going on, but it's important to mention again: please use automated tools like alloc-counter to double-check memory behavior if it's something you care about. It's far too easy to mis-read assembly in large code sections, so you should always verify behavior if you care about memory usage.

+

The guiding principle as we move forward is this: optimizing compilers won't produce worse programs than we started with. There won't be any situations where stack allocations get moved to heap allocations. There will, however, be an opera of optimization.

+

Update 2019-02-10: When debugging a related issue, it was discovered that the original code worked because LLVM optimized out the entire function, rather than just the allocation segments. The code has been updated with proper use of read_volatile, and a previous section on vector capacity has been removed.

+

The Case of the Disappearing Box

+

Our first optimization comes when LLVM can reason that the lifetime of an object is sufficiently short that heap allocations aren't necessary. In these cases, LLVM will move the allocation to the stack instead! The way this interacts with #[inline] attributes is a bit opaque, but the important part is that LLVM can sometimes do better than the baseline Rust language:

+
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicBool, Ordering};

pub fn cmp(x: u32) {
// Turn on panicking if we allocate on the heap
DO_PANIC.store(true, Ordering::SeqCst);

// The compiler is able to see through the constant `Box`
// and directly compare `x` to 24 - assembly line 73
let y = Box::new(24);
let equals = x == *y;

// This call to drop is eliminated
drop(y);

// Need to mark the comparison result as volatile so that
// LLVM doesn't strip out all the code. If `y` is marked
// volatile instead, allocation will be forced.
unsafe { std::ptr::read_volatile(&equals) };

// Turn off panicking, as there are some deallocations
// when we exit main.
DO_PANIC.store(false, Ordering::SeqCst);
}

fn main() {
cmp(12)
}

#[global_allocator]
static A: PanicAllocator = PanicAllocator;
static DO_PANIC: AtomicBool = AtomicBool::new(false);
struct PanicAllocator;

unsafe impl GlobalAlloc for PanicAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if DO_PANIC.load(Ordering::SeqCst) {
panic!("Unexpected allocation.");
}
System.alloc(layout)
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if DO_PANIC.load(Ordering::SeqCst) {
panic!("Unexpected deallocation.");
}
System.dealloc(ptr, layout);
}
}
+

-- Compiler Explorer

+

-- Rust Playground

+

Dr. Array or: how I learned to love the optimizer

+

Finally, this isn't so much about LLVM figuring out different memory behavior, but LLVM stripping out code that doesn't do anything. Optimizations of this type have a lot of nuance to them; if you're not careful, they can make your benchmarks look impossibly good. In Rust, the black_box function (implemented in both libtest and criterion) will tell the compiler to disable this kind of optimization. But if you let LLVM remove unnecessary code, you can end up running programs that previously caused errors:

+
#[derive(Default)]
struct TwoFiftySix {
_a: [u64; 32]
}

#[derive(Default)]
struct EightK {
_a: [TwoFiftySix; 32]
}

#[derive(Default)]
struct TwoFiftySixK {
_a: [EightK; 32]
}

#[derive(Default)]
struct EightM {
_a: [TwoFiftySixK; 32]
}

pub fn main() {
// Normally this blows up because we can't reserve size on stack
// for the `EightM` struct. But because the compiler notices we
// never do anything with `_x`, it optimizes out the stack storage
// and the program completes successfully.
let _x = EightM::default();
}
+

-- Compiler Explorer

+

-- Rust Playground


Allocations in Rust: Dynamic memory

· 6 min read
Bradlee Speice

Managing dynamic memory is hard. Some languages assume users will do it themselves (C, C++), and some languages go to extreme lengths to protect users from themselves (Java, Python). In Rust, how the language uses dynamic memory (also referred to as the heap) is a system called ownership. And as the docs mention, ownership is Rust's most unique feature.

+

The heap is used in two situations: when the compiler is unable to predict either the total size of memory needed, or how long the memory is needed for, it allocates space in the heap.

+

This happens pretty frequently; if you want to download the Google home page, you won't know how large it is until your program runs. And when you're finished with Google, we deallocate the memory so it can be used to store other webpages. If you're interested in a slightly longer explanation of the heap, check out The Stack and the Heap in Rust's documentation.

+

We won't go into detail on how the heap is managed; the ownership documentation does a phenomenal job explaining both the "why" and "how" of memory management. Instead, we're going to focus on understanding "when" heap allocations occur in Rust.

+

To start off, take a guess for how many allocations happen in the program below:

+
fn main() {}
+

It's obviously a trick question; while no heap allocations occur as a result of that code, the setup needed to call main does allocate on the heap. Here's a way to show it:

+
#![feature(integer_atomics)]
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicU64, Ordering};

static ALLOCATION_COUNT: AtomicU64 = AtomicU64::new(0);

struct CountingAllocator;

unsafe impl GlobalAlloc for CountingAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
ALLOCATION_COUNT.fetch_add(1, Ordering::SeqCst);
System.alloc(layout)
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
System.dealloc(ptr, layout);
}
}

#[global_allocator]
static A: CountingAllocator = CountingAllocator;

fn main() {
let x = ALLOCATION_COUNT.fetch_add(0, Ordering::SeqCst);
println!("There were {} allocations before calling main!", x);
}
+

-- Rust Playground

+

As of the time of writing, there are five allocations that happen before main is ever called.

+

But when we want to understand more practically where heap allocation happens, we'll follow this guide:

+
  • Smart pointers hold their contents in the heap
  • Collections are smart pointers for many objects at a time, and reallocate when they need to grow

Finally, there are two "addendum" issues that are important to address when discussing Rust and the heap:

+
  • Non-heap alternatives to many standard library types are available.
  • Special allocators to track memory behavior should be used to benchmark code.

Smart pointers

+

The first thing to note are the "smart pointer" types. When you have data that must outlive the scope in which it is declared, or your data is of unknown or dynamic size, you'll make use of these types.

+

The term smart pointer comes from C++, and while it's closely linked to a general design pattern of "Resource Acquisition Is Initialization", we'll use it here specifically to describe objects that are responsible for managing ownership of data allocated on the heap. The smart pointers available in the alloc crate should look mostly familiar:

  • Box
  • Rc
  • Arc
  • Cow

The standard library also defines some smart pointers to manage heap objects, though more than can be covered here. Some examples are:

  • RwLock
  • Mutex

Finally, there is one "gotcha": cell types (like RefCell) look and behave similarly, but don't involve heap allocation. The core::cell docs have more information.
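
A quick demonstration of that gotcha (my own example; running it under a counting allocator like the one above should report no allocator calls):

use std::cell::RefCell;

fn main() {
    // RefCell wraps its value in place - this array lives on the stack
    let cell = RefCell::new([0u8; 64]);

    cell.borrow_mut()[0] = 42;
    assert_eq!(cell.borrow()[0], 42);
}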

+

When a smart pointer is created, the data it is given is placed in heap memory and the location of that data is recorded in the smart pointer. Once the smart pointer has determined it's safe to deallocate that memory (when a Box has gone out of scope or a reference count goes to zero), the heap space is reclaimed. We can prove these types use heap memory by looking at code:

+
use std::rc::Rc;
use std::sync::Arc;
use std::borrow::Cow;

pub fn my_box() {
// Drop at assembly line 1640
Box::new(0);
}

pub fn my_rc() {
// Drop at assembly line 1650
Rc::new(0);
}

pub fn my_arc() {
// Drop at assembly line 1660
Arc::new(0);
}

pub fn my_cow() {
// Drop at assembly line 1672
Cow::from("drop");
}
+

-- Compiler Explorer

+

Collections

+

Collection types use heap memory because their contents have dynamic size; they will request more memory when needed, and can release memory when it's no longer necessary. This dynamic property forces Rust to heap allocate everything they contain. In a way, collections are smart pointers for many objects at a time. Common types that fall under this umbrella are Vec, HashMap, and String (not str).
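
To watch that growth happen (my own example), we can track when the capacity changes; every change corresponds to a fresh heap allocation:

fn main() {
    let mut v: Vec<u8> = Vec::new();
    let mut last_capacity = v.capacity();

    for i in 0..32 {
        v.push(i);
        if v.capacity() != last_capacity {
            // A capacity change means the vector re-allocated and
            // moved its contents into a larger buffer
            println!("len {} grew capacity to {}", v.len(), v.capacity());
            last_capacity = v.capacity();
        }
    }
}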

+

While collections store the objects they own in heap memory, creating new collections will not allocate on the heap. This is a bit weird; if we call Vec::new(), the assembly shows a corresponding call to real_drop_in_place:

+
pub fn my_vec() {
// Drop in place at line 481
Vec::<u8>::new();
}
+

-- Compiler Explorer

+

But because the vector has no elements to manage, no calls to the allocator will ever be dispatched:

+
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
// Turn on panicking if we allocate on the heap
DO_PANIC.store(true, Ordering::SeqCst);

// Interesting bit happens here
let x: Vec<u8> = Vec::new();
drop(x);

// Turn panicking back off, some deallocations occur
// after main as well.
DO_PANIC.store(false, Ordering::SeqCst);
}

#[global_allocator]
static A: PanicAllocator = PanicAllocator;
static DO_PANIC: AtomicBool = AtomicBool::new(false);
struct PanicAllocator;

unsafe impl GlobalAlloc for PanicAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if DO_PANIC.load(Ordering::SeqCst) {
panic!("Unexpected allocation.");
}
System.alloc(layout)
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if DO_PANIC.load(Ordering::SeqCst) {
panic!("Unexpected deallocation.");
}
System.dealloc(ptr, layout);
}
}
+

-- Rust Playground

+

Other standard library types follow the same behavior; make sure to check out HashMap::new() and String::new().

+
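
As a quick check on that claim, here's a minimal sketch (mine, not from the original post) that extends the PanicAllocator example to String::new() and HashMap::new(); both constructors are documented to defer allocation, so this should run without panicking:

+
use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};

static DO_PANIC: AtomicBool = AtomicBool::new(false);
struct PanicAllocator;

unsafe impl GlobalAlloc for PanicAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if DO_PANIC.load(Ordering::SeqCst) {
            panic!("Unexpected allocation.");
        }
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if DO_PANIC.load(Ordering::SeqCst) {
            panic!("Unexpected deallocation.");
        }
        System.dealloc(ptr, layout);
    }
}

#[global_allocator]
static A: PanicAllocator = PanicAllocator;

fn main() {
    DO_PANIC.store(true, Ordering::SeqCst);

    // Neither constructor requests heap memory up front
    let s = String::new();
    let m: HashMap<u32, u32> = HashMap::new();
    drop(s);
    drop(m);

    DO_PANIC.store(false, Ordering::SeqCst);
}
+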

Heap Alternatives

+

While it is a bit strange to speak of the stack after spending time with the heap, it's worth pointing out that some heap-allocated objects in Rust have stack-based counterparts provided by other crates. If you have need of the functionality, but want to avoid allocating, there are typically alternatives available.

+

When it comes to some standard library smart pointers (RwLock and Mutex), stack-based alternatives are provided in crates like parking_lot and spin. You can check out lock_api::RwLock, lock_api::Mutex, and spin::Once if you're in need of synchronization primitives.

+
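
To make that concrete, here's a minimal sketch assuming the spin crate is listed in Cargo.toml; the lock and the value it guards both live in static memory, so no allocator is involved:

+
// Assumes a Cargo.toml dependency like `spin = "0.5"`
use spin::Mutex;

static COUNTER: Mutex<u32> = Mutex::new(0);

fn main() {
    {
        // `lock()` busy-waits instead of asking the OS to park
        // the thread, so no heap or kernel machinery is needed
        let mut value = COUNTER.lock();
        *value += 1;
    }
    println!("Counter: {}", *COUNTER.lock());
}
+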

thread_id may be necessary if you're implementing an allocator because thread::current().id() uses a thread_local! structure that needs heap allocation.

+
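
For illustration, a tiny sketch assuming the thread-id crate is listed in Cargo.toml:

+
// Assumes a Cargo.toml dependency like `thread-id = "3"`
fn main() {
    // Unlike std::thread::current().id(), this call doesn't go
    // through a thread_local! structure, so it's usable from
    // inside an allocator implementation
    println!("thread id: {}", thread_id::get());
}
+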

Tracing Allocators

+

When writing performance-sensitive code, there's no alternative to measuring your code. If you didn't write a benchmark, you don't care about its performance. You should never rely on your instincts when a microsecond is an eternity.

+

Similarly, there's great work going on in Rust with allocators that keep track of what they're doing (like alloc_counter). When it comes to tracking heap behavior, it's easy to make mistakes; please write tests and make sure you have tools to guard against future issues.


Allocations in Rust: Fixed memory

· 16 min read
Bradlee Speice

const and static are perfectly fine, but it's relatively rare that we know at compile-time about either values or references that will be the same for the duration of our program. Put another way, it's not often the case that either you or your compiler knows how much memory your entire program will ever need.

+

However, there are still some optimizations the compiler can do if it knows how much memory individual functions will need. Specifically, the compiler can make use of "stack" memory (as opposed to "heap" memory) which can be managed far faster in both the short- and long-term.

+

When requesting memory, the push instruction can typically complete in 1 or 2 cycles (<1ns on modern CPUs). Contrast that to heap memory, which requires an allocator (specialized software to track what memory is in use) to reserve space. When you're finished with stack memory, the pop instruction runs in 1-3 cycles, as opposed to an allocator needing to worry about memory fragmentation and other issues with the heap. All sorts of incredibly sophisticated techniques have been used to design allocators.

+

But no matter how fast your allocator is, the principle remains: the fastest allocator is the one you never use. As such, we're not going to discuss how exactly the push and pop instructions work, but we'll focus instead on the conditions that enable the Rust compiler to use faster stack-based allocation for variables.

+

So, how do we know when Rust will or will not use stack allocation for objects we create? Looking at other languages, it's often easy to delineate between stack and heap. Managed memory languages (Python, Java, C#) place everything on the heap. JIT compilers (PyPy, HotSpot) may optimize some heap allocations away, but you should never assume it will happen. C makes things clear with calls to special functions (like malloc(3)) needed to access heap memory. Old C++ has the new keyword, though modern C++/C++11 is more complicated with RAII.

+

For Rust, we can summarize as follows: stack allocation will be used for everything that doesn't involve "smart pointers" and collections. We'll skip over a precise definition of the term "smart pointer" for now, and instead discuss what we should watch for to understand when stack and heap memory regions are used:

+
    +
  1. +

    Stack manipulation instructions (push, pop, and add/sub of the rsp register) indicate allocation of stack memory:

    +
    pub fn stack_alloc(x: u32) -> u32 {
    // Space for `y` is allocated by subtracting from `rsp`,
    // and then populated
    let y = [1u8, 2, 3, 4];
    // Space for `y` is deallocated by adding back to `rsp`
    x
    }
    +

    -- Compiler Explorer

    +
  +
  2. +

    Tracking when exactly heap allocation calls occur is difficult. It's typically easier to watch for call core::ptr::real_drop_in_place, and infer that a heap allocation happened in the recent past:

    +
    pub fn heap_alloc(x: usize) -> usize {
    // Space for elements in a vector has to be allocated
    // on the heap, and is then de-allocated once the
    // vector goes out of scope
    let y: Vec<u8> = Vec::with_capacity(x);
    x
    }
    +

    -- Compiler Explorer (real_drop_in_place happens on line 1317)

    Note: While the Drop trait is called for stack-allocated objects, the Rust standard library only defines Drop implementations for types that involve heap allocation.

    +
  +
  3. +

    If you don't want to inspect the assembly, use a custom allocator that's able to track and alert when heap allocations occur. Crates like alloc_counter are designed for exactly this purpose.

    +
  +
+

With all that in mind, let's talk about situations in which we're guaranteed to use stack memory:

+
    +
  • Structs are created on the stack.
  • +
  • Function arguments are passed on the stack, meaning the #[inline] attribute will not change the memory region used.
  • +
  • Enums and unions are stack-allocated.
  • +
  • Arrays are always stack-allocated.
  • +
  • Closures capture their arguments on the stack.
  • +
  • Generics will use stack allocation, even with dynamic dispatch.
  • +
  • Copy types are guaranteed to be stack-allocated, and copying them will be done in stack memory.
  • +
  • Iterators in the standard library are stack-allocated even when iterating over heap-based collections.
  • +
+

Structs

+

The simplest case comes first. When creating vanilla struct objects, we use stack memory to hold their contents:

+
struct Point {
x: u64,
y: u64,
}

struct Line {
a: Point,
b: Point,
}

pub fn make_line() {
// `origin` is stored in the first 16 bytes of memory
// starting at location `rsp`
let origin = Point { x: 0, y: 0 };
// `point` makes up the next 16 bytes of memory
let point = Point { x: 1, y: 2 };

// When creating `ray`, we just move the content out of
// `origin` and `point` into the next 32 bytes of memory
let ray = Line { a: origin, b: point };
}
+

-- Compiler Explorer

+

Note that while some extra-fancy instructions are used for memory manipulation in the assembly, the sub rsp, 64 instruction indicates we're still working with the stack.

+

Function arguments

+

Have you ever wondered how functions communicate with each other? Like, once the variables are given to you, everything's fine. But how do you "give" those variables to another function? How do you get the results back afterward? The answer: the compiler arranges memory and assembly instructions using a pre-determined calling convention. This convention governs the rules around where arguments needed by a function will be located (either in memory offsets relative to the stack pointer rsp, or in other registers), and where the results can be found once the function has finished. And when multiple languages agree on what the calling conventions are, you can do things like having Go call Rust code!

+

Put simply: it's the compiler's job to figure out how to call other functions, and you can assume that the compiler is good at its job.

+

We can see this in action using a simple example:

+
struct Point {
x: i64,
y: i64,
}

// We use integer division operations to keep
// the assembly clean, understanding the result
// isn't accurate.
fn distance(a: &Point, b: &Point) -> i64 {
// Immediately subtract from `rsp` the bytes needed
// to hold all the intermediate results - this is
// the stack allocation step

// The compiler used the `rdi` and `rsi` registers
// to pass our arguments, so read them in
let x1 = a.x;
let x2 = b.x;
let y1 = a.y;
let y2 = b.y;

// Do the actual math work
let x_pow = (x1 - x2) * (x1 - x2);
let y_pow = (y1 - y2) * (y1 - y2);
let squared = x_pow + y_pow;
squared / squared

// Our final result will be stored in the `rax` register
// so that our caller knows where to retrieve it.
// Finally, add back to `rsp` the stack memory that is
// now ready to be used by other functions.
}

pub fn total_distance() {
let start = Point { x: 1, y: 2 };
let middle = Point { x: 3, y: 4 };
let end = Point { x: 5, y: 6 };

let _dist_1 = distance(&start, &middle);
let _dist_2 = distance(&middle, &end);
}
+

-- Compiler Explorer

+

As a consequence of function arguments never using heap memory, we can also infer that functions using the #[inline] attribute also do not heap allocate. But better than inferring, we can look at the assembly to prove it:

+
struct Point {
x: i64,
y: i64,
}

// Note that there is no `distance` function in the assembly output,
// and the total line count goes from 229 with inlining off
// to 306 with inline on. Even still, no heap allocations occur.
#[inline(always)]
fn distance(a: &Point, b: &Point) -> i64 {
let x1 = a.x;
let x2 = b.x;
let y1 = a.y;
let y2 = b.y;

let x_pow = (a.x - b.x) * (a.x - b.x);
let y_pow = (a.y - b.y) * (a.y - b.y);
let squared = x_pow + y_pow;
squared / squared
}

pub fn total_distance() {
let start = Point { x: 1, y: 2 };
let middle = Point { x: 3, y: 4 };
let end = Point { x: 5, y: 6 };

let _dist_1 = distance(&start, &middle);
let _dist_2 = distance(&middle, &end);
}
+

-- Compiler Explorer

+

Finally, passing by value (arguments with type Copy) and passing by reference (either moving ownership or passing a pointer) may have slightly different layouts in assembly, but will still use either stack memory or CPU registers:

+
pub struct Point {
x: i64,
y: i64,
}

// Moving values
pub fn distance_moved(a: Point, b: Point) -> i64 {
let x1 = a.x;
let x2 = b.x;
let y1 = a.y;
let y2 = b.y;

let x_pow = (x1 - x2) * (x1 - x2);
let y_pow = (y1 - y2) * (y1 - y2);
let squared = x_pow + y_pow;
squared / squared
}

// Borrowing values has two extra `mov` instructions on lines 21 and 22
pub fn distance_borrowed(a: &Point, b: &Point) -> i64 {
let x1 = a.x;
let x2 = b.x;
let y1 = a.y;
let y2 = b.y;

let x_pow = (x1 - x2) * (x1 - x2);
let y_pow = (y1 - y2) * (y1 - y2);
let squared = x_pow + y_pow;
squared / squared
}
+

-- Compiler Explorer

+

Enums

+

If you've ever worried that wrapping your types in Option or Result would finally make them large enough that Rust decides to use heap allocation instead, fear no longer: enum and union types don't use heap allocation:

+
enum MyEnum {
Small(u8),
Large(u64)
}

struct MyStruct {
x: MyEnum,
y: MyEnum,
}

pub fn enum_compare() {
let x = MyEnum::Small(0);
let y = MyEnum::Large(0);

let z = MyStruct { x, y };

let opt = Option::Some(z);
}
+

-- Compiler Explorer

+

Because the size of an enum is the size of its largest element plus a flag, the compiler can predict how much memory is used no matter which variant of an enum is currently stored in a variable. Thus, enums and unions have no need of heap allocation. There's unfortunately not a great way to show this in assembly, so I'll instead point you to the core::mem::size_of documentation, and to the short sketch below.

+
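
As a quick demonstration (a sketch of mine, not from the original post), printing the sizes shows the "largest element plus a flag" rule in action:

+
use std::mem::size_of;

enum MyEnum {
    Small(u8),
    Large(u64),
}

fn main() {
    // Space for the largest variant (a u64) plus a discriminant,
    // padded out to the u64's alignment
    println!("MyEnum: {} bytes", size_of::<MyEnum>()); // 16 on x86_64

    // Wrapping in Option grows the stack footprint at most slightly;
    // the compiler can often hide the extra tag in unused bits
    println!("Option<MyEnum>: {} bytes", size_of::<Option<MyEnum>>());
}
+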

Arrays

+

The array type is guaranteed to be stack allocated, which is why the array size must be declared. Interestingly enough, this can be used to cause safe Rust programs to crash:

+
// 256 bytes
#[derive(Default)]
struct TwoFiftySix {
_a: [u64; 32]
}

// 8 kilobytes
#[derive(Default)]
struct EightK {
_a: [TwoFiftySix; 32]
}

// 256 kilobytes
#[derive(Default)]
struct TwoFiftySixK {
_a: [EightK; 32]
}

// 8 megabytes - exceeds space typically provided for the stack,
// though the kernel can be instructed to allocate more.
// On Linux, you can check stack size using `ulimit -s`
#[derive(Default)]
struct EightM {
_a: [TwoFiftySixK; 32]
}

fn main() {
// Because we already have things in stack memory
// (like the current function call stack), allocating another
// eight megabytes of stack memory crashes the program
let _x = EightM::default();
}
+

-- Rust Playground

+

There aren't any security implications of this (no memory corruption occurs), but it's good to note that the Rust compiler won't move arrays into heap memory even if they can be reasonably expected to overflow the stack.

+

Closures

+

Rules for how anonymous functions capture their arguments are typically language-specific. In Java, Lambda Expressions are actually objects created on the heap that capture local primitives by copying, and capture local non-primitives as (final) references. Python and JavaScript both bind everything by reference normally, but Python can also capture values and JavaScript has Arrow functions.

+

In Rust, arguments to closures are the same as arguments to other functions; closures are simply functions that don't have a declared name. Some weird ordering of the stack may be required to handle them, but it's the compiler's responsibility to figure that out.

+

Each example below has the same effect, but a different assembly implementation. In the simplest case, we immediately run a closure returned by another function. Because we don't store a reference to the closure, the stack memory needed to store the captured values is contiguous:

+
fn my_func() -> impl FnOnce() {
let x = 24;
// Note that this closure in assembly looks exactly like
// any other function; you even use the `call` instruction
// to start running it.
move || { x; }
}

pub fn immediate() {
my_func()();
my_func()();
}
+

-- Compiler Explorer, 25 total assembly instructions

+

If we store a reference to the closure, the Rust compiler keeps values it needs in the stack memory of the original function. Getting the details right is a bit harder, so the instruction count goes up even though this code is functionally equivalent to our original example:

+
pub fn simple_reference() {
let x = my_func();
let y = my_func();
y();
x();
}
+

-- Compiler Explorer, 55 total assembly instructions

+

Even things like variable order can make a difference in instruction count:

+
pub fn complex() {
let x = my_func();
let y = my_func();
x();
y();
}
+

-- Compiler Explorer, 70 total assembly instructions

+

In every circumstance though, the compiler ensured that no heap allocations were necessary.

+

Generics

+

Traits in Rust come in two broad forms: static dispatch (monomorphization, impl Trait) and dynamic dispatch (trait objects, dyn Trait). While dynamic dispatch is often associated with trait objects being stored in the heap, dynamic dispatch can be used with stack-allocated objects as well:

+
trait GetInt {
fn get_int(&self) -> u64;
}

// vtable stored at section L__unnamed_1
struct WhyNotU8 {
x: u8
}
impl GetInt for WhyNotU8 {
fn get_int(&self) -> u64 {
self.x as u64
}
}

// vtable stored at section L__unnamed_2
struct ActualU64 {
x: u64
}
impl GetInt for ActualU64 {
fn get_int(&self) -> u64 {
self.x
}
}

// `&dyn` declares that we want to use dynamic dispatch
// rather than monomorphization, so there is only one
// `retrieve_int` function that shows up in the final assembly.
// If we used generics, there would be one implementation of
// `retrieve_int` for each type that implements `GetInt`.
pub fn retrieve_int(u: &dyn GetInt) {
// In the assembly, we just call an address given to us
// in the `rsi` register and hope that it was set up
// correctly when this function was invoked.
let x = u.get_int();
}

pub fn do_call() {
// Note that even though the vtable for `WhyNotU8` and
// `ActualU64` includes a pointer to
// `core::ptr::real_drop_in_place`, it is never invoked.
let a = WhyNotU8 { x: 0 };
let b = ActualU64 { x: 0 };

retrieve_int(&a);
retrieve_int(&b);
}
+

-- Compiler Explorer

+

It's hard to imagine practical situations where dynamic dispatch would be used for objects that aren't heap allocated, but it technically can be done.

+

Copy types

+

Understanding move semantics and copy semantics in Rust is weird at first. The Rust docs go into detail far better than can be addressed here, so I'll leave them to do the job. From a memory perspective though, their guideline is reasonable: if your type can implement Copy, it should. While there are potential speed tradeoffs to benchmark when discussing Copy (move semantics for stack objects vs. copying stack pointers vs. copying stack structs), it's impossible for Copy to introduce a heap allocation.

+

But why is this the case? Fundamentally, it's because the language controls what Copy means - "the behavior of Copy is not overloadable" because it's a marker trait. From there we'll note that a type can implement Copy if (and only if) its components implement Copy, and that no heap-allocated types implement Copy. Thus, assignments involving heap types are always move semantics, and new heap allocations won't occur because of implicit operator behavior.

+
#[derive(Clone)]
struct Cloneable {
x: Box<u64>
}

// error[E0204]: the trait `Copy` may not be implemented for this type
#[derive(Copy, Clone)]
struct NotCopyable {
x: Box<u64>
}
+

-- Compiler Explorer

+
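
To round out the picture, here's a small sketch (mine, not from the original post) of how assignment behaves once a heap type like Box is involved:

+
#[derive(Copy, Clone)]
struct Meters(u64);

struct Boxed(Box<u64>);

fn main() {
    let m = Meters(8);
    let m2 = m; // copy semantics: both bindings stay usable
    println!("{} {}", m.0, m2.0);

    let b = Boxed(Box::new(8));
    let b2 = b; // move semantics: `b` can no longer be used
    // println!("{}", b.0); // error[E0382]: borrow of moved value
    println!("{}", b2.0);
}
+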

Iterators

+

In managed memory languages (like Java), there's a subtle difference between these two code samples:

+
public static long sum_for(List<Long> vals) {
    long sum = 0;
    // Regular for loop over indices
    for (int i = 0; i < vals.size(); i++) {
        sum += vals.get(i);
    }
    return sum;
}

public static long sum_foreach(List<Long> vals) {
    long sum = 0;
    // "Foreach" loop - uses iteration
    for (Long l : vals) {
        sum += l;
    }
    return sum;
}
+

In the sum_for function, nothing terribly interesting happens. In sum_foreach, an object of type Iterator is allocated on the heap, and will eventually be garbage-collected. This isn't a great design; iterators are often transient objects that you need during a function and can discard once the function ends. Sounds exactly like the issue stack-allocated objects address, no?

+

In Rust, iterators are allocated on the stack. The objects to iterate over are almost certainly in heap memory, but the iterator itself (Iter) doesn't need to use the heap. In each of the examples below we iterate over a collection, but never use heap allocation:

+
use std::collections::HashMap;
// There's a lot of assembly generated, but if you search in the text,
// there are no references to `real_drop_in_place` anywhere.

pub fn sum_vec(x: &Vec<u32>) {
let mut s = 0;
// Basic iteration over vectors doesn't need allocation
for y in x {
s += y;
}
}

pub fn sum_enumerate(x: &Vec<u32>) {
let mut s = 0;
// More complex iterators are just fine too
for (_i, y) in x.iter().enumerate() {
s += y;
}
}

pub fn sum_hm(x: &HashMap<u32, u32>) {
let mut s = 0;
// And it's not just Vec, all types will allocate the iterator
// on stack memory
for y in x.values() {
s += y;
}
}
+

-- Compiler Explorer


Allocations in Rust: Summary

· 2 min read
Bradlee Speice

While there's a lot of interesting detail captured in this series, it's often helpful to have a document that answers some "yes/no" questions. You may not care about what an Iterator looks like in assembly, you just need to know whether it allocates an object on the heap or not. And while Rust will prioritize the fastest behavior it can, here are the rules for each memory type:

+

Global Allocation:

+
    +
  • const is a fixed value; the compiler is allowed to copy it wherever useful.
  • +
  • static is a fixed reference; the compiler will guarantee it is unique.
  • +
+

Stack Allocation:

+
    +
  • Everything not using a smart pointer will be allocated on the stack.
  • +
  • Structs, enums, iterators, arrays, and closures are all stack allocated.
  • +
  • Cell types (RefCell) behave like smart pointers, but are stack-allocated.
  • +
  • Inlining (#[inline]) will not affect allocation behavior for better or worse.
  • +
  • Types that are marked Copy are guaranteed to have their contents stack-allocated.
  • +
+

Heap Allocation:

+
    +
  • Smart pointers (Box, Rc, Mutex, etc.) allocate their contents in heap memory.
  • +
  • Collections (HashMap, Vec, String, etc.) allocate their contents in heap memory.
  • +
  • Some smart pointers in the standard library have counterparts in other crates that don't need heap memory. If possible, use those.
  • +
+

Container Sizes in Rust

+

-- Raph Levien


Allocations in Rust: Global memory

· 8 min read
Bradlee Speice

The first memory type we'll look at is pretty special: when Rust can prove that a value is fixed for the life of a program (const), and when a reference is unique for the life of a program (static as a declaration, not 'static as a lifetime), we can make use of global memory. This special section of data is embedded directly in the program binary so that variables are ready to go once the program loads; no additional computation is necessary.

+

Understanding the value/reference distinction is important for reasons we'll go into below, and while the full specification for these two keywords is available, we'll take a hands-on approach to the topic.

+

const values

+

When a value is guaranteed to be unchanging in your program (where "value" may be scalars, structs, etc.), you can declare it const. This tells the compiler that it's safe to treat the value as never changing, and enables some interesting optimizations; not only is there no initialization cost to creating the value (it is loaded at the same time as the executable parts of your program), but the compiler can also copy the value around if it speeds up the code.

+

The points we need to address when talking about const are:

+
    +
  • Const values are stored in read-only memory - it's impossible to modify them.
  • +
  • Values resulting from calling a const fn are materialized at compile-time.
  • +
  • The compiler may (or may not) copy const values wherever it chooses.
  • +
+

Read-Only

+

The first point is a bit strange - "read-only memory." The Rust book mentions in a couple places that using mut with constants is illegal, but it's also important to demonstrate just how immutable they are. Typically in Rust you can use interior mutability to modify things that aren't declared mut. RefCell provides an example of this pattern in action:

+
use std::cell::RefCell;

fn my_mutator(cell: &RefCell<u8>) {
// Even though we're given an immutable reference,
// the `replace` method allows us to modify the inner value.
cell.replace(14);
}

fn main() {
let cell = RefCell::new(25);
// Prints out 25
println!("Cell: {:?}", cell);
my_mutator(&cell);
// Prints out 14
println!("Cell: {:?}", cell);
}
+

-- Rust Playground

+

When const is involved though, interior mutability is impossible:

+
use std::cell::RefCell;

const CELL: RefCell<u8> = RefCell::new(25);

fn my_mutator(cell: &RefCell<u8>) {
cell.replace(14);
}

fn main() {
// First line prints 25 as expected
println!("Cell: {:?}", &CELL);
my_mutator(&CELL);
// Second line *still* prints 25
println!("Cell: {:?}", &CELL);
}
+

-- Rust Playground

+

And a second example using Once:

+
use std::sync::Once;

const SURPRISE: Once = Once::new();

fn main() {
// This is how `Once` is supposed to be used
SURPRISE.call_once(|| println!("Initializing..."));
// Because `Once` is a `const` value, we never record it
// having been initialized the first time, and this closure
// will also execute.
SURPRISE.call_once(|| println!("Initializing again???"));
}
+

-- Rust Playground

+

When the const specification refers to "rvalues", this behavior is what it refers to. Clippy will treat this as an error, but it's still something to be aware of.

+

Initialization

+

The next thing to mention is that const values are loaded into memory as part of your program binary. Because of this, any const values declared in your program will be "realized" at compile-time; accessing them may trigger a main-memory lookup (with a fixed address, so your CPU may be able to prefetch the value), but that's it.

+
use std::cell::RefCell;

const CELL: RefCell<u32> = RefCell::new(24);

pub fn multiply(value: u32) -> u32 {
// CELL is stored at `.L__unnamed_1`
value * (*CELL.get_mut())
}
+

-- Compiler Explorer

+

The compiler creates one RefCell, uses it everywhere, and never needs to call the RefCell::new function.

+

Copying

+

If it's helpful though, the compiler can choose to copy const values.

+
const FACTOR: u32 = 1000;

pub fn multiply(value: u32) -> u32 {
// See assembly line 4 for the `mov edi, 1000` instruction
value * FACTOR
}

pub fn multiply_twice(value: u32) -> u32 {
// See assembly lines 22 and 29 for `mov edi, 1000` instructions
value * FACTOR * FACTOR
}
+

-- Compiler Explorer

+

In this example, the FACTOR value is turned into the mov edi, 1000 instruction in both the multiply and multiply_twice functions; the "1000" value is never "stored" anywhere, as it's small enough to inline into the assembly instructions.

+

Finally, getting the address of a const value is possible, but not guaranteed to be unique (because the compiler can choose to copy values). I was unable to get non-unique pointers in my testing (even using different crates), but the specifications are clear enough: don't rely on pointers to const values being consistent. To be frank, caring about locations for const values is almost certainly a code smell.

+
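
To see what that means in practice, here's a small sketch (mine, not from the original post); on most builds the two pointers compare equal, but the language doesn't promise it:

+
const FACTOR: u32 = 1000;

fn main() {
    // Each `&FACTOR` may refer to its own copy of the value
    let a = &FACTOR as *const u32;
    let b = &FACTOR as *const u32;
    println!("{:?} {:?} equal: {}", a, b, a == b);
}
+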

static values

+

Static variables are related to const variables, but take a slightly different approach. When you declare that a reference is unique for the life of a program, you have a static variable (unrelated to the 'static lifetime). Because of the reference/value distinction with const/static, static variables behave much more like typical "global" variables.

+

But to understand static, here's what we'll look at:

+
    +
  • static variables are globally unique locations in memory.
  • +
  • Like const, static variables are loaded at the same time as your program is read into memory.
  • +
  • All static variables must implement the Sync marker trait.
  • +
  • Interior mutability is safe and acceptable when using static variables.
  • +
+

Memory Uniqueness

+

The single biggest difference between const and static is the guarantees provided about uniqueness. Where const variables may or may not be copied in code, static variables are guaranteed to be unique. If we take a previous const example and change it to static, the difference should be clear:

+
static FACTOR: u32 = 1000;

pub fn multiply(value: u32) -> u32 {
// The assembly to `mul dword ptr [rip + example::FACTOR]` is how FACTOR gets used
value * FACTOR
}

pub fn multiply_twice(value: u32) -> u32 {
// The assembly to `mul dword ptr [rip + example::FACTOR]` is how FACTOR gets used
value * FACTOR * FACTOR
}
+

-- Compiler Explorer

+

Where previously there were plenty of references to multiplying by 1000, the new assembly refers to FACTOR as a named memory location instead. No initialization work needs to be done, but the compiler can no longer prove the value never changes during execution.

+

Initialization

+

Next, let's talk about initialization. The simplest case is initializing static variables with either scalar or struct notation:

+
#[derive(Debug)]
struct MyStruct {
x: u32
}

static MY_STRUCT: MyStruct = MyStruct {
// You can even reference other statics
// declared later
x: MY_VAL
};

static MY_VAL: u32 = 24;

fn main() {
println!("Static MyStruct: {:?}", MY_STRUCT);
}
+

-- Rust Playground

+

Things can get a bit weirder when using const fn though. In most cases, it just works:

+
#[derive(Debug)]
struct MyStruct {
x: u32
}

impl MyStruct {
const fn new() -> MyStruct {
MyStruct { x: 24 }
}
}

static MY_STRUCT: MyStruct = MyStruct::new();

fn main() {
println!("const fn Static MyStruct: {:?}", MY_STRUCT);
}
+

-- Rust Playground

+

However, there's a caveat: you're currently not allowed to use const fn to initialize static variables of types that aren't marked Sync. For example, RefCell::new() is a const fn, but because RefCell isn't Sync, you'll get an error at compile time:

+
use std::cell::RefCell;

// error[E0277]: `std::cell::RefCell<u8>` cannot be shared between threads safely
static MY_LOCK: RefCell<u8> = RefCell::new(0);
+

-- Rust Playground

+

It's likely that this will change in the future though.

+

The Sync marker

+

Which leads well to the next point: static variable types must implement the Sync marker. Because they're globally unique, it must be safe for you to access static variables from any thread at any time. Most struct definitions automatically implement the Sync trait because they contain only elements which themselves implement Sync (read more in the Nomicon). This is why earlier examples could get away with initializing statics, even though we never included an impl Sync for MyStruct in the code. To demonstrate this property, Rust refuses to compile our earlier example if we add a non-Sync element to the struct definition:

+
use std::cell::RefCell;

struct MyStruct {
x: u32,
y: RefCell<u8>,
}

// error[E0277]: `std::cell::RefCell<u8>` cannot be shared between threads safely
static MY_STRUCT: MyStruct = MyStruct {
x: 8,
y: RefCell::new(8)
};
+

-- Rust Playground

+

Interior mutability

+

Finally, while static mut variables are allowed, mutating them is an unsafe operation. If we want to stay in safe Rust, we can use interior mutability to accomplish similar goals:

+
use std::sync::Once;

// This example adapted from https://doc.rust-lang.org/std/sync/struct.Once.html#method.call_once
static INIT: Once = Once::new();

fn main() {
// Note that while `INIT` is declared immutable, we're still allowed
// to mutate its interior
INIT.call_once(|| println!("Initializing..."));
// This code won't panic, as the interior of INIT was modified
// as part of the previous `call_once`
INIT.call_once(|| panic!("INIT was called twice!"));
}
+

-- Rust Playground


Allocations in Rust: Foreword

· 4 min read
Bradlee Speice

There's an alchemy of distilling complex technical topics into articles and videos that change the way programmers see the tools they interact with on a regular basis. I knew what a linker was, but there's a staggering amount of complexity in between the OS and main(). Rust programmers use the Box type all the time, but there's a rich history of the Rust language itself wrapped up in how special it is.

+

In a similar vein, this series attempts to look at code and understand how memory is used; the complex choreography of operating system, compiler, and program that frees you to focus on functionality far-flung from frivolous book-keeping. The Rust compiler relieves a great deal of the cognitive burden associated with memory management, but we're going to step into its world for a while.

+

Let's learn a bit about memory in Rust.

+
+

Rust's three defining features of Performance, Reliability, and Productivity are all driven to a great degree by how the Rust compiler understands memory usage. Unlike managed memory languages (Java, Python), Rust doesn't really garbage collect; instead, it uses an ownership system to reason about how long objects will last in your program. In some cases, if the life of an object is fairly transient, Rust can make use of a very fast region called the "stack." When that's not possible, Rust uses dynamic (heap) memory and the ownership system to ensure you can't accidentally corrupt memory. It's not as fast, but it is important to have available.

+

That said, there are specific situations in Rust where you'd never need to worry about the stack/heap distinction! If you:

+
    +
  1. Never use unsafe
  +
  2. Never use #![feature(alloc)] or the alloc crate
  +
+

...then it's not possible for you to use dynamic memory!

+

For some uses of Rust, typically embedded devices, these constraints are OK. They have very limited memory, and the program binary size itself may significantly affect what's available! There's no operating system able to manage this "virtual memory" thing, but that's not an issue because there's only one running application. The embedonomicon is ever in mind, and interacting with the "real world" through extra peripherals is accomplished by reading and writing to specific memory addresses.

+

Most Rust programs find these requirements overly burdensome though. C++ developers would struggle without access to std::vector (except those hardcore no-STL people), and Rust developers would struggle without std::vec. But with the constraints above, std::vec is actually a part of the alloc crate, and thus off-limits. Box, Rc, etc., are also unusable for the same reason.

+

Whether writing code for embedded devices or not, the important thing in both situations is how much you know before your application starts about what its memory usage will look like. In embedded devices, there's a small, fixed amount of memory to use. In a browser, you have no idea how large google.com's home page is until you start trying to download it. The compiler uses this knowledge (or lack thereof) to optimize how memory is used; put simply, your code runs faster when the compiler can guarantee exactly how much memory your program needs while it's running. This series is all about understanding how the compiler reasons about your program, with an emphasis on the implications for performance.

+

Now let's address some conditions and caveats before going much further:

+
    +
  • We'll focus on "safe" Rust only; unsafe lets you use platform-specific allocation APIs (malloc) that we'll ignore.
  • +
  • We'll assume a "debug" build of Rust code (what you get with cargo run and cargo test) and address (pun intended) release mode at the end (cargo run --release and cargo test --release).
  • +
  • All content will be run using Rust 1.32, as that's the highest currently supported in the Compiler Explorer. As such, we'll avoid upcoming innovations like compile-time evaluation of static that are available in nightly.
  • +
  • Because of the nature of the content, being able to read assembly is helpful. We'll keep it simple, but I found a refresher on the push and pop instructions was helpful while writing this.
  • +
  • I've tried to be precise in saying only what I can prove using the tools (ASM, docs) that are available, but if there's something said in error it will be corrected expeditiously. Please let me know at bradlee@speice.io
  • +
+

Finally, I'll do what I can to flag potential future changes, but the Rust docs have a notice worth repeating:

+
+

Rust does not currently have a rigorously and formally defined memory model.

+

-- the docs

+

Making bread

· 2 min read
Bradlee Speice

Having recently started my "gardening leave" between positions, I have some more personal time available. I'm planning to stay productive, contributing to some open-source projects, but it also occurred to me that despite talking about bread pics, this blog has been purely technical. Maybe I'll change the site title from "The Old Speice Guy" to "Bites and Bytes"?

+

Either way, I'm baking a little bit again, and figured it was worth taking a quick break to focus on some lighter material. I recently learned two critically important lessons: first, the temperature of the dough when you put the yeast in makes a huge difference.

+

Previously, when I wasn't paying attention to dough temperature:

+

Whole wheat dough

+

Compared with what happens when I put the dough in the microwave for a defrost cycle because the water I used wasn't warm enough:

+

White dough

+

I mean, just look at the bubbles!

+

White dough with bubbles

+

After shaping the dough, I've got two loaves ready:

+

Shaped loaves

+

Now, the recipe normally calls for a Dutch Oven to bake the bread because it keeps the dough from drying out in the oven. Because I don't own a Dutch Oven, I typically put a casserole dish on the bottom rack and fill it with water so there's still some moisture in the oven. This time, I forgot to add the water and learned my second lesson: never add room-temperature water to a glass dish that's currently at 500 degrees.

+

Shattered glass dish

+

Needless to say, trying to pull out sharp glass from an incredibly hot oven is not what I expected to be doing during my gardening leave.

+

In the end, the bread crust wasn't great, but the bread itself turned out pretty alright:

+

Baked bread

+

I've been writing a lot more during this break, so I'm looking forward to sharing that in the future. In the meantime, I'm planning on making a sandwich.


On building high performance systems

· 13 min read
Bradlee Speice

Prior to working in the trading industry, my assumption was that High Frequency Trading (HFT) is made up of people who have access to secret techniques mortal developers could only dream of. There had to be some secret art that could only be learned if one had an appropriately tragic backstory.

+

Kung Fu fight

+
+

How I assumed HFT people learn their secret techniques

+
+

How else do you explain people working on systems that complete the round trip of market data in to orders out (a.k.a. tick-to-trade) consistently within 750-800 nanoseconds? In roughly the time it takes a computer to access main memory 8 times, trading systems are capable of reading the market data packets, deciding what orders to send, doing risk checks, creating new packets for exchange-specific protocols, and putting those packets on the wire.

+

Having now worked in the trading industry, I can confirm the developers aren't super-human; I've made some simple mistakes at the very least. Instead, what shows up in public discussions is that philosophy, not technique, separates high-performance systems from everything else. Performance-critical systems don't rely on "this one cool C++ optimization trick" to make code fast (though micro-optimizations have their place); there's a lot more to worry about than just the code written for the project.

+

The framework I'd propose is this: If you want to build high-performance systems, focus first on reducing performance variance (reducing the gap between the fastest and slowest runs of the same code), and only look at average latency once variance is at an acceptable level.

+

Don't get me wrong, I'm a much happier person when things are fast. Computer goes from booting in 20 seconds down to 10 because I installed a solid-state drive? Awesome. But if every fifth day it takes a full minute to boot because of corrupted sectors? Not so great. Average speed over the course of a week is the same in each situation, but you're painfully aware of that minute when it happens. When it comes to code, the principle is the same: speeding up a function by an average of 10 milliseconds doesn't mean much if there's a 100ms difference between your fastest and slowest runs. When performance matters, you need to respond quickly every time, not just in aggregate. High-performance systems should first optimize for time variance. Once you're consistent at the time scale you care about, then focus on improving average time.

+

This focus on variance shows up all the time in industry too (emphasis added in all quotes below):

+
    +
  • +

    In marketing materials for NASDAQ's matching engine, the most performance-sensitive component of the exchange, dependability is highlighted in addition to instantaneous metrics:

    +
    +

    Able to consistently sustain an order rate of over 100,000 orders per second at sub-40 microsecond average latency

    +
    +
  • +
  • +

    The Aeron message bus has this to say about performance:

    +
    +

    Performance is the key focus. Aeron is designed to be the highest throughput with the lowest and most predictable latency possible of any messaging system

    +
    +
  • +
  • +

    The company PolySync, which is working on autonomous vehicles, mentions why they picked their specific messaging format:

    +
    +

    In general, high performance is almost always desirable for serialization. But in the world of autonomous vehicles, steady timing performance is even more important than peak throughput. This is because safe operation is sensitive to timing outliers. Nobody wants the system that decides when to slam on the brakes to occasionally take 100 times longer than usual to encode its commands.

    +
    +
  • +
  • +

    Solarflare, which makes highly-specialized network hardware, points out variance (jitter) as a big concern for electronic trading:

    +
    +

    The high stakes world of electronic trading, investment banks, market makers, hedge funds and exchanges demand the lowest possible latency and jitter while utilizing the highest bandwidth and return on their investment.

    +
    +
  • +
+

And to further clarify: we're not discussing total run-time, but variance of total run-time. There are situations where it's not reasonably possible to make things faster, and you'd much rather be consistent. For example, trading firms use wireless networks because the speed of light through air is faster than through fiber-optic cables. There's still at absolute minimum a ~33.76 millisecond delay required to send data between, say, Chicago and Tokyo. If a trading system in Chicago calls the function for "send order to Tokyo" and waits to see if a trade occurs, there's a physical limit to how long that will take. In this situation, the focus is on keeping variance of additional processing to a minimum, since speed of light is the limiting factor.

+

So how does one go about looking for and eliminating performance variance? To tell the truth, I don't think a systematic answer or flow-chart exists. There's no substitute for (A) building a deep understanding of the entire technology stack, and (B) actually measuring system performance (though (C) watching a lot of CppCon videos for inspiration never hurt). Even then, every project cares about performance to a different degree; you may need to build an entire replica production system to accurately benchmark at nanosecond precision, or you may be content to simply avoid garbage collection in your Java code.

+

Even though everyone has different needs, there are still common things to look for when trying to isolate and eliminate variance. In no particular order, these are my focus areas when thinking about high-performance systems:

+

Update 2019-09-21: Added notes on isolcpus and systemd affinity.

+

Language-specific

+

Garbage Collection: How often does garbage collection happen? When is it triggered? What are the impacts?

+
    +
  • In Python, individual objects are collected if the reference count reaches 0, and each generation is collected if num_alloc - num_dealloc > gc_threshold whenever an allocation happens. The GIL is acquired for the duration of generational collection.
  • +
  • Java has many different collection algorithms to choose from, each with different characteristics. The default algorithms (Parallel GC in Java 8, G1 in Java 9) freeze the JVM while collecting, while more recent algorithms (ZGC and Shenandoah) are designed to keep "stop the world" to a minimum by doing collection work in parallel.
  • +
+

Allocation: Every language has a different way of interacting with "heap" memory, but the principle is the same: running the allocator to allocate/deallocate memory takes time that can often be put to better use. Understanding when your language interacts with the allocator is crucial, and not always obvious. For example: C++ and Rust don't allocate heap memory for iterators, but Java does (meaning potential GC pauses). Take time to understand heap behavior (I made a guide for Rust), and look into alternative allocators (jemalloc, tcmalloc) that might run faster than the operating system default.

+
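
In Rust, swapping in one of those allocators is nearly a one-liner; a minimal sketch assuming the jemallocator crate is listed in Cargo.toml:

+
// Assumes a Cargo.toml dependency like `jemallocator = "0.3"`
use jemallocator::Jemalloc;

// Every heap allocation in the program now goes through jemalloc
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

fn main() {
    let v: Vec<u64> = (0..1_000).collect();
    println!("allocated {} elements via jemalloc", v.len());
}
+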

Data Layout: How your data is arranged in memory matters; data-oriented design and cache locality can have huge impacts on performance. The C family of languages (C, value types in C#, C++) and Rust all have guarantees about the shape every object takes in memory that others (e.g. Java and Python) can't make. Cachegrind and kernel perf counters are both great for understanding how performance relates to memory layout.

+

Just-In-Time Compilation: Languages that are compiled on the fly (LuaJIT, C#, Java, PyPy) are great because they optimize your program for how it's actually being used, rather than how a compiler expects it to be used. However, there's a variance problem if the program stops executing while waiting for translation from VM bytecode to native code. As a remedy, many languages support ahead-of-time compilation in addition to the JIT versions (CoreRT in C# and GraalVM in Java). On the other hand, LLVM supports Profile Guided Optimization, which theoretically brings JIT benefits to non-JIT languages. Finally, be careful to avoid comparing apples and oranges during benchmarks; you don't want your code to suddenly speed up because the JIT compiler kicked in.

+

Programming Tricks: These won't make or break performance, but can be useful in specific circumstances. For example, C++ can use templates instead of branches in critical sections.

+

Kernel

+

Code you wrote is almost certainly not the only code running on your hardware. There are many ways the operating system interacts with your program, from interrupts to system calls, that are important to watch for. These are written from a Linux perspective, but Windows does typically have equivalent functionality.

+

Scheduling: The kernel is normally free to schedule any process on any core, so it's important to reserve CPU cores exclusively for the important programs. There are a few parts to this: first, limit the CPU cores that non-critical processes are allowed to run on by excluding cores from scheduling (isolcpus kernel command-line option), or by setting the init process CPU affinity (systemd example). Second, set critical processes to run on the isolated cores by setting the processor affinity using taskset. Finally, use NO_HZ or chrt to disable scheduling interrupts. Turning off hyper-threading is also likely beneficial.

+

System calls: Reading from a UNIX socket? Writing to a file? In addition to not knowing how long the I/O operation takes, these all trigger expensive system calls (syscalls). To handle these, the CPU must context switch to the kernel, let the kernel operation complete, then context switch back to your program. We'd rather keep these to a minimum (see timestamp 18:20). Strace is your friend for understanding when and where syscalls happen.

+

Signal Handling: Far less likely to be an issue, but signals do trigger a context switch if your code has a handler registered. This will be highly dependent on the application, but you can block signals if it's an issue.

+

Interrupts: System interrupts are how devices connected to your computer notify the CPU that something has happened. The CPU will then choose a processor core to pause and context switch to the OS to handle the interrupt. Make sure that SMP affinity is set so that interrupts are handled on a CPU core not running the program you care about.

+

NUMA: While NUMA is good at making multi-cell systems transparent, there are variance implications; if the kernel moves a process across nodes, future memory accesses must wait for the controller on the original node. Use numactl to handle memory-/cpu-cell pinning so this doesn't happen.

+

Hardware

+

CPU Pipelining/Speculation: Speculative execution in modern processors gave us vulnerabilities like Spectre, but it also gave us performance improvements like branch prediction. And if the CPU mis-speculates your code, there's variance associated with rewind and replay. While the compiler knows a lot about how your CPU pipelines instructions, code can be structured to help the branch predictor.

+

Paging: For most systems, virtual memory is incredible. Applications live in their own worlds, and the CPU/MMU figures out the details. However, there's a variance penalty associated with memory paging and caching; if you access more memory pages than the TLB can store, you'll have to wait for the page walk. Kernel perf tools are necessary to figure out if this is an issue, but using huge pages can reduce TLB burdens. Alternately, running applications in a hypervisor like Jailhouse allows one to skip virtual memory entirely, but this is probably more work than the benefits are worth.

+

Network Interfaces: When more than one computer is involved, variance can go up dramatically. Tuning kernel network parameters may be helpful, but modern systems more frequently opt to skip the kernel altogether with a technique called kernel bypass. This typically requires specialized hardware and drivers, but even industries like telecom are finding the benefits.

+

Networks

+

Routing: There's a reason financial firms are willing to pay millions of euros for rights to a small plot of land - having a straight-line connection from point A to point B means the path their data takes is the shortest possible. In contrast, there are currently 6 computers in between me and Google, but that may change at any moment if my ISP realizes a more efficient route is available. Whether it's using research-quality equipment for shortwave radio, or just making sure there's no data inadvertently going between data centers, routing matters.

+

Protocol: TCP as a network protocol is awesome: guaranteed and in-order delivery, flow control, and congestion control all built in. But these attributes make the most sense when networking infrastructure is lossy; for systems that expect nearly all packets to be delivered correctly, the setup handshaking and packet acknowledgment are just overhead. Using UDP (unicast or multicast) may make sense in these contexts as it avoids the chatter needed to track connection state, and gap-fill strategies can handle the rest.

+

Switching: Many routers/switches handle packets using "store-and-forward" behavior: wait for the whole packet, validate checksums, and then send to the next device. In variance terms, the time needed to move data between two nodes is proportional to the size of that data; the switch must "store" all data before it can calculate checksums and "forward" to the next node. With "cut-through" designs, switches will begin forwarding data as soon as they know where the destination is, checksums be damned. This means there's a fixed cost (at the switch) for network traffic, no matter the size.

+

Final Thoughts

+

High-performance systems, regardless of industry, are not magical. They do require extreme precision and attention to detail, but they're designed, built, and operated by regular people, using a lot of tools that are publicly available. Interested in seeing how context switching affects performance of your benchmarks? taskset should be installed in all modern Linux distributions, and can be used to make sure the OS never migrates your process. Curious how often garbage collection triggers during a crucial operation? Your language of choice will typically expose details of its operations (Python, Java). Want to know how hard your program is stressing the TLB? Use perf record and look for dtlb_load_misses.miss_causes_a_walk.

+

Two final guiding questions, then: first, before attempting to apply some of the technology above to your own systems, can you first identify where/when you care about "high-performance"? As an example, if parts of a system rely on humans pushing buttons, CPU pinning won't have any measurable effect. Humans are already far too slow to react in time. Second, if you're using benchmarks, are they being designed in a way that's actually helpful? Tools like Criterion (also in Rust) and Google's Benchmark output not only average run time, but variance as well; your benchmarking environment is subject to the same concerns your production environment is.

+

Finally, I believe high-performance systems are a matter of philosophy, not necessarily technique. Rigorous focus on variance is the first step, and there are plenty of ways to measure and mitigate it; once that's at an acceptable level, then optimize for speed.


Binary format shootout

· 9 min read
Bradlee Speice

I've found that in many personal projects, analysis paralysis is particularly deadly. Making good decisions in the beginning avoids pain and suffering later; if extra research prevents future problems, I'm happy to continue procrastinating researching indefinitely.

+

So let's say you're in need of a binary serialization format. Data will be going over the network, not just in memory, so having a schema document and code generation is a must. Performance is crucial, so formats that support zero-copy de/serialization are given priority. And the more languages supported, the better; I use Rust, but can't predict what other languages this could interact with.

+

Given these requirements, the candidates I could find were:

+
    +
  1. Cap'n Proto has been around the longest, and is the most established
  +
  2. Flatbuffers is the newest, and claims to have a simpler encoding
  +
  3. Simple Binary Encoding has the simplest encoding, but the Rust implementation is unmaintained
  +
+

Any one of these will satisfy the project requirements: easy to transmit over a network, reasonably fast, and polyglot support. But how do you actually pick one? It's impossible to know what issues will follow that choice, so I tend to avoid commitment until the last possible moment.

+

Still, a choice must be made. Instead of worrying about which is "the best," I decided to build a small proof-of-concept system in each format and pit them against each other. All code can be found in the repository for this post.

+

We'll discuss each in more detail below, but a quick preview of the results:

+
  • Cap'n Proto: Theoretically performs incredibly well, but the implementation had issues
  • Flatbuffers: Has some quirks, but largely lived up to its "zero-copy" promises
  • SBE: Best median and worst-case performance, but the message structure has a limited feature set

Prologue: Binary Parsing with Nom

+

Our benchmark system will be a simple data processor; given depth-of-book market data from IEX, serialize each message into the schema format, read it back, and calculate total size of stock traded and the lowest/highest quoted prices. This test isn't complex, but is representative of the project I need a binary format for.

+

But before we make it to that point, we have to actually read in the market data. To do so, I'm using a library called nom. Version 5.0 was recently released and brought some big changes, so this was an opportunity to build a non-trivial program and get familiar.

+

If you don't already know about nom, it's a parser combinator library: by combining different smaller parsers, you can assemble a parser to handle complex structures without writing tedious code by hand. For example, when parsing PCAP files:

+
    0                   1                   2                   3
    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
   +---------------------------------------------------------------+
 0 |                    Block Type = 0x00000006                    |
   +---------------------------------------------------------------+
 4 |                       Block Total Length                      |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 8 |                          Interface ID                         |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
12 |                        Timestamp (High)                       |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
16 |                         Timestamp (Low)                       |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
20 |                          Captured Len                         |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
24 |                           Packet Len                          |
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   |                           Packet Data                         |
   |                              ...                              |
+

...you can build a parser in nom that looks like this:

+
// Imports assume nom 5.x; the original snippet omitted them
use nom::bytes::complete::{tag, take};
use nom::number::complete::le_u32;
use nom::sequence::tuple;
use nom::IResult;

const ENHANCED_PACKET: [u8; 4] = [0x06, 0x00, 0x00, 0x00];

pub fn enhanced_packet_block(input: &[u8]) -> IResult<&[u8], &[u8]> {
    // Parse the fixed-size header fields in order; fields we don't
    // use afterwards are bound with a leading underscore
    let (
        remaining,
        (
            _block_type,
            _block_len,
            _interface_id,
            _timestamp_high,
            _timestamp_low,
            captured_len,
            _packet_len,
        ),
    ) = tuple((
        tag(&ENHANCED_PACKET[..]),
        le_u32,
        le_u32,
        le_u32,
        le_u32,
        le_u32,
        le_u32,
    ))(input)?;

    // The payload length is given by `captured_len`
    let (remaining, packet_data) = take(captured_len)(remaining)?;
    Ok((remaining, packet_data))
}
+

While this example isn't too interesting, more complex formats (like IEX market data) are where nom really shines.

+

Ultimately, because the nom code in this shootout was the same for all formats, we're not too interested in its performance. Still, it's worth mentioning that building the market data parser was actually fun; I didn't have to write tons of boring code by hand.

+

Cap'n Proto

+

Now it's time to get into the meaty part of the story. Cap'n Proto was the first format I tried because of how long it has supported Rust (thanks to dwrensha for maintaining the Rust port since 2014!). However, I had a ton of performance concerns once I started using it.

+

To serialize new messages, Cap'n Proto uses a "builder" object. This builder allocates memory on the heap to hold the message content, but because builders can't be re-used, we have to allocate a new buffer for every single message. I was able to work around this with a special builder that could re-use the buffer, but it required reading through Cap'n Proto's benchmarks to find an example, and used std::mem::transmute to bypass Rust's borrow checker.

+

The process of reading messages was better, but still had issues. Cap'n Proto has two message encodings: a "packed" representation, and an "unpacked" version. When reading "packed" messages, we need a buffer to unpack the message into before we can use it; Cap'n Proto allocates a new buffer for each message we unpack, and I wasn't able to figure out a way around that. In contrast, the unpacked message format should be where Cap'n Proto shines; its main selling point is that there's no decoding step. However, accomplishing zero-copy deserialization required code in the private API (since fixed), and we allocate a vector on every read for the segment table.

+

In the end, I put in significant work to make Cap'n Proto as fast as possible, but there were too many issues for me to feel comfortable using it long-term.

+

Flatbuffers

+

This is the new kid on the block. After a first attempt didn't pan out, official support was recently launched. Flatbuffers intends to address the same problems as Cap'n Proto: high-performance, polyglot, binary messaging. The difference is that Flatbuffers claims to have a simpler wire format and more flexibility.

+

On the whole, I enjoyed using Flatbuffers; the tooling is nice, and unlike Cap'n Proto, parsing messages was actually zero-copy and zero-allocation. However, there were still some issues.

+

First, Flatbuffers (at least in Rust) can't handle nested vectors. This is a problem for formats like the following:

+
table Message {
    symbol: string;
}

table MultiMessage {
    messages:[Message];
}
+

We want to create a MultiMessage which contains a vector of Message, and each Message itself contains a vector (the string type). I was able to work around this by caching Message elements in a SmallVec before building the final MultiMessage, but it was a painful process that I believe contributed to poor serialization performance.

+

Second, streaming support in Flatbuffers seems to be something of an afterthought. Where Cap'n Proto in Rust handles reading messages from a stream as part of the API, Flatbuffers just sticks a u32 at the front of each message to indicate the size. Not specifically a problem, but calculating message size without that tag is nigh on impossible.
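To show what that framing looks like, here's a minimal sketch in Python (not code from the shootout), assuming a little-endian u32 size prefix in front of each payload:

```python
import struct

def write_message(stream: bytearray, payload: bytes) -> None:
    # Prefix each payload with its length as a little-endian u32
    stream += struct.pack("<I", len(payload))
    stream += payload

def read_messages(stream: bytes):
    offset = 0
    while offset < len(stream):
        (size,) = struct.unpack_from("<I", stream, offset)
        offset += 4
        yield stream[offset:offset + size]
        offset += size

buf = bytearray()
write_message(buf, b"first message")
write_message(buf, b"second message")
assert list(read_messages(bytes(buf))) == [b"first message", b"second message"]
```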

+

Ultimately, I enjoyed using Flatbuffers, and had to do significantly less work to make it perform well.

+

Simple Binary Encoding

+

Support for SBE was added by the author of one of my favorite Rust blog posts. I've talked previously about how important variance is in high-performance systems, so it was encouraging to read about a format that directly addressed my concerns. SBE has by far the simplest binary format, but it does make some tradeoffs.

+

Both Cap'n Proto and Flatbuffers use message offsets to handle variable-length data, unions, and various other features. In contrast, messages in SBE are essentially just structs; variable-length data is supported, but there's no union type.
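To illustrate the spirit of the difference (a Python sketch with a made-up layout, not SBE's actual wire format): a struct-style message can be decoded by reading known offsets directly, with no offset table to chase first:

```python
import struct

# Made-up fixed layout: 8-byte symbol, u32 size, u64 price.
LAYOUT = struct.Struct("<8sIQ")

message = LAYOUT.pack(b"AAPL    ", 100, 17245)

# Decoding is a single read at fixed offsets; nothing to dereference.
symbol, size, price = LAYOUT.unpack_from(message, 0)
assert (symbol.strip(), size, price) == (b"AAPL", 100, 17245)
```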

+

As mentioned in the beginning, the Rust port of SBE works well, but is essentially unmaintained. However, if you don't need union types, and can accept that schemas are XML documents, it's still worth using. SBE's implementation had the best streaming support of all formats I tested, and doesn't trigger allocation during de/serialization.

+

Results

+

After building a test harness for each format, it was time to actually take them for a spin. I used this script to run the benchmarks, and the raw results are here. All data reported below is the average of 10 runs on a single day of IEX data. Results were validated to make sure that each format parsed the data correctly.

+

Serialization

+

This test measures, on a per-message basis, how long it takes to serialize the IEX message into the desired format and write to a pre-allocated buffer.

+
Schema               | Median | 99th Pctl | 99.9th Pctl | Total
---------------------|--------|-----------|-------------|-------
Cap'n Proto Packed   | 413ns  | 1751ns    | 2943ns      | 14.80s
Cap'n Proto Unpacked | 273ns  | 1828ns    | 2836ns      | 10.65s
Flatbuffers          | 355ns  | 2185ns    | 3497ns      | 14.31s
SBE                  | 91ns   | 1535ns    | 2423ns      | 3.91s
+

Deserialization

+

This test measures, on a per-message basis, how long it takes to read the previously-serialized message and perform some basic aggregation. The aggregation code is the same for each format, so any performance differences are due solely to the format implementation.

+
Schema               | Median | 99th Pctl | 99.9th Pctl | Total
---------------------|--------|-----------|-------------|-------
Cap'n Proto Packed   | 539ns  | 1216ns    | 2599ns      | 18.92s
Cap'n Proto Unpacked | 366ns  | 737ns     | 1583ns      | 12.32s
Flatbuffers          | 173ns  | 421ns     | 1007ns      | 6.00s
SBE                  | 116ns  | 286ns     | 659ns       | 4.05s
+

Conclusion

+

Building a benchmark turned out to be incredibly helpful in making a decision; because a "union" type isn't important to me, I can be confident that SBE best addresses my needs.

+

While SBE was the fastest in terms of both median and worst-case performance, its worst-case performance was proportionately far higher relative to its median than any other format's. It seems that de/serialization time scales with message size, but I'll need to do some more research to understand what exactly is going on.

\ No newline at end of file diff --git a/2019/12/release-the-gil/index.html b/2019/12/release-the-gil/index.html new file mode 100644 index 0000000..5a05540 --- /dev/null +++ b/2019/12/release-the-gil/index.html @@ -0,0 +1,151 @@ +Release the GIL | The Old Speice Guy
Skip to main content

Release the GIL

· 9 min read
Bradlee Speice

Complaining about the Global Interpreter Lock (GIL) seems like a rite of passage for Python developers. It's easy to criticize a design decision made before multi-core CPUs were widely available, but the fact that it's still around indicates that it generally works Good Enough. Besides, there are simple and effective workarounds; it's not hard to start a new process and use message passing to synchronize code running in parallel.
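As a reminder of how low-friction that workaround is, a minimal sketch (busy_work is a stand-in for any CPU-bound task):

```python
from multiprocessing import Pool

def busy_work(n: int) -> int:
    # Each worker process has its own interpreter, and thus its own GIL
    return sum(range(n))

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        # `map` pickles arguments and results to pass messages between processes
        print(pool.map(busy_work, [10_000_000, 10_000_000]))
```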

+

Still, wouldn't it be nice to have more than a single active interpreter thread? In an age of asynchronicity and M:N threading, Python seems lacking. The ideal scenario is to take advantage of both Python's productivity and the modern CPU's parallel capabilities.

+

Presented below are two strategies for releasing the GIL's icy grip without giving up on what makes Python a nice language to start with. Bear in mind: these are just the tools; no claim is made about whether it's a good idea to use them. Very often, unlocking the GIL is an XY problem; you want application performance, and the GIL seems like an obvious bottleneck. Remember that any gains from running code in parallel come at the expense of project complexity; messing with the GIL is ultimately messing with Python's memory model.

+
%load_ext Cython
from numba import jit

N = 1_000_000_000
+

Cython

+

Put simply, Cython is a programming language that looks a lot like Python, gets transpiled to C/C++, and integrates well with the CPython API. It's great for building Python wrappers to C and C++ libraries, writing optimized code for numerical processing, and tons more. And when it comes to managing the GIL, there are two special features:

+
  • The nogil function annotation asserts that a Cython function is safe to use without the GIL, and compilation will fail if it interacts with Python in an unsafe manner
  • The with nogil context manager explicitly unlocks the CPython GIL while active

Whenever Cython code runs inside a with nogil block on a separate thread, the Python interpreter is unblocked and allowed to continue work elsewhere. We'll define a "busy work" function that demonstrates this principle in action:

+
%%cython

# Annotating a function with `nogil` indicates only that it is safe
# to call in a `with nogil` block. It *does not* release the GIL.
cdef unsigned long fibonacci(unsigned long n) nogil:
    if n <= 1:
        return n

    cdef unsigned long a = 0, b = 1, c = 0

    c = a + b
    for _i in range(2, n):
        a = b
        b = c
        c = a + b

    return c


def cython_nogil(unsigned long n):
    # Explicitly release the GIL while running `fibonacci`
    with nogil:
        value = fibonacci(n)

    return value


def cython_gil(unsigned long n):
    # Because the GIL is not explicitly released, it implicitly
    # remains acquired when running the `fibonacci` function
    return fibonacci(n)
+

First, let's time how long it takes Cython to calculate the billionth Fibonacci number:

+
%%time
_ = cython_gil(N);
+
+

CPU times: user 365 ms, sys: 0 ns, total: 365 ms
Wall time: 372 ms

+
+
%%time
_ = cython_nogil(N);
+
+

CPU times: user 381 ms, sys: 0 ns, total: 381 ms
Wall time: 388 ms

+
+

Both versions (with and without GIL) take effectively the same amount of time to run. Even when running this calculation in parallel on separate threads, it is expected that the run time will double because only one thread can be active at a time:

+
%%time
from threading import Thread

# Create the two threads to run on
t1 = Thread(target=cython_gil, args=[N])
t2 = Thread(target=cython_gil, args=[N])
# Start the threads
t1.start(); t2.start()
# Wait for the threads to finish
t1.join(); t2.join()
+
+

CPU times: user 641 ms, sys: 5.62 ms, total: 647 ms
Wall time: 645 ms

+
+

However, if the first thread releases the GIL, the second thread is free to acquire it and run in parallel:

+
%%time

t1 = Thread(target=cython_nogil, args=[N])
t2 = Thread(target=cython_gil, args=[N])
t1.start(); t2.start()
t1.join(); t2.join()
+
+

CPU times: user 717 ms, sys: 372 µs, total: 718 ms
Wall time: 358 ms

+
+

Because user time represents the sum of processing time on all threads, it doesn't change much. The "wall time" has been cut roughly in half because each function is running simultaneously.

+

Keep in mind that the order in which threads are started makes a difference!

+
%%time

# Note that the GIL-locked version is started first
t1 = Thread(target=cython_gil, args=[N])
t2 = Thread(target=cython_nogil, args=[N])
t1.start(); t2.start()
t1.join(); t2.join()
+
+

CPU times: user 667 ms, sys: 0 ns, total: 667 ms
Wall time: 672 ms

+
+

Even though the second thread releases the GIL while running, it can't start until the first has completed. Thus, the overall runtime is effectively the same as running two GIL-locked threads.

+

Finally, be aware that attempting to unlock the GIL from a thread that doesn't own it will crash the interpreter, not just the thread attempting the unlock:

+
%%cython

cdef int cython_recurse(int n) nogil:
    if n <= 0:
        return 0

    with nogil:
        return cython_recurse(n - 1)

cython_recurse(2)
+
+

Fatal Python error: PyEval_SaveThread: NULL tstate

Thread 0x00007f499effd700 (most recent call first):
  File "/home/bspeice/.virtualenvs/release-the-gil/lib/python3.7/site-packages/ipykernel/parentpoller.py", line 39 in run
  File "/usr/lib/python3.7/threading.py", line 926 in _bootstrap_inner
  File "/usr/lib/python3.7/threading.py", line 890 in _bootstrap

+
+

In practice, avoiding this issue is simple. First, nogil functions probably shouldn't contain with nogil blocks. Second, Cython can conditionally acquire/release the GIL, so these conditions can be used to synchronize access. Finally, Cython's documentation for external C code contains more detail on how to safely manage the GIL.

+

To conclude: use Cython's nogil annotation to assert that functions are safe for calling when the GIL is unlocked, and with nogil to actually unlock the GIL and run those functions.

+

Numba

+

Like Cython, Numba is a "compiled Python." Where Cython works by compiling a Python-like language to C/C++, Numba compiles Python bytecode directly to machine code at runtime. Behavior is controlled with a special @jit decorator; calling a decorated function first compiles it to machine code before running. Calling the function a second time re-uses that machine code unless the argument types have changed.
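A minimal sketch of that dispatch behavior (the function is trivial filler):

```python
from numba import jit

@jit(nopython=True)
def add(a, b):
    return a + b

add(1, 2)      # first call compiles an integer specialization
add(3, 4)      # same argument types: the compiled code is re-used
add(1.0, 2.0)  # new argument types trigger a second compilation
```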

+

Numba works best when a nopython=True argument is added to the @jit decorator; functions compiled in nopython mode avoid the CPython API and have performance comparable to C. Further, adding nogil=True to the @jit decorator unlocks the GIL while that function is running. Note that nogil and nopython are separate arguments; while it is necessary for code to be compiled in nopython mode in order to release the lock, the GIL will remain locked if nogil=False (the default).

+

Let's repeat the same experiment, this time using Numba instead of Cython:

+
# The `int` type annotation is only for humans and is ignored
# by Numba.
@jit(nopython=True, nogil=True)
def numba_nogil(n: int) -> int:
    if n <= 1:
        return n

    a = 0
    b = 1

    c = a + b
    for _i in range(2, n):
        a = b
        b = c
        c = a + b

    return c


# Run using `nopython` mode to receive a performance boost,
# but GIL remains locked due to `nogil=False` by default.
@jit(nopython=True)
def numba_gil(n: int) -> int:
    if n <= 1:
        return n

    a = 0
    b = 1

    c = a + b
    for _i in range(2, n):
        a = b
        b = c
        c = a + b

    return c


# Call each function once to force compilation; we don't want
# the timing statistics to include how long it takes to compile.
numba_nogil(N)
numba_gil(N);
+

We'll perform the same tests as above; first, figure out how long it takes the function to run:

+
%%time
_ = numba_gil(N)
+
+

CPU times: user 253 ms, sys: 258 µs, total: 253 ms
Wall time: 251 ms

+
+

Aside: it's not immediately clear why Numba takes ~20% less time to run than Cython for code that should be effectively identical after compilation.

When running two GIL-locked threads, the result (as expected) takes around twice as long to compute:

+
%%time
t1 = Thread(target=numba_gil, args=[N])
t2 = Thread(target=numba_gil, args=[N])
t1.start(); t2.start()
t1.join(); t2.join()
+
+

CPU times: user 541 ms, sys: 3.96 ms, total: 545 ms
Wall time: 541 ms

+
+

But if the GIL-unlocking thread starts first, both threads run in parallel:

+
%%time
t1 = Thread(target=numba_nogil, args=[N])
t2 = Thread(target=numba_gil, args=[N])
t1.start(); t2.start()
t1.join(); t2.join()
+
+

CPU times: user 551 ms, sys: 7.77 ms, total: 559 ms
Wall time: 279 ms

+
+

Just like Cython, starting the GIL-locked thread first leads to poor performance:

+
%%time
t1 = Thread(target=numba_gil, args=[N])
t2 = Thread(target=numba_nogil, args=[N])
t1.start(); t2.start()
t1.join(); t2.join()
+
+

CPU times: user 524 ms, sys: 0 ns, total: 524 ms
Wall time: 522 ms

+
+

Finally, unlike Cython, Numba will unlock the GIL if and only if it is currently acquired; recursively calling @jit(nogil=True) functions is perfectly safe:

+
from numba import jit

@jit(nopython=True, nogil=True)
def numba_recurse(n: int) -> int:
    if n <= 0:
        return 0

    return numba_recurse(n - 1)

numba_recurse(2);
+

Conclusion

+

Before finishing, it's important to address pain points that will show up if these techniques are used in a more realistic project:

+

First, code running in a GIL-free context will likely also need non-trivial data structures; GIL-free functions aren't useful if they're constantly interacting with Python objects whose access requires the GIL. Cython provides extension types and Numba provides a @jitclass decorator to address this need.
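For a taste of the Numba side, here's a minimal @jitclass sketch; it isn't from the experiments above, and note that newer Numba releases import jitclass from numba.experimental:

```python
from numba import int64
from numba.experimental import jitclass  # older releases: `from numba import jitclass`

# The spec maps each field to a Numba type, so compiled code can use
# instances without touching Python objects.
@jitclass([("count", int64)])
class Counter:
    def __init__(self):
        self.count = 0

    def increment(self):
        self.count += 1
        return self.count

c = Counter()
c.increment()
print(c.count)  # 1
```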

+

Second, building and distributing applications that make use of Cython/Numba can be complicated. Cython packages require running the compiler, (potentially) linking/packaging external dependencies, and distributing a binary wheel. Numba is generally simpler because the code being distributed is pure Python, but can be tricky since errors aren't detected until runtime.
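For reference, the Cython build step usually boils down to a small setup.py; this is a minimal sketch, and fibonacci.pyx is a hypothetical module name:

```python
# setup.py
from setuptools import setup
from Cython.Build import cythonize

setup(
    name="release-the-gil",  # hypothetical package name
    ext_modules=cythonize("fibonacci.pyx"),
)
```

A local build is then `python setup.py build_ext --inplace`; distribution means compiling a wheel per platform.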

+

Finally, while unlocking the GIL is often a solution in search of a problem, both Cython and Numba provide tools to directly manage the GIL when appropriate. This enables true parallelism (not just concurrency) that is impossible in vanilla Python.

\ No newline at end of file diff --git a/404.html b/404.html index 3969180..899a525 100644 --- a/404.html +++ b/404.html @@ -1,24 +1 @@ ---- -layout: page ---- - - - -
-

404

- -

Page not found :(

-

The requested page could not be found.

-
+Page Not Found | The Old Speice Guy
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

\ No newline at end of file diff --git a/CNAME b/CNAME index 8875e7a..13be74a 100644 --- a/CNAME +++ b/CNAME @@ -1 +1 @@ -speice.io +speice.io \ No newline at end of file diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 3031ef3..0000000 --- a/Gemfile +++ /dev/null @@ -1,29 +0,0 @@ -source "https://rubygems.org" - -# Hello! This is where you manage which Jekyll version is used to run. -# When you want to use a different version, change it below, save the -# file and run `bundle install`. Run Jekyll with `bundle exec`, like so: -# -# bundle exec jekyll serve -# -# This will help ensure the proper Jekyll version is running. -# Happy Jekylling! -gem "jekyll", "~> 3.8.3" - -gem "texture" - -# If you want to use GitHub Pages, remove the "gem "jekyll"" above and -# uncomment the line below. To upgrade, run `bundle update github-pages`. -# gem "github-pages", group: :jekyll_plugins - -# If you have any plugins, put them here! -group :jekyll_plugins do - gem "jekyll-feed", "~> 0.6" - gem "jekyll-remote-theme" -end - -# Windows does not include zoneinfo files, so bundle the tzinfo-data gem -gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby] - -# Performance-booster for watching directories on Windows -gem "wdm", "~> 0.1.0" if Gem.win_platform? diff --git a/Gemfile.lock b/Gemfile.lock deleted file mode 100644 index 310c738..0000000 --- a/Gemfile.lock +++ /dev/null @@ -1,78 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - addressable (2.7.0) - public_suffix (>= 2.0.2, < 5.0) - colorator (1.1.0) - concurrent-ruby (1.1.6) - em-websocket (0.5.1) - eventmachine (>= 0.12.9) - http_parser.rb (~> 0.6.0) - eventmachine (1.2.7) - ffi (1.12.2) - forwardable-extended (2.6.0) - http_parser.rb (0.6.0) - i18n (0.9.5) - concurrent-ruby (~> 1.0) - jekyll (3.8.6) - addressable (~> 2.4) - colorator (~> 1.0) - em-websocket (~> 0.5) - i18n (~> 0.7) - jekyll-sass-converter (~> 1.0) - jekyll-watch (~> 2.0) - kramdown (~> 1.14) - liquid (~> 4.0) - mercenary (~> 0.3.3) - pathutil (~> 0.9) - rouge (>= 1.7, < 4) - safe_yaml (~> 1.0) - jekyll-feed (0.13.0) - jekyll (>= 3.7, < 5.0) - jekyll-remote-theme (0.4.2) - addressable (~> 2.0) - jekyll (>= 3.5, < 5.0) - jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0) - rubyzip (>= 1.3.0, < 3.0) - jekyll-sass-converter (1.5.2) - sass (~> 3.4) - jekyll-seo-tag (2.6.1) - jekyll (>= 3.3, < 5.0) - jekyll-watch (2.2.1) - listen (~> 3.0) - kramdown (1.17.0) - liquid (4.0.3) - listen (3.2.1) - rb-fsevent (~> 0.10, >= 0.10.3) - rb-inotify (~> 0.9, >= 0.9.10) - mercenary (0.3.6) - pathutil (0.16.2) - forwardable-extended (~> 2.6) - public_suffix (4.0.4) - rb-fsevent (0.10.3) - rb-inotify (0.10.1) - ffi (~> 1.0) - rouge (3.17.0) - rubyzip (2.3.0) - safe_yaml (1.0.5) - sass (3.7.4) - sass-listen (~> 4.0.0) - sass-listen (4.0.0) - rb-fsevent (~> 0.9, >= 0.9.4) - rb-inotify (~> 0.9, >= 0.9.7) - texture (0.3) - jekyll (~> 3.7) - jekyll-seo-tag (~> 2.1) - -PLATFORMS - ruby - -DEPENDENCIES - jekyll (~> 3.8.3) - jekyll-feed (~> 0.6) - jekyll-remote-theme - texture - tzinfo-data - -BUNDLED WITH - 2.1.4 diff --git a/_config.yml b/_config.yml deleted file mode 100644 index 9f9c8cd..0000000 --- a/_config.yml +++ /dev/null @@ -1,44 +0,0 @@ -# Welcome to Jekyll! -# -# This config file is meant for settings that affect your whole blog, values -# which you are expected to set up once and rarely edit after that. If you find -# yourself editing this file very often, consider using Jekyll's data files -# feature for the data you need to update frequently. 
-# -# For technical reasons, this file is *NOT* reloaded automatically when you use -# 'bundle exec jekyll serve'. If you change this file, please restart the server process. - -# Site settings -# These are used to personalize your new site. If you look in the HTML files, -# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. -# You can create any custom variable you would like, and they will be accessible -# in the templates via {{ site.myvariable }}. -title: speice.io -description: The Old Speice Guy -email: bradlee@speice.io -baseurl: "" # the subpath of your site, e.g. /blog -url: "https://speice.io/" # the base hostname & protocol for your site, e.g. http://example.com -github_username: bspeice - -# Build settings -markdown: kramdown -# theme: texture -remote_theme: thelehhman/texture -plugins: - - jekyll-feed - - jekyll-remote-theme - -include: [_pages] -permalink: /:year/:month/:title.html - -# Exclude from processing. -# The following items will not be processed, by default. Create a custom list -# to override the default setting. -# exclude: -# - Gemfile -# - Gemfile.lock -# - node_modules -# - vendor/bundle/ -# - vendor/cache/ -# - vendor/gems/ -# - vendor/ruby/ diff --git a/_includes/footer.html b/_includes/footer.html deleted file mode 100644 index 4d3c143..0000000 --- a/_includes/footer.html +++ /dev/null @@ -1,23 +0,0 @@ -{% if page.layout == 'post' %} -{% comment %}Thanks to https://www.bytedude.com/jekyll-previous-and-next-posts/{% endcomment %} -
-
-
-
- {% if page.previous.url %} - « {{page.previous.title}} - {% endif %} -
-
- {% if page.next.url %} - {{page.next.title}} » - {% endif %} -
-
-
- - - -{% endif %} \ No newline at end of file diff --git a/_includes/head.html b/_includes/head.html deleted file mode 100644 index ec0f85d..0000000 --- a/_includes/head.html +++ /dev/null @@ -1,7 +0,0 @@ - - - - - -{{ page.title | default: site.title }} -{% seo %} \ No newline at end of file diff --git a/_includes/nav.html b/_includes/nav.html deleted file mode 100644 index 14ca138..0000000 --- a/_includes/nav.html +++ /dev/null @@ -1,7 +0,0 @@ - \ No newline at end of file diff --git a/_includes/page_header.html b/_includes/page_header.html deleted file mode 100644 index e486090..0000000 --- a/_includes/page_header.html +++ /dev/null @@ -1,15 +0,0 @@ -
-

{{ site.title }}

-

{{ site.description }}

- -
\ No newline at end of file diff --git a/_pages/about.md b/_pages/about.md deleted file mode 100644 index 3f45159..0000000 --- a/_pages/about.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: page -title: About -permalink: /about/ ---- - -Developer currently living in New York City. - -Best ways to get in contact: - -- Email: [bradlee@speice.io](mailto:bradlee@speice.io) -- Github: [bspeice](https://github.com/bspeice) -- LinkedIn: [bradleespeice](https://www.linkedin.com/in/bradleespeice/) diff --git a/_posts/2018-05-28-hello.md b/_posts/2018-05-28-hello.md deleted file mode 100644 index f7c76c7..0000000 --- a/_posts/2018-05-28-hello.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -layout: post -title: "Hello!" -description: "" -category: -tags: [] ---- - -I'll do what I can to keep this short, there's plenty of other things we both should be doing right -now. - -If you're here for the bread pics, and to marvel in some other culinary side projects, I've got you -covered: - -![Saturday Bread]({{ "/assets/images/2018-05-28-bread.jpg" | absolute_url }}) - -And no, I'm not posting pictures of earlier attempts that ended up turning into rocks in the oven. - -Okay, just one: - -![Bread as rock]({{ "/assets/images/2018-05-28-rocks.jpg" | absolute_url }}) - -If you're here for keeping up with the man Bradlee Speice, got plenty of that too. Plus some -up-coming super-nerdy posts about how I'm changing the world. - -And if you're not here for those things: don't have a lot for you, sorry. But you're welcome to let -me know what needs to change. - -I'm looking forward to making this a place to talk about what's going on in life, I hope you'll -stick it out with me. The best way to follow what's going on is on my [About](/about/) page, but if -you want the joy of clicking links, here's a few good ones: - -- Email (people still use this?): [bradlee@speice.io](mailto:bradlee@speice.io) -- Mastodon (nerd Twitter): [@bradlee](https://mastodon.social/@bradlee) -- Chat (RiotIM): [@bspeice:matrix.com](https://matrix.to/#/@bspeice:matrix.com) -- The comments section (not for people with sanity intact): ↓↓↓ - -Thanks, and keep it amazing. diff --git a/_posts/2018-06-25-dateutil-parser-to-rust.md b/_posts/2018-06-25-dateutil-parser-to-rust.md deleted file mode 100644 index 7646f28..0000000 --- a/_posts/2018-06-25-dateutil-parser-to-rust.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -layout: post -title: "What I Learned: Porting Dateutil Parser to Rust" -description: "" -category: -tags: [dtparse, rust] ---- - -Hi. I'm Bradlee. - -I've mostly been a lurker in Rust for a while, making a couple small contributions here and there. -So launching [dtparse](https://github.com/bspeice/dtparse) feels like nice step towards becoming a -functioning member of society. But not too much, because then you know people start asking you to -pay bills, and ain't nobody got time for that. - -But I built dtparse, and you can read about my thoughts on the process. Or don't. I won't tell you -what to do with your life (but you should totally keep reading). - -# Slow down, what? - -OK, fine, I guess I should start with _why_ someone would do this. - -[Dateutil](https://github.com/dateutil/dateutil) is a Python library for handling dates. The -standard library support for time in Python is kinda dope, but there are a lot of extras that go -into making it useful beyond just the [datetime](https://docs.python.org/3.6/library/datetime.html) -module. 
`dateutil.parser` specifically is code to take all the super-weird time formats people come -up with and turn them into something actually useful. - -Date/time parsing, it turns out, is just like everything else involving -[computers](https://infiniteundo.com/post/25326999628/falsehoods-programmers-believe-about-time) and -[time](https://infiniteundo.com/post/25509354022/more-falsehoods-programmers-believe-about-time): it -feels like it shouldn't be that difficult to do, until you try to do it, and you realize that people -suck and this is why -[we can't we have nice things](https://zachholman.com/talk/utc-is-enough-for-everyone-right). But -alas, we'll try and make contemporary art out of the rubble and give it a pretentious name like -_Time_. - -![A gravel mound](/assets/images/2018-06-25-gravel-mound.jpg) - -> [Time](https://www.goodfreephotos.com/united-states/montana/elkhorn/remains-of-the-mining-operation-elkhorn.jpg.php) - -What makes `dateutil.parser` great is that there's single function with a single argument that -drives what programmers interact with: -[`parse(timestr)`](https://github.com/dateutil/dateutil/blob/6dde5d6298cfb81a4c594a38439462799ed2aef2/dateutil/parser/_parser.py#L1258). -It takes in the time as a string, and gives you back a reasonable "look, this is the best anyone can -possibly do to make sense of your input" value. It doesn't expect much of you. - -[And now it's in Rust.](https://github.com/bspeice/dtparse/blob/7d565d3a78876dbebd9711c9720364fe9eba7915/src/lib.rs#L1332) - -# Lost in Translation - -Having worked at a bulge-bracket bank watching Java programmers try to be Python programmers, I'm -admittedly hesitant to publish Python code that's trying to be Rust. Interestingly, Rust code can -actually do a great job of mimicking Python. It's certainly not idiomatic Rust, but I've had better -experiences than -[this guy](https://webcache.googleusercontent.com/search?q=cache:wkYMpktJtnUJ:https://jackstouffer.com/blog/porting_dateutil.html+&cd=3&hl=en&ct=clnk&gl=us) -who attempted the same thing for D. These are the actual take-aways: - -When transcribing code, **stay as close to the original library as possible**. I'm talking about -using the same variable names, same access patterns, the whole shebang. It's way too easy to make a -couple of typos, and all of a sudden your code blows up in new and exciting ways. Having a reference -manual for verbatim what your code should be means that you don't spend that long debugging -complicated logic, you're more looking for typos. - -Also, **don't use nice Rust things like enums**. While -[one time it worked out OK for me](https://github.com/bspeice/dtparse/blob/7d565d3a78876dbebd9711c9720364fe9eba7915/src/lib.rs#L88-L94), -I also managed to shoot myself in the foot a couple times because `dateutil` stores AM/PM as a -boolean and I mixed up which was true, and which was false (side note: AM is false, PM is true). In -general, writing nice code _should not be a first-pass priority_ when you're just trying to recreate -the same functionality. - -**Exceptions are a pain.** Make peace with it. Python code is just allowed to skip stack frames. So -when a co-worker told me "Rust is getting try-catch syntax" I properly freaked out. Turns out -[he's not quite right](https://github.com/rust-lang/rfcs/pull/243), and I'm OK with that. 
And while -`dateutil` is pretty well-behaved about not skipping multiple stack frames, -[130-line try-catch blocks](https://github.com/dateutil/dateutil/blob/16561fc99361979e88cccbd135393b06b1af7e90/dateutil/parser/_parser.py#L730-L865) -take a while to verify. - -As another Python quirk, **be very careful about -[long nested if-elif-else blocks](https://github.com/dateutil/dateutil/blob/16561fc99361979e88cccbd135393b06b1af7e90/dateutil/parser/_parser.py#L494-L568)**. -I used to think that Python's whitespace was just there to get you to format your code correctly. I -think that no longer. It's way too easy to close a block too early and have incredibly weird issues -in the logic. Make sure you use an editor that displays indentation levels so you can keep things -straight. - -**Rust macros are not free.** I originally had the -[main test body](https://github.com/bspeice/dtparse/blob/b0e737f088eca8e83ab4244c6621a2797d247697/tests/compat.rs#L63-L217) -wrapped up in a macro using [pyo3](https://github.com/PyO3/PyO3). It took two minutes to compile. -After -[moving things to a function](https://github.com/bspeice/dtparse/blob/e017018295c670e4b6c6ee1cfff00dbb233db47d/tests/compat.rs#L76-L205) -compile times dropped down to ~5 seconds. Turns out 150 lines \* 100 tests = a lot of redundant code -to be compiled. My new rule of thumb is that any macros longer than 10-15 lines are actually -functions that need to be liberated, man. - -Finally, **I really miss list comprehensions and dictionary comprehensions.** As a quick comparison, -see -[this dateutil code](https://github.com/dateutil/dateutil/blob/16561fc99361979e88cccbd135393b06b1af7e90/dateutil/parser/_parser.py#L476) -and -[the implementation in Rust](https://github.com/bspeice/dtparse/blob/7d565d3a78876dbebd9711c9720364fe9eba7915/src/lib.rs#L619-L629). -I probably wrote it wrong, and I'm sorry. Ultimately though, I hope that these comprehensions can be -added through macros or syntax extensions. Either way, they're expressive, save typing, and are -super-readable. Let's get more of that. - -# Using a young language - -Now, Rust is exciting and new, which means that there's opportunity to make a substantive impact. On -more than one occasion though, I've had issues navigating the Rust ecosystem. - -What I'll call the "canonical library" is still being built. In Python, if you need datetime -parsing, you use `dateutil`. If you want `decimal` types, it's already in the -[standard library](https://docs.python.org/3.6/library/decimal.html). While I might've gotten away -with `f64`, `dateutil` uses decimals, and I wanted to follow the principle of **staying as close to -the original library as possible**. Thus began my quest to find a decimal library in Rust. What I -quickly found was summarized in a comment: - -> Writing a BigDecimal is easy. Writing a _good_ BigDecimal is hard. -> -> [-cmr](https://github.com/rust-lang/rust/issues/8937#issuecomment-34582794) - -In practice, this means that there are at least [4](https://crates.io/crates/bigdecimal) -[different](https://crates.io/crates/rust_decimal) -[implementations](https://crates.io/crates/decimal) [available](https://crates.io/crates/decimate). 
-And that's a lot of decisions to worry about when all I'm thinking is "why can't -[calendar reform](https://en.wikipedia.org/wiki/Calendar_reform) be a thing" and I'm forced to dig -through a [couple](https://github.com/rust-lang/rust/issues/8937#issuecomment-31661916) -[different](https://github.com/rust-lang/rfcs/issues/334) -[threads](https://github.com/rust-num/num/issues/8) to figure out if the library I'm look at is dead -or just stable. - -And even when the "canonical library" exists, there's no guarantees that it will be well-maintained. -[Chrono](https://github.com/chronotope/chrono) is the _de facto_ date/time library in Rust, and just -released version 0.4.4 like two days ago. Meanwhile, -[chrono-tz](https://github.com/chronotope/chrono-tz) appears to be dead in the water even though -[there are people happy to help maintain it](https://github.com/chronotope/chrono-tz/issues/19). I -know relatively little about it, but it appears that most of the release process is automated; -keeping that up to date should be a no-brainer. - -## Trial Maintenance Policy - -Specifically given "maintenance" being an -[oft-discussed](https://www.reddit.com/r/rust/comments/48540g/thoughts_on_initiators_vs_maintainers/) -issue, I'm going to try out the following policy to keep things moving on `dtparse`: - -1. Issues/PRs needing _maintainer_ feedback will be updated at least weekly. I want to make sure - nobody's blocking on me. - -2. To keep issues/PRs needing _contributor_ feedback moving, I'm going to (kindly) ask the - contributor to check in after two weeks, and close the issue without resolution if I hear nothing - back after a month. - -The second point I think has the potential to be a bit controversial, so I'm happy to receive -feedback on that. And if a contributor responds with "hey, still working on it, had a kid and I'm -running on 30 seconds of sleep a night," then first: congratulations on sustaining human life. And -second: I don't mind keeping those requests going indefinitely. I just want to try and balance -keeping things moving with giving people the necessary time they need. - -I should also note that I'm still getting some best practices in place - CONTRIBUTING and -CONTRIBUTORS files need to be added, as well as issue/PR templates. In progress. None of us are -perfect. - -# Roadmap and Conclusion - -So if I've now built a `dateutil`-compatible parser, we're done, right? Of course not! That's not -nearly ambitious enough. - -Ultimately, I'd love to have a library that's capable of parsing everything the Linux `date` command -can do (and not `date` on OSX, because seriously, BSD coreutils are the worst). I know Rust has a -coreutils rewrite going on, and `dtparse` would potentially be an interesting candidate since it -doesn't bring in a lot of extra dependencies. [`humantime`](https://crates.io/crates/humantime) -could help pick up some of the (current) slack in dtparse, so maybe we can share and care with each -other? - -All in all, I'm mostly hoping that nobody's already done this and I haven't spent a bit over a month -on redundant code. So if it exists, tell me. I need to know, but be nice about it, because I'm going -to take it hard. - -And in the mean time, I'm looking forward to building more. Onwards. 
diff --git a/_posts/2018-09-01-primitives-in-rust-are-weird.md b/_posts/2018-09-01-primitives-in-rust-are-weird.md deleted file mode 100644 index bcc8ae8..0000000 --- a/_posts/2018-09-01-primitives-in-rust-are-weird.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -layout: post -title: "Primitives in Rust are Weird (and Cool)" -description: "but mostly weird." -category: -tags: [rust, c, java, python, x86] ---- - -I wrote a really small Rust program a while back because I was curious. I was 100% convinced it -couldn't possibly run: - -```rust -fn main() { - println!("{}", 8.to_string()) -} -``` - -And to my complete befuddlement, it compiled, ran, and produced a completely sensible output. The -reason I was so surprised has to do with how Rust treats a special category of things I'm going to -call _primitives_. In the current version of the Rust book, you'll see them referred to as -[scalars][rust_scalar], and in older versions they'll be called [primitives][rust_primitive], but -we're going to stick with the name _primitive_ for the time being. Explaining why this program is so -cool requires talking about a number of other programming languages, and keeping a consistent -terminology makes things easier. - -**You've been warned:** this is going to be a tedious post about a relatively minor issue that -involves Java, Python, C, and x86 Assembly. And also me pretending like I know what I'm talking -about with assembly. - -# Defining primitives (Java) - -The reason I'm using the name _primitive_ comes from how much of my life is Java right now. Spoiler -alert: a lot of it. And for the most part I like Java, but I digress. In Java, there's a special -name for some specific types of values: - -> ``` -> bool char byte -> short int long -> float double -> ``` - -```` - -They are referred to as [primitives][java_primitive]. And relative to the other bits of Java, -they have two unique features. First, they don't have to worry about the -[billion-dollar mistake](https://en.wikipedia.org/wiki/Tony_Hoare#Apologies_and_retractions); -primitives in Java can never be `null`. Second: *they can't have instance methods*. -Remember that Rust program from earlier? Java has no idea what to do with it: - -```java -class Main { - public static void main(String[] args) { - int x = 8; - System.out.println(x.toString()); // Triggers a compiler error - } -} -```` - -The error is: - -``` -Main.java:5: error: int cannot be dereferenced - System.out.println(x.toString()); - ^ -1 error -``` - -Specifically, Java's [`Object`](https://docs.oracle.com/javase/10/docs/api/java/lang/Object.html) -and things that inherit from it are pointers under the hood, and we have to dereference them before -the fields and methods they define can be used. In contrast, _primitive types are just values_ - -there's nothing to be dereferenced. In memory, they're just a sequence of bits. - -If we really want, we can turn the `int` into an -[`Integer`](https://docs.oracle.com/javase/10/docs/api/java/lang/Integer.html) and then dereference -it, but it's a bit wasteful: - -```java -class Main { - public static void main(String[] args) { - int x = 8; - Integer y = Integer.valueOf(x); - System.out.println(y.toString()); - } -} -``` - -This creates the variable `y` of type `Integer` (which inherits `Object`), and at run time we -dereference `y` to locate the `toString()` function and call it. Rust obviously handles things a bit -differently, but we have to dig into the low-level details to see it in action. 
- -# Low Level Handling of Primitives (C) - -We first need to build a foundation for reading and understanding the assembly code the final answer -requires. Let's begin with showing how the `C` language (and your computer) thinks about "primitive" -values in memory: - -```c -void my_function(int num) {} - -int main() { - int x = 8; - my_function(x); -} -``` - -The [compiler explorer](https://godbolt.org/z/lgNYcc) gives us an easy way of showing off the -assembly-level code that's generated: whose output has been lightly -edited - -```nasm -main: - push rbp - mov rbp, rsp - sub rsp, 16 - - ; We assign the value `8` to `x` here - mov DWORD PTR [rbp-4], 8 - - ; And copy the bits making up `x` to a location - ; `my_function` can access (`edi`) - mov eax, DWORD PTR [rbp-4] - mov edi, eax - - ; Call `my_function` and give it control - call my_function - - mov eax, 0 - leave - ret - -my_function: - push rbp - mov rbp, rsp - - ; Copy the bits out of the pre-determined location (`edi`) - ; to somewhere we can use - mov DWORD PTR [rbp-4], edi - nop - - pop rbp - ret -``` - -At a really low level of memory, we're copying bits around using the [`mov`][x86_guide] instruction; -nothing crazy. But to show how similar Rust is, let's take a look at our program translated from C -to Rust: - -```rust -fn my_function(x: i32) {} - -fn main() { - let x = 8; - my_function(x) -} -``` - -And the assembly generated when we stick it in the -[compiler explorer](https://godbolt.org/z/cAlmk0): again, lightly -edited - -```nasm -example::main: - push rax - - ; Look familiar? We're copying bits to a location for `my_function` - ; The compiler just optimizes out holding `x` in memory - mov edi, 8 - - ; Call `my_function` and give it control - call example::my_function - - pop rax - ret - -example::my_function: - sub rsp, 4 - - ; And copying those bits again, just like in C - mov dword ptr [rsp], edi - - add rsp, 4 - ret -``` - -The generated Rust assembly is functionally pretty close to the C assembly: _When working with -primitives, we're just dealing with bits in memory_. - -In Java we have to dereference a pointer to call its functions; in Rust, there's no pointer to -dereference. So what exactly is going on with this `.to_string()` function call? - -# impl primitive (and Python) - -Now it's time to reveal my trap card show the revelation that tied all this -together: _Rust has implementations for its primitive types._ That's right, `impl` blocks aren't -only for `structs` and `traits`, primitives get them too. Don't believe me? Check out -[u32](https://doc.rust-lang.org/std/primitive.u32.html), -[f64](https://doc.rust-lang.org/std/primitive.f64.html) and -[char](https://doc.rust-lang.org/std/primitive.char.html) as examples. - -But the really interesting bit is how Rust turns those `impl` blocks into assembly. Let's break out -the [compiler explorer](https://godbolt.org/z/6LBEwq) once again: - -```rust -pub fn main() { - 8.to_string() -} -``` - -And the interesting bits in the assembly: heavily trimmed down - -```nasm -example::main: - sub rsp, 24 - mov rdi, rsp - lea rax, [rip + .Lbyte_str.u] - mov rsi, rax - - ; Cool stuff right here - call ::to_string@PLT - - mov rdi, rsp - call core::ptr::drop_in_place - add rsp, 24 - ret -``` - -Now, this assembly is a bit more complicated, but here's the big revelation: **we're calling -`to_string()` as a function that exists all on its own, and giving it the instance of `8`**. 
Instead -of thinking of the value 8 as an instance of `u32` and then peeking in to find the location of the -function we want to call (like Java), we have a function that exists outside of the instance and -just give that function the value `8`. - -This is an incredibly technical detail, but the interesting idea I had was this: _if `to_string()` -is a static function, can I refer to the unbound function and give it an instance?_ - -Better explained in code (and a [compiler explorer](https://godbolt.org/z/fJY-gA) link because I -seriously love this thing): - -```rust -struct MyVal { - x: u32 -} - -impl MyVal { - fn to_string(&self) -> String { - self.x.to_string() - } -} - -pub fn main() { - let my_val = MyVal { x: 8 }; - - // THESE ARE THE SAME - my_val.to_string(); - MyVal::to_string(&my_val); -} -``` - -Rust is totally fine "binding" the function call to the instance, and also as a static. - -MIND == BLOWN. - -Python does the same thing where I can both call functions bound to their instances and also call as -an unbound function where I give it the instance: - -```python -class MyClass(): - x = 24 - - def my_function(self): - print(self.x) - -m = MyClass() - -m.my_function() -MyClass.my_function(m) -``` - -And Python tries to make you _think_ that primitives can have instance methods... - -```python ->>> dir(8) -['__abs__', '__add__', '__and__', '__class__', '__cmp__', '__coerce__', -'__delattr__', '__div__', '__divmod__', '__doc__', '__float__', '__floordiv__', -... -'__setattr__', '__sizeof__', '__str__', '__sub__', '__subclasshook__', '__truediv__', -...] - ->>> # Theoretically `8.__str__()` should exist, but: - ->>> 8.__str__() - File "", line 1 - 8.__str__() - ^ -SyntaxError: invalid syntax - ->>> # It will run if we assign it first though: ->>> x = 8 ->>> x.__str__() -'8' -``` - -...but in practice it's a bit complicated. - -So while Python handles binding instance methods in a way similar to Rust, it's still not able to -run the example we started with. - -# Conclusion - -This was a super-roundabout way of demonstrating it, but the way Rust handles incredibly minor -details like primitives leads to really cool effects. Primitives are optimized like C in how they -have a space-efficient memory layout, yet the language still has a lot of features I enjoy in Python -(like both instance and late binding). - -And when you put it together, there are areas where Rust does cool things nobody else can; as a -quirky feature of Rust's type system, `8.to_string()` is actually valid code. - -Now go forth and fool your friends into thinking you know assembly. This is all I've got. - -[x86_guide]: http://www.cs.virginia.edu/~evans/cs216/guides/x86.html -[java_primitive]: https://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html -[rust_scalar]: https://doc.rust-lang.org/book/second-edition/ch03-02-data-types.html#scalar-types -[rust_primitive]: https://doc.rust-lang.org/book/first-edition/primitive-types.html diff --git a/_posts/2018-09-15-isomorphic-apps.md b/_posts/2018-09-15-isomorphic-apps.md deleted file mode 100644 index abc0dcb..0000000 --- a/_posts/2018-09-15-isomorphic-apps.md +++ /dev/null @@ -1,294 +0,0 @@ ---- -layout: post -title: "Isomorphic Desktop Apps with Rust" -description: "Electron + WASM = ☣" -category: -tags: [rust, javascript, webassembly] ---- - -Forgive me, but this is going to be a bit of a schizophrenic post. I both despise Javascript and the -modern ECMAScript ecosystem, and I'm stunned by its success doing some really cool things. 
It's -[this duality](https://www.destroyallsoftware.com/talks/the-birth-and-death-of-javascript) that's -led me to a couple of (very) late nights over the past weeks trying to reconcile myself as I -bootstrap a simple desktop application. - -See, as much as -[Webassembly isn't trying to replace Javascript](https://webassembly.org/docs/faq/#is-webassembly-trying-to-replace-javascript), -**I want Javascript gone**. There are plenty of people who don't share my views, and they are -probably nicer and more fun at parties. But I cringe every time "Webpack" is mentioned, and I think -it's hilarious that the -[language specification](https://ecma-international.org/publications/standards/Ecma-402.htm) -dramatically outpaces anyone's -[actual implementation](https://kangax.github.io/compat-table/es2016plus/). The answer to this -conundrum is of course to recompile code from newer versions of the language to older versions _of -the same language_ before running. At least [Babel] is a nice tongue-in-cheek reference. - -Yet for as much hate as [Electron] receives, it does a stunningly good job at solving a really hard -problem: _how the hell do I put a button on the screen and react when the user clicks it_? GUI -programming is hard, straight up. But if browsers are already able to run everywhere, why don't we -take advantage of someone else solving the hard problems for us? I don't like that I have to use -Javascript for it, but I really don't feel inclined to whip out good ol' [wxWidgets]. - -Now there are other native solutions ([libui-rs], [conrod], [oh hey wxWdidgets again!][wxrust]), but -those also have their own issues with distribution, styling, etc. With Electron, I can -`yarn create electron-app my-app` and just get going, knowing that packaging/upgrades/etc. are built -in. - -My question is: given recent innovations with WASM, _are we Electron yet_? - -No, not really. - -Instead, **what would it take to get to a point where we can skip Javascript in Electron apps?** - -# Setting the Stage - -Truth is, WASM/Webassembly is a pretty new technology and I'm a total beginner in this area. There -may already be solutions to the issues I discuss, but I'm totally unaware of them, so I'm going to -try and organize what I did manage to discover. - -I should also mention that the content and things I'm talking about here are not intended to be -prescriptive, but more "if someone else is interested, what do we already know doesn't work?" _I -expect everything in this post to be obsolete within two months._ Even over the course of writing -this, [a separate blog post](https://mnt.io/2018/08/28/from-rust-to-beyond-the-asm-js-galaxy/) had -to be modified because [upstream changes](https://github.com/WebAssembly/binaryen/pull/1642) broke a -[Rust tool](https://github.com/rustwasm/wasm-bindgen/pull/787) the post tried to use. The post -ultimately -[got updated](https://mnt.io/2018/08/28/from-rust-to-beyond-the-asm-js-galaxy/#comment-477), **but -all this happened within the span of a week.** Things are moving quickly. - -I'll also note that we're going to skip [asm.js] and [emscripten]. Truth be told, I couldn't get -either of these to output anything, and so I'm just going to say -[here be dragons.](https://en.wikipedia.org/wiki/Here_be_dragons) Everything I'm discussing here -uses the `wasm32-unknown-unknown` target. - -The code that I _did_ get running is available -[over here](https://github.com/speice-io/isomorphic-rust). 
Feel free to use it as a starting point, -but I'm mostly including the link as a reference for the things that were attempted. - -# An Example Running Application - -So, I did _technically_ get a running application: - -![Electron app using WASM](/assets/images/2018-09-15-electron-percy-wasm.png) - -...which you can also try out if you want: - -```sh -git clone https://github.com/speice-io/isomorphic-rust.git -cd isomorphic_rust/percy -yarn install && yarn start -``` - -...but I wouldn't really call it a "high quality" starting point to base future work on. It's mostly -there to prove this is possible in the first place. And that's something to be proud of! There's a -huge amount of engineering that went into showing a window with the text "It's alive!". - -There's also a lot of usability issues that prevent me from recommending anyone try Electron and -WASM apps at the moment, and I think that's the more important thing to discuss. - -# Issue the First: Complicated Toolchains - -I quickly established that [wasm-bindgen] was necessary to "link" my Rust code to Javascript. At -that point you've got an Electron app that starts an HTML page which ultimately fetches your WASM -blob. To keep things simple, the goal was to package everything using [webpack] so that I could just -load a `bundle.js` file on the page. That decision was to be the last thing that kinda worked in -this process. - -The first issue -[I ran into](https://www.reddit.com/r/rust/comments/98lpun/unable_to_load_wasm_for_electron_application/) -while attempting to bundle everything via `webpack` is a detail in the WASM spec: - -> This function accepts a Response object, or a promise for one, and ... **[if > it] does not match -> the `application/wasm` MIME type**, the returned promise will be rejected with a TypeError; -> -> [WebAssembly - Additional Web Embedding API](https://webassembly.org/docs/web/#additional-web-embedding-api) - -Specifically, if you try and load a WASM blob without the MIME type set, you'll get an error. On the -web this isn't a huge issue, as the server can set MIME types when delivering the blob. With -Electron, you're resolving things with a `file://` URL and thus can't control the MIME type: - -![TypeError: Incorrect response MIME type. Expected 'application/wasm'.](/assets/images/2018-09-15-incorrect-MIME-type.png) - -There are a couple of solutions depending on how far into the deep end you care to venture: - -- Embed a static file server in your Electron application -- Use a [custom protocol](https://electronjs.org/docs/api/protocol) and custom protocol handler -- Host your WASM blob on a website that you resolve at runtime - -But all these are pretty bad solutions and defeat the purpose of using WASM in the first place. 
-Instead, my workaround was to -[open a PR with `webpack`](https://github.com/webpack/webpack/issues/7918) and use regex to remove -calls to `instantiateStreaming` in the -[build script](https://github.com/speice-io/isomorphic-rust/blob/master/percy/build.sh#L21-L25): - -```sh -cargo +nightly build --target=wasm32-unknown-unknown && \ - wasm-bindgen "$WASM_DIR/debug/$WASM_NAME.wasm" --out-dir "$APP_DIR" --no-typescript && \ - # Have to use --mode=development so we can patch out the call to instantiateStreaming - "$DIR/node_modules/webpack-cli/bin/cli.js" --mode=development "$APP_DIR/app_loader.js" -o "$APP_DIR/bundle.js" && \ - sed -i 's/.*instantiateStreaming.*//g' "$APP_DIR/bundle.js" -``` - -Once that lands, the -[build process](https://github.com/speice-io/isomorphic-rust/blob/master/percy_patched_webpack/build.sh#L24-L27) -becomes much simpler: - -```sh - -cargo +nightly build --target=wasm32-unknown-unknown && \ - wasm-bindgen "$WASM_DIR/debug/$WASM_NAME.wasm" --out-dir "$APP_DIR" --no-typescript && \ - "$DIR/node_modules/webpack-cli/bin/cli.js" --mode=production "$APP_DIR/app_loader.js" -o "$APP_DIR/bundle.js" -``` - -But we're not done yet! After we compile Rust into WASM and link WASM to Javascript (via -`wasm-bindgen` and `webpack`), we still have to make an Electron app. For this purpose I used a -starter app from [Electron Forge], and then a -[`prestart` script](https://github.com/speice-io/isomorphic-rust/blob/master/percy/package.json#L8) -to actually handle starting the application. - -The -[final toolchain](https://github.com/speice-io/isomorphic-rust/blob/master/percy/package.json#L8) -looks something like this: - -- `yarn start` triggers the `prestart` script -- `prestart` checks for missing tools (`wasm-bindgen-cli`, etc.) and then: - - Uses `cargo` to compile the Rust code into WASM - - Uses `wasm-bindgen` to link the WASM blob into a Javascript file with exported symbols - - Uses `webpack` to bundle the page start script with the Javascript we just generated - - Uses `babel` under the hood to compile the `wasm-bindgen` code down from ES6 into something - browser-compatible -- The `start` script runs an Electron Forge handler to do some sanity checks -- Electron actually starts - -...which is complicated. I think more work needs to be done to either build a high-quality starter -app that can manage these steps, or another tool that "just handles" the complexity of linking a -compiled WASM file into something the Electron browser can run. - -# Issue the Second: WASM tools in Rust - -For as much as I didn't enjoy the Javascript tooling needed to interface with Rust, the Rust-only -bits aren't any better at the moment. I get it, a lot of projects are just starting off, and that -leads to a fragmented ecosystem. Here's what I can recommend as a starting point: - -Don't check in your `Cargo.lock` files to version control. If there's a disagreement between the -version of `wasm-bindgen-cli` you have installed and the `wasm-bindgen` you're compiling with in -`Cargo.lock`, you get a nasty error: - -``` -it looks like the Rust project used to create this wasm file was linked against -a different version of wasm-bindgen than this binary: - -rust wasm file: 0.2.21 - this binary: 0.2.17 - -Currently the bindgen format is unstable enough that these two version must -exactly match, so it's required that these two version are kept in sync by -either updating the wasm-bindgen dependency or this binary. -``` - -Not that I ever managed to run into this myself (_coughs nervously_). 

There are two projects attempting to be "application frameworks": [percy] and [yew]. Between those, I managed to get [two](https://github.com/speice-io/isomorphic-rust/tree/master/percy) [examples](https://github.com/speice-io/isomorphic-rust/tree/master/percy_patched_webpack) running using `percy`, but was unable to get an [example](https://github.com/speice-io/isomorphic-rust/tree/master/yew) running with `yew` because of issues with "missing modules" during the `webpack` step:

```sh
ERROR in ./dist/electron_yew_wasm_bg.wasm
Module not found: Error: Can't resolve 'env' in '/home/bspeice/Development/isomorphic_rust/yew/dist'
 @ ./dist/electron_yew_wasm_bg.wasm
 @ ./dist/electron_yew_wasm.js
 @ ./dist/app.js
 @ ./dist/app_loader.js
```

If you want to work with the browser APIs directly, your choices are [percy-webapis] or [stdweb] (or eventually [web-sys]). See above for my `percy` examples, but when I tried [an example with `stdweb`](https://github.com/speice-io/isomorphic-rust/tree/master/stdweb), I was unable to get it running:

```sh
ERROR in ./dist/stdweb_electron_bg.wasm
Module not found: Error: Can't resolve 'env' in '/home/bspeice/Development/isomorphic_rust/stdweb/dist'
 @ ./dist/stdweb_electron_bg.wasm
 @ ./dist/stdweb_electron.js
 @ ./dist/app_loader.js
```

At this point I'm pretty convinced that `stdweb` is causing issues for `yew` as well, but I can't prove it.

I did also get a [minimal example](https://github.com/speice-io/isomorphic-rust/tree/master/minimal) running that doesn't depend on any tools besides `wasm-bindgen`. However, it requires manually writing "`extern C`" blocks for everything you need from the browser (there's a sketch of what this looks like at the end of this section). Es no bueno.

Finally, from a tools and platform view, there are two up-and-coming packages that should be mentioned: [js-sys] and [web-sys]. Their purpose is to be the fundamental building blocks that expose the browser's APIs to Rust. If you're interested in building an app framework from scratch, these should give you the most flexibility. I didn't touch either in my research, though I expect them to be essential long-term.

So there's a lot in play from the Rust side of things, and it's just going to take some time to figure out what works and what doesn't.
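
For the curious, here's roughly what those manual bindings look like. This is a minimal sketch assuming only `wasm-bindgen`, with the browser's `alert` function standing in for whatever APIs your app actually needs:

```rust
use wasm_bindgen::prelude::*;

// Declare the browser functions we want to call; `wasm-bindgen`
// generates the Javascript glue that wires these up at load time.
#[wasm_bindgen]
extern "C" {
    fn alert(s: &str);
}

// Exported to Javascript, callable once the WASM blob is instantiated.
#[wasm_bindgen]
pub fn greet() {
    alert("It's alive!");
}
```

Multiply that by every DOM call an application makes, and it's clear why letting a framework (or eventually `web-sys`) own these declarations is attractive.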

# Issue the Third: Known Unknowns

Alright, so after I managed to get an application started, I stopped there. It was a good deal of effort to chain together even a proof of concept, and at this point I'd rather learn [Typescript] than keep trying to maintain an incredibly brittle pipeline. Blasphemy, I know...

The important point I want to make is that there's a lot that's unknown about how any of this holds up outside proofs of concept. Things I didn't attempt:

- Testing
- Packaging
- Updates
- Literally anything related to why I wanted to use Electron in the first place

# What it Would Take

Much as I don't like Javascript, the tools are too shaky for me to recommend mixing Electron and WASM at the moment. There's a lot of innovation happening, so who knows? Someone might have an application in production a couple of months from now. But at the moment, I'm personally going to stay away.

Let's finish with a wishlist then - here are the things that I think need to happen before Electron/WASM/Rust can become a thing:

- Webpack still needs some updates. The necessary work is in progress, but hasn't landed yet ([#7983](https://github.com/webpack/webpack/pull/7983))
- Browser API libraries (`web-sys` and `stdweb`) need to make sure they can support running in Electron (see the module error above)
- Projects need to stabilize. There's talk of `stdweb` being turned into a Rust API [on top of web-sys](https://github.com/rustwasm/team/issues/226#issuecomment-418475778), and percy [moving to web-sys](https://github.com/chinedufn/percy/issues/24), both of which are big changes
- `wasm-bindgen` is great, but still in the "move fast and break things" phase
- A good "boilerplate" app would dramatically simplify the start-up costs; [electron-react-boilerplate](https://github.com/chentsulin/electron-react-boilerplate) comes to mind as a good project to imitate
- More blog posts/contributors! I think Electron + Rust could be cool, but I have no idea what I'm doing

[wxwidgets]: https://wxwidgets.org/
[libui-rs]: https://github.com/LeoTindall/libui-rs/
[electron]: https://electronjs.org/
[babel]: https://babeljs.io/
[wxrust]: https://github.com/kenz-gelsoft/wxRust
[wasm-bindgen]: https://github.com/rustwasm/wasm-bindgen
[js-sys]: https://crates.io/crates/js-sys
[percy-webapis]: https://crates.io/crates/percy-webapis
[stdweb]: https://crates.io/crates/stdweb
[web-sys]: https://crates.io/crates/web-sys
[percy]: https://chinedufn.github.io/percy/
[virtual-dom-rs]: https://crates.io/crates/virtual-dom-rs
[yew]: https://github.com/DenisKolodin/yew
[react]: https://reactjs.org/
[elm]: http://elm-lang.org/
[asm.js]: http://asmjs.org/
[emscripten]: https://kripken.github.io/emscripten-site/
[typescript]: https://www.typescriptlang.org/
[electron forge]: https://electronforge.io/
[conrod]: https://github.com/PistonDevelopers/conrod
[webpack]: https://webpack.js.org/

diff --git a/_posts/2018-10-08-case-study-optimization.md b/_posts/2018-10-08-case-study-optimization.md
deleted file mode 100644
index 00f24ae..0000000
--- a/_posts/2018-10-08-case-study-optimization.md
+++ /dev/null
@@ -1,168 +0,0 @@
---
layout: post
title: "A Case Study in Heaptrack"
description: "...because you don't need no garbage collection"
category:
tags: []
---

One of my earliest conversations about programming went like this:

> Programmers have it too easy these days. They should learn to develop in low memory environments
> and be more efficient.
>
> -- My Father (paraphrased)

...though it's not like the first code I wrote was for a [graphing calculator](https://education.ti.com/en/products/calculators/graphing-calculators/ti-84-plus-se) packing a whole 24KB of RAM. By the way, _what are you doing on my lawn?_

The principle remains though: be efficient with the resources you have, because [what Intel giveth, Microsoft taketh away](http://exo-blog.blogspot.com/2007/09/what-intel-giveth-microsoft-taketh-away.html). My professional work is focused on this kind of efficiency; low-latency financial markets demand that you understand at a deep level _exactly_ what your code is doing. As I continue experimenting with Rust for personal projects, it's exciting to bring a utilitarian mindset with me: there's flexibility for the times I pretend to have a garbage collector, and flexibility for the times that I really care about how memory is used.

This post is a (small) case study in how I went from the former to the latter. And ultimately, it's intended to be a starting toolkit to empower analysis of your own code.

# Curiosity

When I first started building the [dtparse] crate, my intention was to mirror as closely as possible the equivalent [Python library][dateutil]. Python, as you may know, is garbage collected. Very rarely is memory usage considered in Python, and I likewise wasn't paying too much attention when `dtparse` was first being built.

This lackadaisical approach to memory works well enough, and I'm not planning on making `dtparse` hyper-efficient. But every so often, I've wondered: "what exactly is going on in memory?" With the advent of Rust 1.28 and the [Global Allocator trait](https://doc.rust-lang.org/std/alloc/trait.GlobalAlloc.html), I had a really great idea: _build a custom allocator that allows you to track your own allocations._ That way, you can do things like writing tests for both correct results and correct memory usage. I gave it a [shot][qadapt], but learned very quickly: **never write your own allocator**. It went from "fun weekend project" to "I have literally no idea what my computer is doing" at breakneck speed.

Instead, I'll highlight a separate path I took to make sense of my memory usage: [heaptrack].

# Turning on the System Allocator

This is the hardest part of the post. Because Rust uses [its own allocator](https://github.com/rust-lang/rust/pull/27400#issue-41256384) by default, `heaptrack` is unable to properly record unmodified Rust code. To remedy this, we'll make use of the `#[global_allocator]` attribute.

Specifically, in `lib.rs` or `main.rs`, add this:

```rust
use std::alloc::System;

#[global_allocator]
static GLOBAL: System = System;
```

...and that's it. Everything else comes essentially for free.

# Running heaptrack

Assuming you've installed heaptrack (Homebrew on Mac, your package manager on Linux, and ??? on Windows), all that's left is to fire up your application:

```
heaptrack my_application
```

It's that easy. After the program finishes, you'll see a file in your local directory with a name like `heaptrack.my_application.XXXX.gz`. If you load that up in `heaptrack_gui`, you'll see something like this:

![heaptrack](/assets/images/2018-10-heaptrack/heaptrack-before.png)

---

And even these pretty colors:

![pretty colors](/assets/images/2018-10-heaptrack/heaptrack-flamegraph.png)

# Reading Flamegraphs

To make sense of our memory usage, we're going to focus on that last picture - it's called a ["flamegraph"](http://www.brendangregg.com/flamegraphs.html). These charts are typically used to show how much time your program spends executing each function, but they're used here to show how much memory was allocated during those functions instead.

For example, we can see that all executions happened during the `main` function:

![allocations in main](/assets/images/2018-10-heaptrack/heaptrack-main-colorized.png)

...and within that, all allocations happened during `dtparse::parse`:

![allocations in dtparse](/assets/images/2018-10-heaptrack/heaptrack-dtparse-colorized.png)

...and within _that_, allocations happened in two different places:

![allocations in parseinfo](/assets/images/2018-10-heaptrack/heaptrack-parseinfo-colorized.png)

Now I apologize that it's hard to see, but there's one area specifically that stuck out as an issue: **what the heck is the `Default` thing doing?**

![pretty colors](/assets/images/2018-10-heaptrack/heaptrack-flamegraph-default.png)

# Optimizing dtparse

See, I knew that there were some allocations during calls to `dtparse::parse`, but I was totally wrong about where the bulk of allocations occurred in my program. Let me post the code and see if you can spot the mistake:

```rust
/// Main entry point for using `dtparse`.
pub fn parse(timestr: &str) -> ParseResult<(NaiveDateTime, Option<FixedOffset>)> {
    let res = Parser::default().parse(
        timestr, None, None, false, false,
        None, false,
        &HashMap::new(),
    )?;

    Ok((res.0, res.1))
}
```

> [dtparse](https://github.com/bspeice/dtparse/blob/4d7c5dd99572823fa4a390b483c38ab020a2172f/src/lib.rs#L1286)

---

Because `Parser::parse` requires a mutable reference to itself, I have to create a new `Parser::default` every time it receives a string. This is excessive! We'd rather have an immutable parser that can be re-used, and avoid allocating memory in the first place.

Armed with that information, I put in some time to [make the parser immutable](https://github.com/bspeice/dtparse/commit/741afa34517d6bc1155713bbc5d66905fea13fad#diff-b4aea3e418ccdb71239b96952d9cddb6). Now that I can re-use the same parser over and over, the allocations disappear:

![allocations cleaned up](/assets/images/2018-10-heaptrack/heaptrack-flamegraph-after.png)
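
The shape of the fix, roughly: build one parser up front and share it across every call. This is a sketch rather than the exact change (the linked commit is authoritative), using the [`lazy_static`](https://crates.io/crates/lazy_static) crate, with `Parser`, `ParseResult`, and the date types coming from the surrounding crate:

```rust
use std::collections::HashMap;
use lazy_static::lazy_static;

lazy_static! {
    // Constructed once, on first use, and shared by every call to `parse`
    static ref DEFAULT_PARSER: Parser = Parser::default();
}

pub fn parse(timestr: &str) -> ParseResult<(NaiveDateTime, Option<FixedOffset>)> {
    // With `Parser::parse` taking `&self`, there's no new parser (and none
    // of its allocations) per invocation
    let res = DEFAULT_PARSER.parse(
        timestr, None, None, false, false,
        None, false,
        &HashMap::new(),
    )?;

    Ok((res.0, res.1))
}
```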

In total, we went from requiring 2 MB of memory in [version 1.0.2](https://crates.io/crates/dtparse/1.0.2):

![memory before](/assets/images/2018-10-heaptrack/heaptrack-closeup.png)

All the way down to 300KB in [version 1.0.3](https://crates.io/crates/dtparse/1.0.3):

![memory after](/assets/images/2018-10-heaptrack/heaptrack-closeup-after.png)

# Conclusion

In the end, you don't need to write a custom allocator to be efficient with memory; great tools already exist to help you understand what your program is doing.

**Use them.**

Given that [Moore's Law](https://en.wikipedia.org/wiki/Moore%27s_law) is [dead](https://www.technologyreview.com/s/601441/moores-law-is-dead-now-what/), we've all got to do our part to take back what Microsoft stole.

[dtparse]: https://crates.io/crates/dtparse
[dateutil]: https://github.com/dateutil/dateutil
[heaptrack]: https://github.com/KDE/heaptrack
[qadapt]: https://crates.io/crates/qadapt

diff --git a/_posts/2018-12-04-what-small-business-really-means.md b/_posts/2018-12-04-what-small-business-really-means.md
deleted file mode 100644
index dce374d..0000000
--- a/_posts/2018-12-04-what-small-business-really-means.md
+++ /dev/null
@@ -1,34 +0,0 @@
---
layout: post
title: 'More "What Companies Really Mean"'
description: 'when they ask "Why should we hire you?"'
category:
tags: []
---

I recently stumbled across a phenomenal small article entitled [What Startups Really Mean By "Why Should We Hire You?"](https://angel.co/blog/what-startups-really-mean-by-why-should-we-hire-you). Having been interviewed by smaller companies (though not exactly startups), I can confirm the questions and subtexts are the same. There's often a question behind the question that you're actually trying to answer, and I wish I had spotted the nuance earlier in my career.

Let me also make note of one more question/euphemism I've come across:

# How do you feel about Production Support?

**Translation**: _We're a fairly small team, and when things break on an evening/weekend/Christmas Day, can we call on you to be there?_

I've met decidedly few people in my life who truly enjoy the "ops" side of "devops". They're incredibly good at taking an impossible problem and some pre-existing knowledge of arcane arts, and turning them into a functioning system at the end. And if they all left for lunch, we probably wouldn't make it out the door before the zombie apocalypse.

Larger organizations (in my experience, 500+ person organizations) have the luxury of hiring people who either enjoy that, or play along nicely enough that our systems keep working.

Small teams have no such luck. If you're interviewing at a small company, especially as a "data scientist" or other somesuch position, be aware that systems can and do spontaneously combust at the most inopportune moments.

**Terrible-but-popular answers include**: _It's a part of the job, and I'm happy to contribute._

diff --git a/_posts/2018-12-15-allocation-safety.md b/_posts/2018-12-15-allocation-safety.md
deleted file mode 100644
index 7892856..0000000
--- a/_posts/2018-12-15-allocation-safety.md
+++ /dev/null
@@ -1,218 +0,0 @@
---
layout: post
title: "QADAPT - debug_assert! for your memory usage"
description: "...and why you want an allocator that goes 💥."
category:
tags: []
---

I think it's part of the human condition to ignore perfectly good advice when it comes our way. A bit over a month ago, I was dispensing sage wisdom for the ages:

> I had a really great idea: build a custom allocator that allows you to track your own allocations.
> I gave it a shot, but learned very quickly: **never write your own allocator.**
>
> -- [me](/2018/10/case-study-optimization.html)

I proceeded to ignore it, because we never really learn from our mistakes.

There's another part of the human condition that derives joy from seeing things explode.

And _that's_ the part I'm going to focus on.

# Why an Allocator?

So why, after complaining about allocators, would I still want to write one? There are three reasons for that:

1. Allocation/dropping is slow
2. It's difficult to know exactly when Rust will allocate or drop, especially when using code that you did not write
3. I want automated tools to verify behavior, instead of inspecting by hand

When I say "slow," it's important to define the terms. If you're writing web applications, you'll spend orders of magnitude more time waiting for the database than you will the allocator. However, there's still plenty of code where microseconds or nanoseconds matter; think [finance](https://www.youtube.com/watch?v=NH1Tta7purM), [real-time audio](https://www.reddit.com/r/rust/comments/9hg7yj/synthesizer_progress_update/e6c291f), [self-driving cars](https://polysync.io/blog/session-types-for-hearty-codecs/), and [networking](https://carllerche.github.io/bytes/bytes/index.html). In these situations it's simply unacceptable for you to spend time doing things that are not your program, and waiting on the allocator is not cool.

As I continue to learn Rust, it's difficult for me to predict where exactly allocations will happen. So, I propose we play a quick trivia game: **Does this code invoke the allocator?**

## Example 1

```rust
fn my_function() {
    let v: Vec<u8> = Vec::new();
}
```

**No**: Rust [knows how big](https://doc.rust-lang.org/std/mem/fn.size_of.html) the `Vec` type is, and reserves a fixed amount of memory on the stack for the `v` vector. However, if we wanted to reserve extra space (using `Vec::with_capacity`) the allocator would get invoked.

## Example 2

```rust
fn my_function() {
    let v: Box<Vec<u8>> = Box::new(Vec::new());
}
```

**Yes**: Because Boxes allow us to work with things that are of unknown size, it has to allocate on the heap. While the `Box` is unnecessary in this snippet (release builds will optimize out the allocation), reserving heap space more generally is needed to pass a dynamically sized type to another function.

## Example 3

```rust
fn my_function(mut v: Vec<u8>) {
    v.push(5);
}
```

**Maybe**: Depending on whether the Vector we were given has space available, we may or may not allocate. Especially when dealing with code that you did not author, it's difficult to verify that things behave as you expect them to.

# Blowing Things Up

So, how exactly does QADAPT solve these problems? **Whenever an allocation or drop occurs in code marked allocation-safe, QADAPT triggers a thread panic.** We don't want to let the program continue as if nothing strange happened; _we want things to explode_.

However, you don't want code to panic in production because of circumstances you didn't predict.
Just like [`debug_assert!`](https://doc.rust-lang.org/std/macro.debug_assert.html), **QADAPT will strip out its own code when building in release mode to guarantee no panics and no performance impact.**

Finally, there are three ways to have QADAPT check that your code will not invoke the allocator:

## Using a procedural macro

The easiest method: watch an entire function for allocator invocation:

```rust
use qadapt::no_alloc;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

#[no_alloc]
fn push_vec(v: &mut Vec<u8>) {
    // This triggers a panic if v.len() == v.capacity()
    v.push(5);
}

fn main() {
    let mut v = Vec::with_capacity(1);

    // This will *not* trigger a panic
    push_vec(&mut v);

    // This *will* trigger a panic
    push_vec(&mut v);
}
```

## Using a regular macro

For times when you need more precision:

```rust
use qadapt::assert_no_alloc;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

fn main() {
    let mut v = Vec::with_capacity(1);

    // No allocations here, we already have space reserved
    assert_no_alloc!(v.push(5));

    // Even though we remove an item, it doesn't trigger a drop
    // because it's a scalar. If it were a `Box<_>` type,
    // a drop would trigger.
    assert_no_alloc!({
        v.pop().unwrap();
    });
}
```

## Using function calls

Both the most precise and most tedious:

```rust
use qadapt::enter_protected;
use qadapt::exit_protected;
use qadapt::QADAPT;

#[global_allocator]
static Q: QADAPT = QADAPT;

fn main() {
    // This triggers an allocation (on non-release builds)
    let mut v = Vec::with_capacity(1);

    enter_protected();
    // This does not trigger an allocation because we've reserved size
    v.push(0);
    exit_protected();

    // This triggers an allocation because we ran out of size,
    // but doesn't panic because we're no longer protected.
    v.push(1);
}
```

## Caveats

It's important to point out that QADAPT code is synchronous, so please be careful when mixing in asynchronous functions:

```rust
use futures::future::Future;
use futures::future::ok;
use qadapt::{assert_no_alloc, no_alloc};

#[no_alloc]
fn async_capacity() -> impl Future<Item = Vec<u8>, Error = ()> {
    ok(12).and_then(|e| Ok(Vec::with_capacity(e)))
}

fn main() {
    // This doesn't trigger a panic because the `and_then` closure
    // wasn't run during the function call.
    async_capacity();

    // Still no panic
    assert_no_alloc!(async_capacity());

    // This will panic because the allocation happens during `unwrap`
    // in the `assert_no_alloc!` macro
    assert_no_alloc!(async_capacity().poll().unwrap());
}
```

# Conclusion

While there's a lot more to writing high-performance code than managing your usage of the allocator, it's critical that you do use the allocator correctly. QADAPT will verify that your code is doing what you expect. It's usable even on stable Rust from version 1.31 onward, which isn't the case for most allocators. Version 1.0 was released today, and you can check it out over at [crates.io](https://crates.io/crates/qadapt) or on [github](https://github.com/bspeice/qadapt).

I'm hoping to write more about high-performance Rust in the future, and I expect that QADAPT will help guide that. If there are topics you're interested in, let me know in the comments below!

[qadapt]: https://crates.io/crates/qadapt

diff --git a/_posts/2019-02-04-understanding-allocations-in-rust.md b/_posts/2019-02-04-understanding-allocations-in-rust.md
deleted file mode 100644
index 48b9df6..0000000
--- a/_posts/2019-02-04-understanding-allocations-in-rust.md
+++ /dev/null
@@ -1,113 +0,0 @@
---
layout: post
title: "Allocations in Rust"
description: "An introduction to the memory model."
category:
tags: [rust, understanding-allocations]
---

There's an alchemy of distilling complex technical topics into articles and videos that change the way programmers see the tools they interact with on a regular basis. I knew what a linker was, but there's a staggering amount of complexity in between [the OS and `main()`](https://www.youtube.com/watch?v=dOfucXtyEsU). Rust programmers use the [`Box`](https://doc.rust-lang.org/stable/std/boxed/struct.Box.html) type all the time, but there's a rich history of the Rust language itself wrapped up in [how special it is](https://manishearth.github.io/blog/2017/01/10/rust-tidbits-box-is-special/).

In a similar vein, this series attempts to look at code and understand how memory is used; the complex choreography of operating system, compiler, and program that frees you to focus on functionality far-flung from frivolous book-keeping. The Rust compiler relieves a great deal of the cognitive burden associated with memory management, but we're going to step into its world for a while.

Let's learn a bit about memory in Rust.

# Table of Contents

This series is intended as both learning and reference material; we'll work through the different memory types Rust uses, and explain the implications of each. Ultimately, a summary will be provided as a cheat sheet for easy future reference. To that end, a table of contents is in order:

- Foreword
- [Global Memory Usage: The Whole World](/2019/02/the-whole-world.html)
- [Fixed Memory: Stacking Up](/2019/02/stacking-up.html)
- [Dynamic Memory: A Heaping Helping](/2019/02/a-heaping-helping.html)
- [Compiler Optimizations: What It's Done For You Lately](/2019/02/compiler-optimizations.html)
- [Summary: What Are the Rules?](/2019/02/summary.html)

# Foreword

Rust's three defining features of [Performance, Reliability, and Productivity](https://www.rust-lang.org/) are all driven to a great degree by how the Rust compiler understands memory usage. Unlike managed memory languages (Java, Python), Rust [doesn't really](https://words.steveklabnik.com/borrow-checking-escape-analysis-and-the-generational-hypothesis) garbage collect; instead, it uses an [ownership](https://doc.rust-lang.org/book/ch04-01-what-is-ownership.html) system to reason about how long objects will last in your program. In some cases, if the life of an object is fairly transient, Rust can make use of a very fast region called the "stack." When that's not possible, Rust uses [dynamic (heap) memory](https://en.wikipedia.org/wiki/Memory_management#Dynamic_memory_allocation) and the ownership system to ensure you can't accidentally corrupt memory. It's not as fast, but it is important to have available.

That said, there are specific situations in Rust where you'd never need to worry about the stack/heap distinction! If you:

1. Never use `unsafe`
2. Never use `#![feature(alloc)]` or the [`alloc` crate](https://doc.rust-lang.org/alloc/index.html)

...then it's not possible for you to use dynamic memory!

For some uses of Rust, typically embedded devices, these constraints are OK.
They have very limited memory, and the program binary size itself may significantly affect what's available! There's no operating system able to manage this ["virtual memory"](https://en.wikipedia.org/wiki/Virtual_memory) thing, but that's not an issue because there's only one running application. The [embedonomicon](https://docs.rust-embedded.org/embedonomicon/preface.html) is ever in mind, and interacting with the "real world" through extra peripherals is accomplished by reading and writing to [specific memory addresses](https://bob.cs.sonoma.edu/IntroCompOrg-RPi/sec-gpio-mem.html).

Most Rust programs find these requirements overly burdensome though. C++ developers would struggle without access to [`std::vector`](https://en.cppreference.com/w/cpp/container/vector) (except those hardcore no-STL people), and Rust developers would struggle without [`std::vec`](https://doc.rust-lang.org/std/vec/struct.Vec.html). But with the constraints above, `std::vec` is actually a part of the [`alloc` crate](https://doc.rust-lang.org/alloc/vec/struct.Vec.html), and thus off-limits. `Box`, `Rc`, etc., are also unusable for the same reason.

Whether writing code for embedded devices or not, the important thing in both situations is how much you know _before your application starts_ about what its memory usage will look like. In embedded devices, there's a small, fixed amount of memory to use. In a browser, you have no idea how large [google.com](https://www.google.com)'s home page is until you start trying to download it. The compiler uses this knowledge (or lack thereof) to optimize how memory is used; put simply, your code runs faster when the compiler can guarantee exactly how much memory your program needs while it's running. This series is all about understanding how the compiler reasons about your program, with an emphasis on the implications for performance.

Now let's address some conditions and caveats before going much further:

- We'll focus on "safe" Rust only; `unsafe` lets you use platform-specific allocation APIs ([`malloc`](https://www.tutorialspoint.com/c_standard_library/c_function_malloc.htm)) that we'll ignore.
- We'll assume a "debug" build of Rust code (what you get with `cargo run` and `cargo test`) and address (pun intended) release mode at the end (`cargo run --release` and `cargo test --release`).
- All content will be run using Rust 1.32, as that's the highest currently supported in the [Compiler Explorer](https://godbolt.org/). As such, we'll avoid upcoming innovations like [compile-time evaluation of `static`](https://github.com/rust-lang/rfcs/blob/master/text/0911-const-fn.md) that are available in nightly.
- Because of the nature of the content, being able to read assembly is helpful. We'll keep it simple, but I [found](https://stackoverflow.com/a/4584131/1454178) a [refresher](https://stackoverflow.com/a/26026278/1454178) on the `push` and `pop` [instructions](http://www.cs.virginia.edu/~evans/cs216/guides/x86.html) was helpful while writing this.
- I've tried to be precise in saying only what I can prove using the tools (ASM, docs) that are available, but if there's something said in error, it will be corrected expeditiously. Please let me know at [bradlee@speice.io](mailto:bradlee@speice.io).

Finally, I'll do what I can to flag potential future changes, but the Rust docs have a notice worth repeating:

> Rust does not currently have a rigorously and formally defined memory model.
>
> -- [the docs](https://doc.rust-lang.org/std/ptr/fn.read_volatile.html)

diff --git a/_posts/2019-02-05-the-whole-world.md b/_posts/2019-02-05-the-whole-world.md
deleted file mode 100644
index ef3bc47..0000000
--- a/_posts/2019-02-05-the-whole-world.md
+++ /dev/null
@@ -1,337 +0,0 @@
---
layout: post
title: "Global Memory Usage: The Whole World"
description: "Static considered slightly less harmful."
category:
tags: [rust, understanding-allocations]
---

The first memory type we'll look at is pretty special: when Rust can prove that a _value_ is fixed for the life of a program (`const`), and when a _reference_ is unique for the life of a program (`static` as a declaration, not [`'static`](https://doc.rust-lang.org/book/ch10-03-lifetime-syntax.html#the-static-lifetime) as a lifetime), we can make use of global memory. This special section of data is embedded directly in the program binary so that variables are ready to go once the program loads; no additional computation is necessary.

Understanding the value/reference distinction is important for reasons we'll go into below, and while the [full specification](https://github.com/rust-lang/rfcs/blob/master/text/0246-const-vs-static.md) for these two keywords is available, we'll take a hands-on approach to the topic.

# **const**

When a _value_ is guaranteed to be unchanging in your program (where "value" may be scalars, `struct`s, etc.), you can declare it `const`. This tells the compiler that it's safe to treat the value as never changing, and enables some interesting optimizations; not only is there no initialization cost to creating the value (it is loaded at the same time as the executable parts of your program), but the compiler can also copy the value around if it speeds up the code.

The points we need to address when talking about `const` are:

- `const` values are stored in read-only memory; it's impossible to modify them.
- Values resulting from calling a `const fn` are materialized at compile-time.
- The compiler may (or may not) copy `const` values wherever it chooses.

## Read-Only

The first point is a bit strange: "read-only memory." [The Rust book](https://doc.rust-lang.org/book/ch03-01-variables-and-mutability.html#differences-between-variables-and-constants) mentions in a couple places that using `mut` with constants is illegal, but it's also important to demonstrate just how immutable they are. _Typically_ in Rust you can use [interior mutability](https://doc.rust-lang.org/book/ch15-05-interior-mutability.html) to modify things that aren't declared `mut`. [`RefCell`](https://doc.rust-lang.org/std/cell/struct.RefCell.html) provides an example of this pattern in action:

```rust
use std::cell::RefCell;

fn my_mutator(cell: &RefCell<u8>) {
    // Even though we're given an immutable reference,
    // the `replace` method allows us to modify the inner value.
    cell.replace(14);
}

fn main() {
    let cell = RefCell::new(25);
    // Prints out 25
    println!("Cell: {:?}", cell);
    my_mutator(&cell);
    // Prints out 14
    println!("Cell: {:?}", cell);
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=8e4bea1a718edaff4507944e825a54b2)

When `const` is involved though, interior mutability is impossible:

```rust
use std::cell::RefCell;

const CELL: RefCell<u8> = RefCell::new(25);

fn my_mutator(cell: &RefCell<u8>) {
    cell.replace(14);
}

fn main() {
    // First line prints 25 as expected
    println!("Cell: {:?}", &CELL);
    my_mutator(&CELL);
    // Second line *still* prints 25
    println!("Cell: {:?}", &CELL);
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=88fe98110c33c1b3a51e341f48b8ae00)

And a second example using [`Once`](https://doc.rust-lang.org/std/sync/struct.Once.html):

```rust
use std::sync::Once;

const SURPRISE: Once = Once::new();

fn main() {
    // This is how `Once` is supposed to be used
    SURPRISE.call_once(|| println!("Initializing..."));
    // Because `Once` is a `const` value, we never record it
    // having been initialized the first time, and this closure
    // will also execute.
    SURPRISE.call_once(|| println!("Initializing again???"));
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=c3cc5979b5e5434eca0f9ec4a06ee0ed)

When the [`const` specification](https://github.com/rust-lang/rfcs/blob/26197104b7bb9a5a35db243d639aee6e46d35d75/text/0246-const-vs-static.md) refers to ["rvalues"](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3055.pdf), this is the behavior it's describing. [Clippy](https://github.com/rust-lang/rust-clippy) will treat this as an error, but it's still something to be aware of.

## Initialization == Compilation

The next thing to mention is that `const` values are loaded into memory _as part of your program binary_. Because of this, any `const` values declared in your program will be "realized" at compile-time; accessing them may trigger a main-memory lookup (with a fixed address, so your CPU may be able to prefetch the value), but that's it.

```rust
use std::cell::RefCell;

const CELL: RefCell<u32> = RefCell::new(24);

pub fn multiply(value: u32) -> u32 {
    // CELL is stored at `.L__unnamed_1`
    value * (*CELL.get_mut())
}
```

-- [Compiler Explorer](https://godbolt.org/z/Th8boO)

The compiler creates one `RefCell`, uses it everywhere, and never needs to call the `RefCell::new` function.

## Copying

If it's helpful though, the compiler can choose to copy `const` values.

```rust
const FACTOR: u32 = 1000;

pub fn multiply(value: u32) -> u32 {
    // See assembly line 4 for the `mov edi, 1000` instruction
    value * FACTOR
}

pub fn multiply_twice(value: u32) -> u32 {
    // See assembly lines 22 and 29 for `mov edi, 1000` instructions
    value * FACTOR * FACTOR
}
```

-- [Compiler Explorer](https://godbolt.org/z/ZtS54X)

In this example, the `FACTOR` value is turned into the `mov edi, 1000` instruction in both the `multiply` and `multiply_twice` functions; the "1000" value is never "stored" anywhere, as it's small enough to inline into the assembly instructions.

Finally, getting the address of a `const` value is possible, but not guaranteed to be unique (because the compiler can choose to copy values).
I was unable to get non-unique pointers in my testing (even using different crates), but the specifications are clear enough: _don't rely on pointers to `const` values being consistent_. To be frank, caring about locations for `const` values is almost certainly a code smell.

# **static**

Static variables are related to `const` variables, but take a slightly different approach. When you declare that a _reference_ is unique for the life of a program, you have a `static` variable (unrelated to the `'static` lifetime). Because of the reference/value distinction with `const`/`static`, static variables behave much more like typical "global" variables.

But to understand `static`, here's what we'll look at:

- `static` variables are globally unique locations in memory.
- Like `const`, `static` variables are loaded at the same time as your program being read into memory.
- All `static` variables must implement the [`Sync`](https://doc.rust-lang.org/std/marker/trait.Sync.html) marker trait.
- Interior mutability is safe and acceptable when using `static` variables.

## Memory Uniqueness

The single biggest difference between `const` and `static` is the guarantees provided about uniqueness. Where `const` variables may or may not be copied in code, `static` variables are guaranteed to be unique. If we take a previous `const` example and change it to `static`, the difference should be clear:

```rust
static FACTOR: u32 = 1000;

pub fn multiply(value: u32) -> u32 {
    // The assembly to `mul dword ptr [rip + example::FACTOR]` is how FACTOR gets used
    value * FACTOR
}

pub fn multiply_twice(value: u32) -> u32 {
    // The assembly to `mul dword ptr [rip + example::FACTOR]` is how FACTOR gets used
    value * FACTOR * FACTOR
}
```

-- [Compiler Explorer](https://godbolt.org/z/uxmiRQ)

Where [previously](#copying) there were plenty of references to multiplying by 1000, the new assembly refers to `FACTOR` as a named memory location instead. No initialization work needs to be done, but the compiler can no longer prove the value never changes during execution.

## Initialization == Compilation

Next, let's talk about initialization. The simplest case is initializing static variables with either scalar or struct notation:

```rust
#[derive(Debug)]
struct MyStruct {
    x: u32
}

static MY_STRUCT: MyStruct = MyStruct {
    // You can even reference other statics
    // declared later
    x: MY_VAL
};

static MY_VAL: u32 = 24;

fn main() {
    println!("Static MyStruct: {:?}", MY_STRUCT);
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=b538dbc46076f12db047af4f4403ee6e)

Things can get a bit weirder when using `const fn` though. In most cases, it just works:

```rust
#[derive(Debug)]
struct MyStruct {
    x: u32
}

impl MyStruct {
    const fn new() -> MyStruct {
        MyStruct { x: 24 }
    }
}

static MY_STRUCT: MyStruct = MyStruct::new();

fn main() {
    println!("const fn Static MyStruct: {:?}", MY_STRUCT);
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=8c796a6e7fc273c12115091b707b0255)

However, there's a caveat: you're currently not allowed to use `const fn` to initialize static variables of types that aren't marked `Sync`.
For example, [`RefCell::new()`](https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.new) is a `const fn`, but because [`RefCell` isn't `Sync`](https://doc.rust-lang.org/std/cell/struct.RefCell.html#impl-Sync), you'll get an error at compile time:

```rust
use std::cell::RefCell;

// error[E0277]: `std::cell::RefCell<u8>` cannot be shared between threads safely
static MY_LOCK: RefCell<u8> = RefCell::new(0);
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=c76ef86e473d07117a1700e21fd45560)

It's likely that this will [change in the future](https://github.com/rust-lang/rfcs/blob/master/text/0911-const-fn.md) though.

## **Sync**

Which leads well into the next point: static variable types must implement the [`Sync` marker](https://doc.rust-lang.org/std/marker/trait.Sync.html). Because they're globally unique, it must be safe for you to access static variables from any thread at any time. Most `struct` definitions automatically implement the `Sync` trait because they contain only elements which themselves implement `Sync` (read more in the [Nomicon](https://doc.rust-lang.org/nomicon/send-and-sync.html)). This is why earlier examples could get away with initializing statics, even though we never included an `impl Sync for MyStruct` in the code. To demonstrate this property, Rust refuses to compile our earlier example if we add a non-`Sync` element to the `struct` definition:

```rust
use std::cell::RefCell;

struct MyStruct {
    x: u32,
    y: RefCell<u8>,
}

// error[E0277]: `std::cell::RefCell<u8>` cannot be shared between threads safely
static MY_STRUCT: MyStruct = MyStruct {
    x: 8,
    y: RefCell::new(8)
};
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=40074d0248f056c296b662dbbff97cfc)

## Interior Mutability

Finally, while `static mut` variables are allowed, mutating them is an `unsafe` operation. If we want to stay in `safe` Rust, we can use interior mutability to accomplish similar goals:

```rust
use std::sync::Once;

// This example adapted from https://doc.rust-lang.org/std/sync/struct.Once.html#method.call_once
static INIT: Once = Once::new();

fn main() {
    // Note that while `INIT` is declared immutable, we're still allowed
    // to mutate its interior
    INIT.call_once(|| println!("Initializing..."));
    // This code won't panic, as the interior of INIT was modified
    // as part of the previous `call_once`
    INIT.call_once(|| panic!("INIT was called twice!"));
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=3ba003a981a7ed7400240caadd384d59)

diff --git a/_posts/2019-02-06-stacking-up.md b/_posts/2019-02-06-stacking-up.md
deleted file mode 100644
index b060ea1..0000000
--- a/_posts/2019-02-06-stacking-up.md
+++ /dev/null
@@ -1,601 +0,0 @@
---
layout: post
title: "Fixed Memory: Stacking Up"
description: "We don't need no allocator."
category:
tags: [rust, understanding-allocations]
---

`const` and `static` are perfectly fine, but it's relatively rare that we know at compile-time about either values or references that will be the same for the duration of our program. Put another way, it's not often the case that either you or your compiler knows how much memory your entire program will ever need.

However, there are still some optimizations the compiler can do if it knows how much memory individual functions will need.
Specifically, the compiler can make use of "stack" memory (as opposed to "heap" memory) which can be managed far faster in both the short- and long-term. When requesting memory, the [`push` instruction](http://www.cs.virginia.edu/~evans/cs216/guides/x86.html) can typically complete in [1 or 2 cycles](https://agner.org/optimize/instruction_tables.ods) (<1 nanosecond on modern CPUs). Contrast that to heap memory, which requires an allocator (specialized software to track what memory is in use) to reserve space. When you're finished with stack memory, the `pop` instruction runs in 1-3 cycles, as opposed to an allocator needing to worry about memory fragmentation and other issues with the heap. All sorts of incredibly sophisticated techniques have been used to design allocators:

- [Garbage Collection](https://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29) strategies like [Tracing](https://en.wikipedia.org/wiki/Tracing_garbage_collection) (used in [Java](https://www.oracle.com/technetwork/java/javase/tech/g1-intro-jsp-135488.html)) and [Reference counting](https://en.wikipedia.org/wiki/Reference_counting) (used in [Python](https://docs.python.org/3/extending/extending.html#reference-counts))
- Thread-local structures to prevent locking the allocator in [tcmalloc](https://jamesgolick.com/2013/5/19/how-tcmalloc-works.html)
- Arena structures used in [jemalloc](http://jemalloc.net/), which [until recently](https://blog.rust-lang.org/2019/01/17/Rust-1.32.0.html#jemalloc-is-removed-by-default) was the primary allocator for Rust programs!

But no matter how fast your allocator is, the principle remains: the fastest allocator is the one you never use. As such, we're not going to discuss how exactly the [`push` and `pop` instructions work](http://www.cs.virginia.edu/~evans/cs216/guides/x86.html), but we'll focus instead on the conditions that enable the Rust compiler to use faster stack-based allocation for variables.

So, **how do we know when Rust will or will not use stack allocation for objects we create?** Looking at other languages, it's often easy to delineate between stack and heap. Managed memory languages (Python, Java, [C#](https://blogs.msdn.microsoft.com/ericlippert/2010/09/30/the-truth-about-value-types/)) place everything on the heap. JIT compilers ([PyPy](https://www.pypy.org/), [HotSpot](https://www.oracle.com/technetwork/java/javase/tech/index-jsp-136373.html)) may optimize some heap allocations away, but you should never assume it will happen. C makes things clear with calls to special functions (like [malloc(3)](https://linux.die.net/man/3/malloc)) needed to access heap memory. Old C++ has the [`new`](https://stackoverflow.com/a/655086/1454178) keyword, though modern C++/C++11 is more complicated with [RAII](https://en.cppreference.com/w/cpp/language/raii).

For Rust, we can summarize as follows: **stack allocation will be used for everything that doesn't involve "smart pointers" and collections**. We'll skip over a precise definition of the term "smart pointer" for now, and instead discuss what we should watch for to understand when stack and heap memory regions are used:

1. Stack manipulation instructions (`push`, `pop`, and `add`/`sub` of the `rsp` register) indicate allocation of stack memory:

   ```rust
   pub fn stack_alloc(x: u32) -> u32 {
       // Space for `y` is allocated by subtracting from `rsp`,
       // and then populated
       let y = [1u8, 2, 3, 4];
       // Space for `y` is deallocated by adding back to `rsp`
       x
   }
   ```

   -- [Compiler Explorer](https://godbolt.org/z/5WSgc9)

2. Tracking when exactly heap allocation calls occur is difficult. It's typically easier to watch for `call core::ptr::real_drop_in_place`, and infer that a heap allocation happened in the recent past:

   ```rust
   pub fn heap_alloc(x: usize) -> usize {
       // Space for elements in a vector has to be allocated
       // on the heap, and is then de-allocated once the
       // vector goes out of scope
       let y: Vec<u8> = Vec::with_capacity(x);
       x
   }
   ```

   -- [Compiler Explorer](https://godbolt.org/z/epfgoQ) (`real_drop_in_place` happens on line 1317)

   Note: While the [`Drop` trait](https://doc.rust-lang.org/std/ops/trait.Drop.html) is [called for stack-allocated objects](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=87edf374d8983816eb3d8cfeac657b46), the Rust standard library only defines `Drop` implementations for types that involve heap allocation.

3. If you don't want to inspect the assembly, use a custom allocator that's able to track and alert when heap allocations occur. Crates like [`alloc_counter`](https://crates.io/crates/alloc_counter) are designed for exactly this purpose.

With all that in mind, let's talk about situations in which we're guaranteed to use stack memory:

- Structs are created on the stack.
- Function arguments are passed on the stack, meaning the [`#[inline]` attribute](https://doc.rust-lang.org/reference/attributes.html#inline-attribute) will not change the memory region used.
- Enums and unions are stack-allocated.
- [Arrays](https://doc.rust-lang.org/std/primitive.array.html) are always stack-allocated.
- Closures capture their arguments on the stack.
- Generics will use stack allocation, even with dynamic dispatch.
- [`Copy`](https://doc.rust-lang.org/std/marker/trait.Copy.html) types are guaranteed to be stack-allocated, and copying them will be done in stack memory.
- [`Iterator`s](https://doc.rust-lang.org/std/iter/trait.Iterator.html) in the standard library are stack-allocated even when iterating over heap-based collections.

# Structs

The simplest case comes first. When creating vanilla `struct` objects, we use stack memory to hold their contents:

```rust
struct Point {
    x: u64,
    y: u64,
}

struct Line {
    a: Point,
    b: Point,
}

pub fn make_line() {
    // `origin` is stored in the first 16 bytes of memory
    // starting at location `rsp`
    let origin = Point { x: 0, y: 0 };
    // `point` makes up the next 16 bytes of memory
    let point = Point { x: 1, y: 2 };

    // When creating `ray`, we just move the content out of
    // `origin` and `point` into the next 32 bytes of memory
    let ray = Line { a: origin, b: point };
}
```

-- [Compiler Explorer](https://godbolt.org/z/vri9BE)

Note that while some extra-fancy instructions are used for memory manipulation in the assembly, the `sub rsp, 64` instruction indicates we're still working with the stack.

# Function arguments

Have you ever wondered how functions communicate with each other? Like, once the variables are given to you, everything's fine. But how do you "give" those variables to another function?
How do you get the results back afterward? The answer: the compiler arranges memory and assembly instructions using a pre-determined [calling convention](http://llvm.org/docs/LangRef.html#calling-conventions). This convention governs the rules around where arguments needed by a function will be located (either in memory offsets relative to the stack pointer `rsp`, or in other registers), and where the results can be found once the function has finished. And when multiple languages agree on what the calling conventions are, you can do things like having [Go call Rust code](https://blog.filippo.io/rustgo/)!

Put simply: it's the compiler's job to figure out how to call other functions, and you can assume that the compiler is good at its job.

We can see this in action using a simple example:

```rust
struct Point {
    x: i64,
    y: i64,
}

// We use integer division operations to keep
// the assembly clean, understanding the result
// isn't accurate.
fn distance(a: &Point, b: &Point) -> i64 {
    // Immediately subtract from `rsp` the bytes needed
    // to hold all the intermediate results - this is
    // the stack allocation step

    // The compiler used the `rdi` and `rsi` registers
    // to pass our arguments, so read them in
    let x1 = a.x;
    let x2 = b.x;
    let y1 = a.y;
    let y2 = b.y;

    // Do the actual math work
    let x_pow = (x1 - x2) * (x1 - x2);
    let y_pow = (y1 - y2) * (y1 - y2);
    let squared = x_pow + y_pow;
    squared / squared

    // Our final result will be stored in the `rax` register
    // so that our caller knows where to retrieve it.
    // Finally, add back to `rsp` the stack memory that is
    // now ready to be used by other functions.
}

pub fn total_distance() {
    let start = Point { x: 1, y: 2 };
    let middle = Point { x: 3, y: 4 };
    let end = Point { x: 5, y: 6 };

    let _dist_1 = distance(&start, &middle);
    let _dist_2 = distance(&middle, &end);
}
```

-- [Compiler Explorer](https://godbolt.org/z/Qmx4ST)

As a consequence of function arguments never using heap memory, we can also infer that functions using the `#[inline]` attributes also do not heap allocate. But better than inferring, we can look at the assembly to prove it:

```rust
struct Point {
    x: i64,
    y: i64,
}

// Note that there is no `distance` function in the assembly output,
// and the total line count goes from 229 with inlining off
// to 306 with inline on. Even still, no heap allocations occur.
#[inline(always)]
fn distance(a: &Point, b: &Point) -> i64 {
    let x1 = a.x;
    let x2 = b.x;
    let y1 = a.y;
    let y2 = b.y;

    let x_pow = (a.x - b.x) * (a.x - b.x);
    let y_pow = (a.y - b.y) * (a.y - b.y);
    let squared = x_pow + y_pow;
    squared / squared
}

pub fn total_distance() {
    let start = Point { x: 1, y: 2 };
    let middle = Point { x: 3, y: 4 };
    let end = Point { x: 5, y: 6 };

    let _dist_1 = distance(&start, &middle);
    let _dist_2 = distance(&middle, &end);
}
```

-- [Compiler Explorer](https://godbolt.org/z/30Sh66)

Finally, passing by value (arguments with type [`Copy`](https://doc.rust-lang.org/std/marker/trait.Copy.html)) and passing by reference (either moving ownership or passing a pointer) may have slightly different layouts in assembly, but will still use either stack memory or CPU registers:

```rust
pub struct Point {
    x: i64,
    y: i64,
}

// Moving values
pub fn distance_moved(a: Point, b: Point) -> i64 {
    let x1 = a.x;
    let x2 = b.x;
    let y1 = a.y;
    let y2 = b.y;

    let x_pow = (x1 - x2) * (x1 - x2);
    let y_pow = (y1 - y2) * (y1 - y2);
    let squared = x_pow + y_pow;
    squared / squared
}

// Borrowing values has two extra `mov` instructions on lines 21 and 22
pub fn distance_borrowed(a: &Point, b: &Point) -> i64 {
    let x1 = a.x;
    let x2 = b.x;
    let y1 = a.y;
    let y2 = b.y;

    let x_pow = (x1 - x2) * (x1 - x2);
    let y_pow = (y1 - y2) * (y1 - y2);
    let squared = x_pow + y_pow;
    squared / squared
}
```

-- [Compiler Explorer](https://godbolt.org/z/06hGiv)

# Enums

If you've ever worried that wrapping your types in [`Option`](https://doc.rust-lang.org/stable/core/option/enum.Option.html) or [`Result`](https://doc.rust-lang.org/stable/core/result/enum.Result.html) would finally make them large enough that Rust decides to use heap allocation instead, fear no longer: `enum` and union types don't use heap allocation:

```rust
enum MyEnum {
    Small(u8),
    Large(u64)
}

struct MyStruct {
    x: MyEnum,
    y: MyEnum,
}

pub fn enum_compare() {
    let x = MyEnum::Small(0);
    let y = MyEnum::Large(0);

    let z = MyStruct { x, y };

    let opt = Option::Some(z);
}
```

-- [Compiler Explorer](https://godbolt.org/z/HK7zBx)

Because the size of an `enum` is the size of its largest element plus a flag, the compiler can predict how much memory is used no matter which variant of an enum is currently stored in a variable. Thus, enums and unions have no need of heap allocation. There's unfortunately not a great way to show this in assembly, so I'll instead point you to the [`core::mem::size_of`](https://doc.rust-lang.org/stable/core/mem/fn.size_of.html#size-of-enums) documentation.

# Arrays

The array type is guaranteed to be stack allocated, which is why the array size must be declared. Interestingly enough, this can be used to cause safe Rust programs to crash:

```rust
// 256 bytes
#[derive(Default)]
struct TwoFiftySix {
    _a: [u64; 32]
}

// 8 kilobytes
#[derive(Default)]
struct EightK {
    _a: [TwoFiftySix; 32]
}

// 256 kilobytes
#[derive(Default)]
struct TwoFiftySixK {
    _a: [EightK; 32]
}

// 8 megabytes - exceeds space typically provided for the stack,
// though the kernel can be instructed to allocate more.
// On Linux, you can check stack size using `ulimit -s`
#[derive(Default)]
struct EightM {
    _a: [TwoFiftySixK; 32]
}

fn main() {
    // Because we already have things in stack memory
    // (like the current function call stack), allocating another
    // eight megabytes of stack memory crashes the program
    let _x = EightM::default();
}
```

-- [Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=587a6380a4914bcbcef4192c90c01dc4)

There aren't any security implications of this (no memory corruption occurs), but it's good to note that the Rust compiler won't move arrays into heap memory even if they can be reasonably expected to overflow the stack.

# Closures

Rules for how anonymous functions capture their arguments are typically language-specific. In Java, [Lambda Expressions](https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html) are actually objects created on the heap that capture local primitives by copying, and capture local non-primitives as (`final`) references. [Python](https://docs.python.org/3.7/reference/expressions.html#lambda) and [JavaScript](https://javascriptweblog.wordpress.com/2010/10/25/understanding-javascript-closures/) both bind _everything_ by reference normally, but Python can also [capture values](https://stackoverflow.com/a/235764/1454178) and JavaScript has [Arrow functions](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/Arrow_functions).

In Rust, arguments to closures are the same as arguments to other functions; closures are simply functions that don't have a declared name. Some weird ordering of the stack may be required to handle them, but it's the compiler's responsibility to figure that out.

Each example below has the same effect, but a different assembly implementation. In the simplest case, we immediately run a closure returned by another function. Because we don't store a reference to the closure, the stack memory needed to store the captured values is contiguous:

```rust
fn my_func() -> impl FnOnce() {
    let x = 24;
    // Note that this closure in assembly looks exactly like
    // any other function; you even use the `call` instruction
    // to start running it.
    move || { x; }
}

pub fn immediate() {
    my_func()();
    my_func()();
}
```

-- [Compiler Explorer](https://godbolt.org/z/mgJ2zl), 25 total assembly instructions

If we store a reference to the closure, the Rust compiler keeps values it needs in the stack memory of the original function. Getting the details right is a bit harder, so the instruction count goes up even though this code is functionally equivalent to our original example:

```rust
pub fn simple_reference() {
    let x = my_func();
    let y = my_func();
    y();
    x();
}
```

-- [Compiler Explorer](https://godbolt.org/z/K_dj5n), 55 total assembly instructions

Even things like variable order can make a difference in instruction count:

```rust
pub fn complex() {
    let x = my_func();
    let y = my_func();
    x();
    y();
}
```

-- [Compiler Explorer](https://godbolt.org/z/p37qFl), 70 total assembly instructions

In every circumstance though, the compiler ensured that no heap allocations were necessary.

# Generics

Traits in Rust come in two broad forms: static dispatch (monomorphization, `impl Trait`) and dynamic dispatch (trait objects, `dyn Trait`).
-
-# Generics
-
-Traits in Rust come in two broad forms: static dispatch (monomorphization, `impl Trait`) and
-dynamic dispatch (trait objects, `dyn Trait`). While dynamic dispatch is often _associated_ with
-trait objects being stored in the heap, dynamic dispatch can be used with stack allocated objects
-as well:
-
-```rust
-trait GetInt {
-    fn get_int(&self) -> u64;
-}
-
-// vtable stored at section L__unnamed_1
-struct WhyNotU8 {
-    x: u8
-}
-impl GetInt for WhyNotU8 {
-    fn get_int(&self) -> u64 {
-        self.x as u64
-    }
-}
-
-// vtable stored at section L__unnamed_2
-struct ActualU64 {
-    x: u64
-}
-impl GetInt for ActualU64 {
-    fn get_int(&self) -> u64 {
-        self.x
-    }
-}
-
-// `&dyn` declares that we want to use dynamic dispatch
-// rather than monomorphization, so there is only one
-// `retrieve_int` function that shows up in the final assembly.
-// If we used generics, there would be one implementation of
-// `retrieve_int` for each type that implements `GetInt`.
-pub fn retrieve_int(u: &dyn GetInt) {
-    // In the assembly, we just call an address given to us
-    // in the `rsi` register and hope that it was set up
-    // correctly when this function was invoked.
-    let x = u.get_int();
-}
-
-pub fn do_call() {
-    // Note that even though the vtable for `WhyNotU8` and
-    // `ActualU64` includes a pointer to
-    // `core::ptr::real_drop_in_place`, it is never invoked.
-    let a = WhyNotU8 { x: 0 };
-    let b = ActualU64 { x: 0 };
-
-    retrieve_int(&a);
-    retrieve_int(&b);
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/u_yguS)
-
-It's hard to imagine practical situations where dynamic dispatch would be used for objects that
-aren't heap allocated, but it technically can be done.
-
-# Copy types
-
-Understanding move semantics and copy semantics in Rust is weird at first. The Rust docs
-[go into detail](https://doc.rust-lang.org/stable/core/marker/trait.Copy.html) far better than can
-be addressed here, so I'll leave them to do the job. From a memory perspective though, their
-guideline is reasonable:
-[if your type can implement `Copy`, it should](https://doc.rust-lang.org/stable/core/marker/trait.Copy.html#when-should-my-type-be-copy).
-While there are potential speed tradeoffs to _benchmark_ when discussing `Copy` (move semantics for
-stack objects vs. copying stack pointers vs. copying stack `struct`s), _it's impossible for `Copy`
-to introduce a heap allocation_.
-
-But why is this the case? Fundamentally, it's because the language controls what `Copy` means -
-["the behavior of `Copy` is not overloadable"](https://doc.rust-lang.org/std/marker/trait.Copy.html#whats-the-difference-between-copy-and-clone)
-because it's a marker trait. From there we'll note that a type
-[can implement `Copy`](https://doc.rust-lang.org/std/marker/trait.Copy.html#when-can-my-type-be-copy)
-if (and only if) its components implement `Copy`, and that
-[no heap-allocated types implement `Copy`](https://doc.rust-lang.org/std/marker/trait.Copy.html#implementors).
-Thus, assignments involving heap types are always move semantics, and new heap allocations won't
-occur because of implicit operator behavior.
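-
-As a quick illustration of the difference (a sketch; the one `String` below is the only heap
-allocation involved):
-
-```rust
-fn main() {
-    let x: u64 = 42; // `u64` is `Copy`
-    let y = x;       // bitwise copy; `x` is still usable
-    println!("{} {}", x, y);
-
-    let s = String::from("heap"); // `String` is not `Copy`
-    let t = s;                    // move: the pointer changes hands, and
-                                  // no new heap allocation occurs
-    // println!("{}", s);         // error[E0382]: borrow of moved value
-    println!("{}", t);
-}
-```
-
-And if a type has a heap-allocated member, the compiler will refuse to let it implement `Copy`
-at all: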
-
-```rust
-#[derive(Clone)]
-struct Cloneable {
-    x: Box<u64>
-}
-
-// error[E0204]: the trait `Copy` may not be implemented for this type
-#[derive(Copy, Clone)]
-struct NotCopyable {
-    x: Box<u64>
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/VToRuK)
-
-# Iterators
-
-In managed memory languages (like
-[Java](https://www.youtube.com/watch?v=bSkpMdDe4g4&feature=youtu.be&t=357)), there's a subtle
-difference between these two code samples:
-
-```java
-public static long sum_for(List<Long> vals) {
-    long sum = 0;
-    // Regular for loop
-    for (int i = 0; i < vals.size(); i++) {
-        sum += vals.get(i);
-    }
-    return sum;
-}
-
-public static long sum_foreach(List<Long> vals) {
-    long sum = 0;
-    // "Foreach" loop - uses iteration
-    for (Long l : vals) {
-        sum += l;
-    }
-    return sum;
-}
-```
-
-In the `sum_for` function, nothing terribly interesting happens. In `sum_foreach`, an object of type
-[`Iterator`](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/Iterator.html)
-is allocated on the heap, and will eventually be garbage-collected. This isn't a great design;
-iterators are often transient objects that you need during a function and can discard once the
-function ends. Sounds exactly like the issue stack-allocated objects address, no?
-
-In Rust, iterators are allocated on the stack. The objects to iterate over are almost certainly in
-heap memory, but the iterator itself
-([`Iter`](https://doc.rust-lang.org/std/slice/struct.Iter.html)) doesn't need to use the heap. In
-each of the examples below we iterate over a collection, but never use heap allocation:
-
-```rust
-use std::collections::HashMap;
-// There's a lot of assembly generated, but if you search in the text,
-// there are no references to `real_drop_in_place` anywhere.
-
-pub fn sum_vec(x: &Vec<u32>) {
-    let mut s = 0;
-    // Basic iteration over vectors doesn't need allocation
-    for y in x {
-        s += y;
-    }
-}
-
-pub fn sum_enumerate(x: &Vec<u32>) {
-    let mut s = 0;
-    // More complex iterators are just fine too
-    for (_i, y) in x.iter().enumerate() {
-        s += y;
-    }
-}
-
-pub fn sum_hm(x: &HashMap<u32, u32>) {
-    let mut s = 0;
-    // And it's not just `Vec`; all collection types keep their
-    // iterators in stack memory
-    for y in x.values() {
-        s += y;
-    }
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/FTT3CT)
diff --git a/_posts/2019-02-07-a-heaping-helping.md b/_posts/2019-02-07-a-heaping-helping.md
deleted file mode 100644
index b68c447..0000000
--- a/_posts/2019-02-07-a-heaping-helping.md
+++ /dev/null
@@ -1,254 +0,0 @@
----
-layout: post
-title: "Dynamic Memory: A Heaping Helping"
-description: "The reason Rust exists."
-category:
-tags: [rust, understanding-allocations]
----
-
-Managing dynamic memory is hard. Some languages assume users will do it themselves (C, C++), and
-some languages go to extreme lengths to protect users from themselves (Java, Python). In Rust,
-dynamic memory (also referred to as the **heap**) is managed through a system called _ownership_.
-And as the docs mention, ownership
-[is Rust's most unique feature](https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html).
-
-The heap is used in two situations: when the compiler is unable to predict either the _total size
-of memory needed_ or _how long the memory is needed for_, it allocates space in the heap. This
-happens pretty frequently; if you want to download the Google home page, you won't know how large
-it is until your program runs. And when you're finished with Google, the memory is deallocated so
-it can be used to store other webpages.
-If you're interested in a slightly longer explanation of the heap, check out
-[The Stack and the Heap](https://doc.rust-lang.org/book/ch04-01-what-is-ownership.html#the-stack-and-the-heap)
-in Rust's documentation.
-
-We won't go into detail on how the heap is managed; the
-[ownership documentation](https://doc.rust-lang.org/book/ch04-01-what-is-ownership.html) does a
-phenomenal job explaining both the "why" and "how" of memory management. Instead, we're going to
-focus on understanding "when" heap allocations occur in Rust.
-
-To start off, take a guess at how many allocations happen in the program below:
-
-```rust
-fn main() {}
-```
-
-It's obviously a trick question; while no heap allocations occur as a result of that code, the setup
-needed to call `main` does allocate on the heap. Here's a way to show it:
-
-```rust
-#![feature(integer_atomics)]
-use std::alloc::{GlobalAlloc, Layout, System};
-use std::sync::atomic::{AtomicU64, Ordering};
-
-static ALLOCATION_COUNT: AtomicU64 = AtomicU64::new(0);
-
-struct CountingAllocator;
-
-unsafe impl GlobalAlloc for CountingAllocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        ALLOCATION_COUNT.fetch_add(1, Ordering::SeqCst);
-        System.alloc(layout)
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        System.dealloc(ptr, layout);
-    }
-}
-
-#[global_allocator]
-static A: CountingAllocator = CountingAllocator;
-
-fn main() {
-    let x = ALLOCATION_COUNT.fetch_add(0, Ordering::SeqCst);
-    println!("There were {} allocations before calling main!", x);
-}
-```
-
---
-[Rust Playground](https://play.rust-lang.org/?version=nightly&mode=debug&edition=2018&gist=fb5060025ba79fc0f906b65a4ef8eb8e)
-
-As of the time of writing, there are five allocations that happen before `main` is ever called.
-
-But when we want to understand more practically where heap allocation happens, we'll follow this
-guide:
-
-- Smart pointers hold their contents in the heap
-- Collections are smart pointers for many objects at a time, and reallocate when they need to grow
-
-Finally, there are two "addendum" issues that are important to address when discussing Rust and the
-heap:
-
-- Non-heap alternatives to many standard library types are available.
-- Special allocators to track memory behavior should be used to benchmark code.
-
-# Smart pointers
-
-The first things to note are the "smart pointer" types. When you have data that must outlive the
-scope in which it is declared, or your data is of unknown or dynamic size, you'll make use of these
-types.
-
-The term [smart pointer](https://en.wikipedia.org/wiki/Smart_pointer) comes from C++, and while it's
-closely linked to a general design pattern of
-["Resource Acquisition Is Initialization"](https://en.cppreference.com/w/cpp/language/raii), we'll
-use it here specifically to describe objects that are responsible for managing ownership of data
-allocated on the heap. The smart pointers available in the `alloc` crate should look mostly
-familiar:
-
-- [`Box`](https://doc.rust-lang.org/alloc/boxed/struct.Box.html)
-- [`Rc`](https://doc.rust-lang.org/alloc/rc/struct.Rc.html)
-- [`Arc`](https://doc.rust-lang.org/alloc/sync/struct.Arc.html)
-- [`Cow`](https://doc.rust-lang.org/alloc/borrow/enum.Cow.html)
-
-The [standard library](https://doc.rust-lang.org/std/) also defines some smart pointers to manage
-heap objects, though more than can be covered here.
-Some examples are:
-
-- [`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html)
-- [`Mutex`](https://doc.rust-lang.org/std/sync/struct.Mutex.html)
-
-Finally, there is one ["gotcha"](https://www.merriam-webster.com/dictionary/gotcha): **cell types**
-(like [`RefCell`](https://doc.rust-lang.org/stable/core/cell/struct.RefCell.html)) look and behave
-similarly, but **don't involve heap allocation**. The
-[`core::cell` docs](https://doc.rust-lang.org/stable/core/cell/index.html) have more information.
-
-When a smart pointer is created, the data it is given is placed in heap memory and the location of
-that data is recorded in the smart pointer. Once the smart pointer has determined it's safe to
-deallocate that memory (when a `Box` has
-[gone out of scope](https://doc.rust-lang.org/stable/std/boxed/index.html) or a reference count
-[goes to zero](https://doc.rust-lang.org/alloc/rc/index.html)), the heap space is reclaimed. We can
-prove these types use heap memory by looking at code:
-
-```rust
-use std::rc::Rc;
-use std::sync::Arc;
-use std::borrow::Cow;
-
-pub fn my_box() {
-    // Drop at assembly line 1640
-    Box::new(0);
-}
-
-pub fn my_rc() {
-    // Drop at assembly line 1650
-    Rc::new(0);
-}
-
-pub fn my_arc() {
-    // Drop at assembly line 1660
-    Arc::new(0);
-}
-
-pub fn my_cow() {
-    // Drop at assembly line 1672
-    Cow::from("drop");
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/4AMQug)
-
-# Collections
-
-Collection types use heap memory because their contents have dynamic size; they will request more
-memory [when needed](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.reserve), and can
-[release memory](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.shrink_to_fit) when it's
-no longer necessary. This dynamic property forces Rust to heap allocate everything they contain. In
-a way, **collections are smart pointers for many objects at a time**. Common types that fall under
-this umbrella are [`Vec`](https://doc.rust-lang.org/stable/alloc/vec/struct.Vec.html),
-[`HashMap`](https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html), and
-[`String`](https://doc.rust-lang.org/stable/alloc/string/struct.String.html) (not
-[`str`](https://doc.rust-lang.org/std/primitive.str.html)).
-
-While collections store the objects they own in heap memory, _creating new collections will not
-allocate on the heap_. This is a bit weird; if we call `Vec::new()`, the assembly shows a
-corresponding call to `real_drop_in_place`:
-
-```rust
-pub fn my_vec() {
-    // Drop in place at line 481
-    Vec::<u64>::new();
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/1WkNtC)
-
-But because the vector has no elements to manage, no calls to the allocator will ever be dispatched:
-
-```rust
-use std::alloc::{GlobalAlloc, Layout, System};
-use std::sync::atomic::{AtomicBool, Ordering};
-
-fn main() {
-    // Turn on panicking if we allocate on the heap
-    DO_PANIC.store(true, Ordering::SeqCst);
-
-    // Interesting bit happens here
-    let x: Vec<u64> = Vec::new();
-    drop(x);
-
-    // Turn panicking back off, some deallocations occur
-    // after main as well.
-    DO_PANIC.store(false, Ordering::SeqCst);
-}
-
-#[global_allocator]
-static A: PanicAllocator = PanicAllocator;
-static DO_PANIC: AtomicBool = AtomicBool::new(false);
-struct PanicAllocator;
-
-unsafe impl GlobalAlloc for PanicAllocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        if DO_PANIC.load(Ordering::SeqCst) {
-            panic!("Unexpected allocation.");
-        }
-        System.alloc(layout)
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        if DO_PANIC.load(Ordering::SeqCst) {
-            panic!("Unexpected deallocation.");
-        }
-        System.dealloc(ptr, layout);
-    }
-}
-```
-
---
-[Rust Playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=831a297d176d015b1f9ace01ae416cc6)
-
-Other standard library types follow the same behavior; make sure to check out
-[`HashMap::new()`](https://doc.rust-lang.org/std/collections/hash_map/struct.HashMap.html#method.new)
-and [`String::new()`](https://doc.rust-lang.org/std/string/struct.String.html#method.new).
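-
-If you want to watch a collection talk to the allocator as it grows, `Vec` exposes its capacity;
-a small sketch (the exact growth sequence is an implementation detail, so treat the printed numbers
-as illustrative):
-
-```rust
-fn main() {
-    let mut v: Vec<u64> = Vec::new();
-    println!("initial capacity: {}", v.capacity()); // 0: no allocation yet
-
-    for i in 0..1000 {
-        let old_capacity = v.capacity();
-        v.push(i);
-        if v.capacity() != old_capacity {
-            // Each capacity change is a round-trip through the allocator
-            println!("len {}: capacity {} -> {}", v.len(), old_capacity, v.capacity());
-        }
-    }
-
-    // And the memory can be handed back when we no longer need it
-    v.clear();
-    v.shrink_to_fit();
-    println!("final capacity: {}", v.capacity()); // back to 0 in practice
-}
-```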
-
-# Heap Alternatives
-
-While it is a bit strange to speak of the stack after spending time with the heap, it's worth
-pointing out that some heap-allocated objects in Rust have stack-based counterparts provided by
-other crates. If you have need of the functionality, but want to avoid allocating, there are
-typically alternatives available.
-
-When it comes to some standard library smart pointers
-([`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html) and
-[`Mutex`](https://doc.rust-lang.org/std/sync/struct.Mutex.html)), stack-based alternatives are
-provided in crates like [parking_lot](https://crates.io/crates/parking_lot) and
-[spin](https://crates.io/crates/spin). You can check out
-[`lock_api::RwLock`](https://docs.rs/lock_api/0.1.5/lock_api/struct.RwLock.html),
-[`lock_api::Mutex`](https://docs.rs/lock_api/0.1.5/lock_api/struct.Mutex.html), and
-[`spin::Once`](https://mvdnes.github.io/rust-docs/spin-rs/spin/struct.Once.html) if you're in need
-of synchronization primitives.
-
-[thread_id](https://crates.io/crates/thread-id) may be necessary if you're implementing an allocator
-because [`thread::current().id()`](https://doc.rust-lang.org/std/thread/struct.ThreadId.html) uses a
-[`thread_local!` structure](https://doc.rust-lang.org/stable/src/std/sys_common/thread_info.rs.html#17-36)
-that needs heap allocation.
-
-# Tracing Allocators
-
-When writing performance-sensitive code, there's no alternative to measuring your code. If you
-didn't write a benchmark,
-[you don't care about its performance](https://www.youtube.com/watch?v=2EWejmkKlxs&feature=youtu.be&t=263).
-You should never rely on your instincts when
-[a microsecond is an eternity](https://www.youtube.com/watch?v=NH1Tta7purM).
-
-Similarly, there's great work going on in Rust with allocators that keep track of what they're doing
-(like [`alloc_counter`](https://crates.io/crates/alloc_counter)). When it comes to tracking heap
-behavior, it's easy to make mistakes; please write tests and make sure you have tools to guard
-against future issues.
diff --git a/_posts/2019-02-08-compiler-optimizations.md b/_posts/2019-02-08-compiler-optimizations.md
deleted file mode 100644
index 4b8b385..0000000
--- a/_posts/2019-02-08-compiler-optimizations.md
+++ /dev/null
@@ -1,148 +0,0 @@
----
-layout: post
-title: "Compiler Optimizations: What It's Done Lately"
-description: "A lot. The answer is a lot."
-category:
-tags: [rust, understanding-allocations]
----
-
-**Update 2019-02-10**: When debugging a
-[related issue](https://gitlab.com/sio4/code/alloc-counter/issues/1), it was discovered that the
-original code worked because LLVM optimized out the entire function, rather than just the allocation
-segments. The code has been updated with proper use of
-[`read_volatile`](https://doc.rust-lang.org/std/ptr/fn.read_volatile.html), and a previous section
-on vector capacity has been removed.
-
----
-
-Up to this point, we've been discussing memory usage in the Rust language by focusing on simple
-rules that are mostly right for small chunks of code. We've spent time showing how those rules work
-themselves out in practice, and become familiar with reading the assembly code needed to see each
-memory type (global, stack, heap) in action.
-
-Throughout the series so far, we've put a handicap on the code. In the name of consistent and
-understandable results, we've asked the compiler to pretty please leave the training wheels on. Now
-is the time where we throw out all the rules and take off the kid gloves. As it turns out, both the
-Rust compiler and the LLVM optimizers are incredibly sophisticated, and we'll step back and let them
-do their job.
-
-Similar to
-["What Has My Compiler Done For Me Lately?"](https://www.youtube.com/watch?v=bSkpMdDe4g4), we're
-focusing on interesting things the Rust language (and LLVM!) can do with memory management. We'll
-still be looking at assembly code to understand what's going on, but it's important to mention
-again: **please use automated tools like [alloc-counter](https://crates.io/crates/alloc_counter) to
-double-check memory behavior if it's something you care about**. It's far too easy to misread
-assembly in large code sections; you should always verify behavior if you care about memory usage.
-
-The guiding principle as we move forward is this: _optimizing compilers won't produce worse programs
-than we started with._ There won't be any situations where stack allocations get moved to heap
-allocations. There will, however, be an opera of optimization.
-
-# The Case of the Disappearing Box
-
-Our first optimization comes when LLVM can reason that the lifetime of an object is sufficiently
-short that heap allocations aren't necessary. In these cases, LLVM will move the allocation to the
-stack instead! The way this interacts with `#[inline]` attributes is a bit opaque, but the important
-part is that LLVM can sometimes do better than the baseline Rust language:
-
-```rust
-use std::alloc::{GlobalAlloc, Layout, System};
-use std::sync::atomic::{AtomicBool, Ordering};
-
-pub fn cmp(x: u32) {
-    // Turn on panicking if we allocate on the heap
-    DO_PANIC.store(true, Ordering::SeqCst);
-
-    // The compiler is able to see through the constant `Box`
-    // and directly compare `x` to 24 - assembly line 73
-    let y = Box::new(24);
-    let equals = x == *y;
-
-    // This call to drop is eliminated
-    drop(y);
-
-    // Need to mark the comparison result as volatile so that
-    // LLVM doesn't strip out all the code. If `y` is marked
-    // volatile instead, allocation will be forced.
-    unsafe { std::ptr::read_volatile(&equals) };
-
-    // Turn off panicking, as there are some deallocations
-    // when we exit main.
-    DO_PANIC.store(false, Ordering::SeqCst);
-}
-
-fn main() {
-    cmp(12)
-}
-
-#[global_allocator]
-static A: PanicAllocator = PanicAllocator;
-static DO_PANIC: AtomicBool = AtomicBool::new(false);
-struct PanicAllocator;
-
-unsafe impl GlobalAlloc for PanicAllocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        if DO_PANIC.load(Ordering::SeqCst) {
-            panic!("Unexpected allocation.");
-        }
-        System.alloc(layout)
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        if DO_PANIC.load(Ordering::SeqCst) {
-            panic!("Unexpected deallocation.");
-        }
-        System.dealloc(ptr, layout);
-    }
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/BZ_Yp3),
-[Rust Playground](https://play.rust-lang.org/?version=stable&mode=release&edition=2018&gist=4a765f753183d5b919f62c71d2109d5d)
-
-# Dr. Array or: How I Learned to Love the Optimizer
-
-Finally, this isn't so much about LLVM figuring out different memory behavior, but LLVM stripping
-out code that doesn't do anything. Optimizations of this type have a lot of nuance to them; if
-you're not careful, they can make your benchmarks look
-[impossibly good](https://www.youtube.com/watch?v=nXaxk27zwlk&feature=youtu.be&t=1199). In Rust, the
-`black_box` function (implemented in both
-[`libtest`](https://doc.rust-lang.org/1.1.0/test/fn.black_box.html) and
-[`criterion`](https://docs.rs/criterion/0.2.10/criterion/fn.black_box.html)) will tell the compiler
-to disable this kind of optimization. But if you let LLVM remove unnecessary code, you can end up
-running programs that previously caused errors:
-
-```rust
-#[derive(Default)]
-struct TwoFiftySix {
-    _a: [u64; 32]
-}
-
-#[derive(Default)]
-struct EightK {
-    _a: [TwoFiftySix; 32]
-}
-
-#[derive(Default)]
-struct TwoFiftySixK {
-    _a: [EightK; 32]
-}
-
-#[derive(Default)]
-struct EightM {
-    _a: [TwoFiftySixK; 32]
-}
-
-pub fn main() {
-    // Normally this blows up because we can't reserve size on stack
-    // for the `EightM` struct. But because the compiler notices we
-    // never do anything with `_x`, it optimizes out the stack storage
-    // and the program completes successfully.
-    let _x = EightM::default();
-}
-```
-
--- [Compiler Explorer](https://godbolt.org/z/daHn7P),
-[Rust Playground](https://play.rust-lang.org/?version=stable&mode=release&edition=2018&gist=4c253bf26072119896ab93c6ef064dc0)
diff --git a/_posts/2019-02-09-summary.md b/_posts/2019-02-09-summary.md
deleted file mode 100644
index dd7f06d..0000000
--- a/_posts/2019-02-09-summary.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: post
-title: "Summary: What are the Allocation Rules?"
-description: "A synopsis and reference."
-category:
-tags: [rust, understanding-allocations]
----
-
-While there's a lot of interesting detail captured in this series, it's often helpful to have a
-document that answers some "yes/no" questions. You may not care about what an `Iterator` looks like
-in assembly; you just need to know whether it allocates an object on the heap or not. And while Rust
-will prioritize the fastest behavior it can, here are the rules for each memory type:
-
-**Heap Allocation**:
-
-- Smart pointers (`Box`, `Rc`, `Mutex`, etc.) allocate their contents in heap memory.
-- Collections (`HashMap`, `Vec`, `String`, etc.) allocate their contents in heap memory.
-- Some smart pointers in the standard library have counterparts in other crates that don't need heap
-  memory. If possible, use those.
-
-**Stack Allocation**:
-
-- Everything not using a smart pointer will be allocated on the stack.
-- Structs, enums, iterators, arrays, and closures are all stack allocated.
-- Cell types (`RefCell`) behave like smart pointers, but are stack-allocated.
-- Inlining (`#[inline]`) will not affect allocation behavior for better or worse.
-- Types that are marked `Copy` are guaranteed to have their contents stack-allocated.
-
-**Global Allocation**:
-
-- `const` is a fixed value; the compiler is allowed to copy it wherever useful.
-- `static` is a fixed reference; the compiler will guarantee it is unique.
-
-![Container Sizes in Rust](/assets/images/2019-02-04-container-size.svg)
--
-[Raph Levien](https://docs.google.com/presentation/d/1q-c7UAyrUlM-eZyTo1pd8SZ0qwA_wYxmPZVOQkoDmH4/edit?usp=sharing)
diff --git a/_posts/2019-05-03-making-bread.md b/_posts/2019-05-03-making-bread.md
deleted file mode 100644
index de794e0..0000000
--- a/_posts/2019-05-03-making-bread.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: post
-title: "Making Bread"
-description: "...because I've got some free time now. 🍞"
-category:
-tags: [baking]
----
-
-Having recently started my "gardening leave" between positions, I have some more personal time
-available. I'm planning to stay productive, contributing to some open-source projects, but it also
-occurred to me that despite [talking about](https://speice.io/2018/05/hello.html) bread pics, this
-blog has been purely technical. Maybe I'll change the site title from "The Old Speice Guy" to "Bites
-and Bytes"?
-
-Either way, I'm baking a little bit again, and figured it was worth taking a quick break to focus on
-some lighter material. I recently learned two critically important lessons: first, the temperature
-of the dough when you put the yeast in makes a huge difference.
-
-Previously, when I wasn't paying attention to dough temperature:
-
-![Whole wheat dough](/assets/images/2019-05-03-making-bread/whole-wheat-not-rising.jpg)
-
-Compared with what happens when I put the dough in the microwave for a defrost cycle because the
-water I used wasn't warm enough:
-
-![White dough](/assets/images/2019-05-03-making-bread/white-dough-rising-before-fold.jpg)
-
-I mean, just look at the bubbles!
-
-![White dough with bubbles](/assets/images/2019-05-03-making-bread/white-dough-rising-after-fold.jpg)
-
-After shaping the dough, I've got two loaves ready:
-
-![Shaped loaves](/assets/images/2019-05-03-making-bread/shaped-loaves.jpg)
-
-Now, the recipe normally calls for a Dutch Oven to bake the bread because it keeps the dough from
-drying out in the oven. Because I don't own a Dutch Oven, I typically put a casserole dish on the
-bottom rack and fill it with water so there's still some moisture in the oven. This time, I forgot
-to add the water and learned my second lesson: never add room-temperature water to a glass dish
-that's currently at 500 degrees.
-
-![Shattered glass dish](/assets/images/2019-05-03-making-bread/shattered-glass.jpg)
-
-Needless to say, trying to pull out sharp glass from an incredibly hot oven is not what I expected
-to be doing during my gardening leave.
-
-In the end, the bread crust wasn't great, but the bread itself turned out pretty alright:
-
-![Baked bread](/assets/images/2019-05-03-making-bread/final-product.jpg)
-
-I've been writing a lot more during this break, so I'm looking forward to sharing that in the
-future. In the meantime, I'm planning on making a sandwich.
diff --git a/_posts/2019-06-31-high-performance-systems.md b/_posts/2019-06-31-high-performance-systems.md
deleted file mode 100644
index 23ef44b..0000000
--- a/_posts/2019-06-31-high-performance-systems.md
+++ /dev/null
@@ -1,296 +0,0 @@
----
-layout: post
-title: "On Building High Performance Systems"
-description: ""
-category:
-tags: []
----
-
-**Update 2019-09-21**: Added notes on `isolcpus` and `systemd` affinity.
-
-Prior to working in the trading industry, my assumption was that High Frequency Trading (HFT) is
-made up of people who have access to secret techniques mortal developers could only dream of. There
-had to be some secret art that could only be learned if one had an appropriately tragic backstory:
-
-kung-fu fight
-> How I assumed HFT people learn their secret techniques
-
-How else do you explain people working on systems that complete the round trip of market data in to
-orders out (a.k.a. tick-to-trade) consistently within
-[750-800 nanoseconds](https://stackoverflow.com/a/22082528/1454178)? In roughly the time it takes a
-computer to access
-[main memory 8 times](https://people.eecs.berkeley.edu/~rcs/research/interactive_latency.html),
-trading systems are capable of reading the market data packets, deciding what orders to send, doing
-risk checks, creating new packets for exchange-specific protocols, and putting those packets on the
-wire.
-
-Having now worked in the trading industry, I can confirm the developers aren't super-human; I've
-made some simple mistakes at the very least. Instead, what shows up in public discussions is that
-philosophy, not technique, separates high-performance systems from everything else.
-Performance-critical systems don't rely on "this one cool C++ optimization trick" to make code fast
-(though micro-optimizations have their place); there's a lot more to worry about than just the code
-written for the project.
-
-The framework I'd propose is this: **If you want to build high-performance systems, focus first on
-reducing performance variance** (reducing the gap between the fastest and slowest runs of the same
-code), **and only look at average latency once variance is at an acceptable level**.
-
-Don't get me wrong, I'm a much happier person when things are fast. Computer goes from booting in 20
-seconds down to 10 because I installed a solid-state drive? Awesome. But if every fifth day it takes
-a full minute to boot because of corrupted sectors? Not so great. Average speed over the course of a
-week is the same in each situation, but you're painfully aware of that minute when it happens. When
-it comes to code, the principle is the same: speeding up a function by an average of 10 milliseconds
-doesn't mean much if there's a 100ms difference between your fastest and slowest runs. When
-performance matters, you need to respond quickly _every time_, not just in aggregate.
-High-performance systems should first optimize for time variance. Once you're consistent at the time
-scale you care about, then focus on improving average time.
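-
-Measuring that gap is mostly a matter of keeping every sample instead of just the mean. A minimal
-sketch of the kind of harness I mean (the timed operation and the sample count are placeholders):
-
-```rust
-use std::time::Instant;
-
-fn percentile(sorted_nanos: &[u128], p: f64) -> u128 {
-    let idx = ((sorted_nanos.len() - 1) as f64 * p) as usize;
-    sorted_nanos[idx]
-}
-
-fn main() {
-    let mut samples = Vec::with_capacity(10_000);
-
-    for _ in 0..10_000 {
-        let start = Instant::now();
-        // ... the operation you actually care about goes here ...
-        samples.push(start.elapsed().as_nanos());
-    }
-
-    samples.sort_unstable();
-    println!(
-        "median: {}ns, p99: {}ns, p99.9: {}ns, worst: {}ns",
-        percentile(&samples, 0.5),
-        percentile(&samples, 0.99),
-        percentile(&samples, 0.999),
-        samples[samples.len() - 1]
-    );
-}
-```
-
-If the worst case sits orders of magnitude above the median, variance is the problem to chase
-first.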
- -This focus on variance shows up all the time in industry too (emphasis added in all quotes below): - -- In [marketing materials](https://business.nasdaq.com/market-tech/marketplaces/trading) for - NASDAQ's matching engine, the most performance-sensitive component of the exchange, dependability - is highlighted in addition to instantaneous metrics: - - > Able to **consistently sustain** an order rate of over 100,000 orders per second at sub-40 - > microsecond average latency - -- The [Aeron](https://github.com/real-logic/aeron) message bus has this to say about performance: - - > Performance is the key focus. Aeron is designed to be the highest throughput with the lowest and - > **most predictable latency possible** of any messaging system - -- The company PolySync, which is working on autonomous vehicles, - [mentions why](https://polysync.io/blog/session-types-for-hearty-codecs/) they picked their - specific messaging format: - - > In general, high performance is almost always desirable for serialization. But in the world of - > autonomous vehicles, **steady timing performance is even more important** than peak throughput. - > This is because safe operation is sensitive to timing outliers. Nobody wants the system that - > decides when to slam on the brakes to occasionally take 100 times longer than usual to encode - > its commands. - -- [Solarflare](https://solarflare.com/), which makes highly-specialized network hardware, points out - variance (jitter) as a big concern for - [electronic trading](https://solarflare.com/electronic-trading/): - > The high stakes world of electronic trading, investment banks, market makers, hedge funds and - > exchanges demand the **lowest possible latency and jitter** while utilizing the highest - > bandwidth and return on their investment. - -And to further clarify: we're not discussing _total run-time_, but variance of total run-time. There -are situations where it's not reasonably possible to make things faster, and you'd much rather be -consistent. For example, trading firms use -[wireless networks](https://sniperinmahwah.wordpress.com/2017/06/07/network-effects-part-i/) because -the speed of light through air is faster than through fiber-optic cables. There's still at _absolute -minimum_ a [~33.76 millisecond](http://tinyurl.com/y2vd7tn8) delay required to send data between, -say, -[Chicago and Tokyo](https://www.theice.com/market-data/connectivity-and-feeds/wireless/tokyo-chicago). -If a trading system in Chicago calls the function for "send order to Tokyo" and waits to see if a -trade occurs, there's a physical limit to how long that will take. In this situation, the focus is -on keeping variance of _additional processing_ to a minimum, since speed of light is the limiting -factor. - -So how does one go about looking for and eliminating performance variance? To tell the truth, I -don't think a systematic answer or flow-chart exists. There's no substitute for (A) building a deep -understanding of the entire technology stack, and (B) actually measuring system performance (though -(C) watching a lot of [CppCon](https://www.youtube.com/channel/UCMlGfpWw-RUdWX_JbLCukXg) videos for -inspiration never hurt). 
-Even then, every project cares about performance to a different degree; you may need to build an
-entire
-[replica production system](https://www.youtube.com/watch?v=NH1Tta7purM&feature=youtu.be&t=3015) to
-accurately benchmark at nanosecond precision, or you may be content to simply
-[avoid garbage collection](https://www.youtube.com/watch?v=BD9cRbxWQx8&feature=youtu.be&t=1335) in
-your Java code.
-
-Even though everyone has different needs, there are still common things to look for when trying to
-isolate and eliminate variance. In no particular order, these are my focus areas when thinking about
-high-performance systems:
-
-## Language-specific
-
-**Garbage Collection**: How often does garbage collection happen? When is it triggered? What are the
-impacts?
-
-- [In Python](https://rushter.com/blog/python-garbage-collector/), individual objects are collected
-  if the reference count reaches 0, and each generation is collected if
-  `num_alloc - num_dealloc > gc_threshold` whenever an allocation happens. The GIL is acquired for
-  the duration of generational collection.
-- Java has
-  [many](https://docs.oracle.com/en/java/javase/12/gctuning/parallel-collector1.html#GUID-DCDD6E46-0406-41D1-AB49-FB96A50EB9CE)
-  [different](https://docs.oracle.com/en/java/javase/12/gctuning/garbage-first-garbage-collector.html#GUID-ED3AB6D3-FD9B-4447-9EDF-983ED2F7A573)
-  [collection](https://docs.oracle.com/en/java/javase/12/gctuning/garbage-first-garbage-collector-tuning.html#GUID-90E30ACA-8040-432E-B3A0-1E0440AB556A)
-  [algorithms](https://docs.oracle.com/en/java/javase/12/gctuning/z-garbage-collector1.html#GUID-A5A42691-095E-47BA-B6DC-FB4E5FAA43D0)
-  to choose from, each with different characteristics. The default algorithms (Parallel GC in Java
-  8, G1 in Java 9) freeze the JVM while collecting, while more recent algorithms
-  ([ZGC](https://wiki.openjdk.java.net/display/zgc) and
-  [Shenandoah](https://wiki.openjdk.java.net/display/shenandoah)) are designed to keep "stop the
-  world" to a minimum by doing collection work in parallel.
-
-**Allocation**: Every language has a different way of interacting with "heap" memory, but the
-principle is the same: running the allocator to allocate/deallocate memory takes time that can often
-be put to better use. Understanding when your language interacts with the allocator is crucial, and
-not always obvious. For example: C++ and Rust don't allocate heap memory for iterators, but Java
-does (meaning potential GC pauses). Take time to understand heap behavior (I made
-[a guide for Rust](/2019/02/understanding-allocations-in-rust.html)), and look into alternative
-allocators ([jemalloc](http://jemalloc.net/),
-[tcmalloc](https://gperftools.github.io/gperftools/tcmalloc.html)) that might run faster than the
-operating system default.
-
-**Data Layout**: How your data is arranged in memory matters;
-[data-oriented design](https://www.youtube.com/watch?v=yy8jQgmhbAU) and
-[cache locality](https://www.youtube.com/watch?v=2EWejmkKlxs&feature=youtu.be&t=1185) can have huge
-impacts on performance. The C family of languages (C, value types in C#, C++) and Rust all have
-guarantees about the shape every object takes in memory that others (e.g. Java and Python) can't
-make. [Cachegrind](http://valgrind.org/docs/manual/cg-manual.html) and kernel
-[perf](https://perf.wiki.kernel.org/index.php/Main_Page) counters are both great for understanding
-how performance relates to memory layout.
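-
-The classic illustration is array-of-structs versus struct-of-arrays; a sketch (the field names
-are arbitrary):
-
-```rust
-// Array-of-structs: the fields of one item sit next to each other, so
-// summing a single field drags the unused fields through the cache too.
-struct Particle {
-    position: f64,
-    velocity: f64,
-    mass: f64,
-}
-
-fn total_mass_aos(particles: &[Particle]) -> f64 {
-    particles.iter().map(|p| p.mass).sum()
-}
-
-// Struct-of-arrays: each field is contiguous, so a scan over `masses`
-// touches only the cache lines it actually needs.
-struct Particles {
-    positions: Vec<f64>,
-    velocities: Vec<f64>,
-    masses: Vec<f64>,
-}
-
-fn total_mass_soa(particles: &Particles) -> f64 {
-    particles.masses.iter().sum()
-}
-```
-
-Both functions compute the same thing; only the memory traffic differs, and the tools above will
-tell you by how much.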
- -**Just-In-Time Compilation**: Languages that are compiled on the fly (LuaJIT, C#, Java, PyPy) are -great because they optimize your program for how it's actually being used, rather than how a -compiler expects it to be used. However, there's a variance problem if the program stops executing -while waiting for translation from VM bytecode to native code. As a remedy, many languages support -ahead-of-time compilation in addition to the JIT versions -([CoreRT](https://github.com/dotnet/corert) in C# and [GraalVM](https://www.graalvm.org/) in Java). -On the other hand, LLVM supports -[Profile Guided Optimization](https://clang.llvm.org/docs/UsersManual.html#profile-guided-optimization), -which theoretically brings JIT benefits to non-JIT languages. Finally, be careful to avoid comparing -apples and oranges during benchmarks; you don't want your code to suddenly speed up because the JIT -compiler kicked in. - -**Programming Tricks**: These won't make or break performance, but can be useful in specific -circumstances. For example, C++ can use -[templates instead of branches](https://www.youtube.com/watch?v=NH1Tta7purM&feature=youtu.be&t=1206) -in critical sections. - -## Kernel - -Code you wrote is almost certainly not the _only_ code running on your hardware. There are many ways -the operating system interacts with your program, from interrupts to system calls, that are -important to watch for. These are written from a Linux perspective, but Windows does typically have -equivalent functionality. - -**Scheduling**: The kernel is normally free to schedule any process on any core, so it's important -to reserve CPU cores exclusively for the important programs. There are a few parts to this: first, -limit the CPU cores that non-critical processes are allowed to run on by excluding cores from -scheduling -([`isolcpus`](https://www.linuxtopia.org/online_books/linux_kernel/kernel_configuration/re46.html) -kernel command-line option), or by setting the `init` process CPU affinity -([`systemd` example](https://access.redhat.com/solutions/2884991)). Second, set critical processes -to run on the isolated cores by setting the -[processor affinity](https://en.wikipedia.org/wiki/Processor_affinity) using -[taskset](https://linux.die.net/man/1/taskset). Finally, use -[`NO_HZ`](https://github.com/torvalds/linux/blob/master/Documentation/timers/NO_HZ.txt) or -[`chrt`](https://linux.die.net/man/1/chrt) to disable scheduling interrupts. Turning off -hyper-threading is also likely beneficial. - -**System calls**: Reading from a UNIX socket? Writing to a file? In addition to not knowing how long -the I/O operation takes, these all trigger expensive -[system calls (syscalls)](https://en.wikipedia.org/wiki/System_call). To handle these, the CPU must -[context switch](https://en.wikipedia.org/wiki/Context_switch) to the kernel, let the kernel -operation complete, then context switch back to your program. We'd rather keep these -[to a minimum](https://www.destroyallsoftware.com/talks/the-birth-and-death-of-javascript) (see -timestamp 18:20). [Strace](https://linux.die.net/man/1/strace) is your friend for understanding when -and where syscalls happen. - -**Signal Handling**: Far less likely to be an issue, but signals do trigger a context switch if your -code has a handler registered. This will be highly dependent on the application, but you can -[block signals](https://www.linuxprogrammingblog.com/all-about-linux-signals?page=show#Blocking_signals) -if it's an issue. 
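-
-As a companion to the scheduling advice above, affinity can also be set from inside the program
-itself; a Linux-only sketch assuming the `libc` crate (the core number is an arbitrary
-placeholder):
-
-```rust
-// Pin the calling thread to one CPU core so the kernel scheduler
-// never migrates it mid-operation.
-fn pin_to_core(core_id: usize) -> Result<(), std::io::Error> {
-    unsafe {
-        // A zeroed cpu_set_t is an empty CPU set
-        let mut cpuset: libc::cpu_set_t = std::mem::zeroed();
-        libc::CPU_SET(core_id, &mut cpuset);
-
-        // PID 0 means "the calling thread"
-        let ret = libc::sched_setaffinity(
-            0,
-            std::mem::size_of::<libc::cpu_set_t>(),
-            &cpuset,
-        );
-        if ret != 0 {
-            return Err(std::io::Error::last_os_error());
-        }
-    }
-    Ok(())
-}
-
-fn main() {
-    pin_to_core(2).expect("failed to set affinity");
-    // ... latency-critical work happens here ...
-}
-```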
- -**Interrupts**: System interrupts are how devices connected to your computer notify the CPU that -something has happened. The CPU will then choose a processor core to pause and context switch to the -OS to handle the interrupt. Make sure that -[SMP affinity](http://www.alexonlinux.com/smp-affinity-and-proper-interrupt-handling-in-linux) is -set so that interrupts are handled on a CPU core not running the program you care about. - -**[NUMA](https://www.kernel.org/doc/html/latest/vm/numa.html)**: While NUMA is good at making -multi-cell systems transparent, there are variance implications; if the kernel moves a process -across nodes, future memory accesses must wait for the controller on the original node. Use -[numactl](https://linux.die.net/man/8/numactl) to handle memory-/cpu-cell pinning so this doesn't -happen. - -## Hardware - -**CPU Pipelining/Speculation**: Speculative execution in modern processors gave us vulnerabilities -like Spectre, but it also gave us performance improvements like -[branch prediction](https://stackoverflow.com/a/11227902/1454178). And if the CPU mis-speculates -your code, there's variance associated with rewind and replay. While the compiler knows a lot about -how your CPU [pipelines instructions](https://youtu.be/nAbCKa0FzjQ?t=4467), code can be -[structured to help](https://www.youtube.com/watch?v=NH1Tta7purM&feature=youtu.be&t=755) the branch -predictor. - -**Paging**: For most systems, virtual memory is incredible. Applications live in their own worlds, -and the CPU/[MMU](https://en.wikipedia.org/wiki/Memory_management_unit) figures out the details. -However, there's a variance penalty associated with memory paging and caching; if you access more -memory pages than the [TLB](https://en.wikipedia.org/wiki/Translation_lookaside_buffer) can store, -you'll have to wait for the page walk. Kernel perf tools are necessary to figure out if this is an -issue, but using [huge pages](https://blog.pythian.com/performance-tuning-hugepages-in-linux/) can -reduce TLB burdens. Alternately, running applications in a hypervisor like -[Jailhouse](https://github.com/siemens/jailhouse) allows one to skip virtual memory entirely, but -this is probably more work than the benefits are worth. - -**Network Interfaces**: When more than one computer is involved, variance can go up dramatically. -Tuning kernel -[network parameters](https://github.com/leandromoreira/linux-network-performance-parameters) may be -helpful, but modern systems more frequently opt to skip the kernel altogether with a technique -called [kernel bypass](https://blog.cloudflare.com/kernel-bypass/). This typically requires -specialized hardware and [drivers](https://www.openonload.org/), but even industries like -[telecom](https://www.bbc.co.uk/rd/blog/2018-04-high-speed-networking-open-source-kernel-bypass) are -finding the benefits. - -## Networks - -**Routing**: There's a reason financial firms are willing to pay -[millions of euros](https://sniperinmahwah.wordpress.com/2019/03/26/4-les-moeres-english-version/) -for rights to a small plot of land - having a straight-line connection from point A to point B means -the path their data takes is the shortest possible. In contrast, there are currently 6 computers in -between me and Google, but that may change at any moment if my ISP realizes a -[more efficient route](https://en.wikipedia.org/wiki/Border_Gateway_Protocol) is available. 
-Whether it's using
-[research-quality equipment](https://sniperinmahwah.wordpress.com/2018/05/07/shortwave-trading-part-i-the-west-chicago-tower-mystery/)
-for shortwave radio, or just making sure there's no data inadvertently going between data centers,
-routing matters.
-
-**Protocol**: TCP as a network protocol is awesome: guaranteed and in-order delivery, flow control,
-and congestion control all built in. But these attributes make the most sense when networking
-infrastructure is lossy; for systems that expect nearly all packets to be delivered correctly, the
-setup handshaking and packet acknowledgment are just overhead. Using UDP (unicast or multicast) may
-make sense in these contexts as it avoids the chatter needed to track connection state, and
-[gap-fill](https://iextrading.com/docs/IEX%20Transport%20Specification.pdf)
-[strategies](http://www.nasdaqtrader.com/content/technicalsupport/specifications/dataproducts/moldudp64.pdf)
-can handle the rest.
-
-**Switching**: Many routers/switches handle packets using "store-and-forward" behavior: wait for the
-whole packet, validate checksums, and then send to the next device. In variance terms, the time
-needed to move data between two nodes is proportional to the size of that data; the switch must
-"store" all data before it can calculate checksums and "forward" to the next node. With
-["cut-through"](https://www.networkworld.com/article/2241573/latency-and-jitter--cut-through-design-pays-off-for-arista--blade.html)
-designs, switches will begin forwarding data as soon as they know where the destination is,
-checksums be damned. This means there's a fixed cost (at the switch) for network traffic, no matter
-the size.
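-
-The arithmetic behind that distinction is worth making explicit; a back-of-the-envelope sketch
-(the frame size and link rate are example numbers):
-
-```rust
-fn main() {
-    // Store-and-forward must buffer the whole frame before sending it on,
-    // so each hop pays frame_size / link_rate in serialization delay.
-    let frame_bits = 1500.0 * 8.0; // one full-size Ethernet frame
-    let link_rate_bps = 10e9; // a 10Gbps link
-    let store_and_forward_ns = frame_bits / link_rate_bps * 1e9;
-
-    // ~1200ns per hop, and it scales with frame size. A cut-through
-    // switch forwards after reading just the header, making the per-hop
-    // cost (roughly) fixed regardless of frame size.
-    println!("per-hop store-and-forward delay: {:.0}ns", store_and_forward_ns);
-}
-```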
-
-# Final Thoughts
-
-High-performance systems, regardless of industry, are not magical. They do require extreme precision
-and attention to detail, but they're designed, built, and operated by regular people, using a lot of
-tools that are publicly available. Interested in seeing how context switching affects performance of
-your benchmarks? `taskset` should be installed in all modern Linux distributions, and can be used to
-make sure the OS never migrates your process. Curious how often garbage collection triggers during a
-crucial operation? Your language of choice will typically expose details of its operations
-([Python](https://docs.python.org/3/library/gc.html),
-[Java](https://www.oracle.com/technetwork/java/javase/tech/vmoptions-jsp-140102.html#DebuggingOptions)).
-Want to know how hard your program is stressing the TLB? Use `perf record` and look for
-`dtlb_load_misses.miss_causes_a_walk`.
-
-Two final guiding questions, then: first, before attempting to apply some of the technology above to
-your own systems, can you identify
-[where/when you care](http://wiki.c2.com/?PrematureOptimization) about "high-performance"? As an
-example, if parts of a system rely on humans pushing buttons, CPU pinning won't have any measurable
-effect. Humans are already far too slow to react in time. Second, if you're using benchmarks, are
-they being designed in a way that's actually helpful? Tools like
-[Criterion](http://www.serpentine.com/criterion/) (also in
-[Rust](https://github.com/bheisler/criterion.rs)) and Google's
-[Benchmark](https://github.com/google/benchmark) output not only average run time, but variance as
-well; your benchmarking environment is subject to the same concerns your production environment is.
-
-Finally, I believe high-performance systems are a matter of philosophy, not necessarily technique.
-Rigorous focus on variance is the first step, and there are plenty of ways to measure and mitigate
-it; once that's at an acceptable level, then optimize for speed.
diff --git a/_posts/2019-09-28-binary-format-shootout.md b/_posts/2019-09-28-binary-format-shootout.md
deleted file mode 100644
index 675dc37..0000000
--- a/_posts/2019-09-28-binary-format-shootout.md
+++ /dev/null
@@ -1,263 +0,0 @@
----
-layout: post
-title: "Binary Format Shootout"
-description: "Cap'n Proto vs. Flatbuffers vs. SBE"
-category:
-tags: [rust]
----
-
-I've found that in many personal projects,
-[analysis paralysis](https://en.wikipedia.org/wiki/Analysis_paralysis) is particularly deadly.
-Making good decisions in the beginning avoids pain and suffering later; if extra research prevents
-future problems, I'm happy to continue ~~procrastinating~~ researching indefinitely.
-
-So let's say you're in need of a binary serialization format. Data will be going over the network,
-not just in memory, so having a schema document and code generation is a must. Performance is
-crucial, so formats that support zero-copy de/serialization are given priority. And the more
-languages supported, the better; I use Rust, but can't predict what other languages this could
-interact with.
-
-Given these requirements, the candidates I could find were:
-
-1. [Cap'n Proto](https://capnproto.org/) has been around the longest, and is the most established
-2. [Flatbuffers](https://google.github.io/flatbuffers/) is the newest, and claims to have a simpler
-   encoding
-3. [Simple Binary Encoding](https://github.com/real-logic/simple-binary-encoding) has the simplest
-   encoding, but the Rust implementation is unmaintained
-
-Any one of these will satisfy the project requirements: easy to transmit over a network, reasonably
-fast, and polyglot support. But how do you actually pick one? It's impossible to know what issues
-will follow that choice, so I tend to avoid commitment until the last possible moment.
-
-Still, a choice must be made. Instead of worrying about which is "the best," I decided to build a
-small proof-of-concept system in each format and pit them against each other. All code can be found
-in the [repository](https://github.com/speice-io/marketdata-shootout) for this post.
-
-We'll discuss each in more detail below, but a quick preview of the results:
-
-- Cap'n Proto: Theoretically performs incredibly well, but the implementation had issues
-- Flatbuffers: Has some quirks, but largely lived up to its "zero-copy" promises
-- SBE: Best median and worst-case performance, but the message structure has a limited feature set
-
-# Prologue: Binary Parsing with Nom
-
-Our benchmark system will be a simple data processor; given depth-of-book market data from
-[IEX](https://iextrading.com/trading/market-data/#deep), serialize each message into the schema
-format, read it back, and calculate total size of stock traded and the lowest/highest quoted prices.
-This test isn't complex, but is representative of the project I need a binary format for.
-
-But before we make it to that point, we have to actually read in the market data. To do so, I'm
-using a library called [`nom`](https://github.com/Geal/nom). Version 5.0 was recently released and
-brought some big changes, so this was an opportunity to build a non-trivial program and get
-familiar.
-
-If you don't already know about `nom`, it's a parser combinator library: by combining different
-smaller parsers, you can assemble a parser to handle complex structures without writing tedious
-code by hand.
-For example, when parsing
-[PCAP files](https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html#rfc.section.3.3):
-
-```
-    0                   1                   2                   3
-    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-   +---------------------------------------------------------------+
- 0 |                    Block Type = 0x00000006                    |
-   +---------------------------------------------------------------+
- 4 |                      Block Total Length                       |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- 8 |                         Interface ID                          |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-12 |                        Timestamp (High)                       |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-16 |                        Timestamp (Low)                        |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-20 |                         Captured Len                          |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-24 |                          Packet Len                           |
-   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   |                          Packet Data                          |
-   |                              ...                              |
-```
-
-...you can build a parser in `nom` that looks like
-[this](https://github.com/speice-io/marketdata-shootout/blob/369613843d39cfdc728e1003123bf87f79422497/src/parsers.rs#L59-L93):
-
-```rust
-// Imports added here for context; they assume `nom` 5's `complete` parsers
-use nom::bytes::complete::{tag, take};
-use nom::number::complete::le_u32;
-use nom::sequence::tuple;
-use nom::IResult;
-
-const ENHANCED_PACKET: [u8; 4] = [0x06, 0x00, 0x00, 0x00];
-pub fn enhanced_packet_block(input: &[u8]) -> IResult<&[u8], &[u8]> {
-    let (
-        remaining,
-        (
-            block_type,
-            block_len,
-            interface_id,
-            timestamp_high,
-            timestamp_low,
-            captured_len,
-            packet_len,
-        ),
-    ) = tuple((
-        tag(ENHANCED_PACKET),
-        le_u32,
-        le_u32,
-        le_u32,
-        le_u32,
-        le_u32,
-        le_u32,
-    ))(input)?;
-
-    let (remaining, packet_data) = take(captured_len)(remaining)?;
-    Ok((remaining, packet_data))
-}
-```
-
-While this example isn't too interesting, more complex formats (like IEX market data) are where
-[`nom` really shines](https://github.com/speice-io/marketdata-shootout/blob/369613843d39cfdc728e1003123bf87f79422497/src/iex.rs).
-
-Ultimately, because the `nom` code in this shootout was the same for all formats, we're not too
-interested in its performance. Still, it's worth mentioning that building the market data parser was
-actually fun; I didn't have to write tons of boring code by hand.
-
-# Part 1: Cap'n Proto
-
-Now it's time to get into the meaty part of the story. Cap'n Proto was the first format I tried
-because of how long it has supported Rust (thanks to [dwrensha](https://github.com/dwrensha) for
-maintaining the Rust port since
-[2014!](https://github.com/capnproto/capnproto-rust/releases/tag/rustc-0.10)). However, I had a ton
-of performance concerns once I started using it.
-
-To serialize new messages, Cap'n Proto uses a "builder" object. This builder allocates memory on the
-heap to hold the message content, but because builders
-[can't be re-used](https://github.com/capnproto/capnproto-rust/issues/111), we have to allocate a
-new buffer for every single message. I was able to work around this with a
-[special builder](https://github.com/speice-io/marketdata-shootout/blob/369613843d39cfdc728e1003123bf87f79422497/src/capnp_runner.rs#L17-L51)
-that could re-use the buffer, but it required reading through Cap'n Proto's
-[benchmarks](https://github.com/capnproto/capnproto-rust/blob/master/benchmark/benchmark.rs#L124-L156)
-to find an example, and used
-[`std::mem::transmute`](https://doc.rust-lang.org/std/mem/fn.transmute.html) to bypass Rust's borrow
-checker.
-
-The process of reading messages was better, but still had issues. Cap'n Proto has two message
-encodings: a ["packed"](https://capnproto.org/encoding.html#packing) representation, and an
-"unpacked" version.
When reading "packed" messages, we need a buffer to unpack the message into -before we can use it; Cap'n Proto allocates a new buffer for each message we unpack, and I wasn't -able to figure out a way around that. In contrast, the unpacked message format should be where Cap'n -Proto shines; its main selling point is that there's [no decoding step](https://capnproto.org/). -However, accomplishing zero-copy deserialization required code in the private API -([since fixed](https://github.com/capnproto/capnproto-rust/issues/148)), and we allocate a vector on -every read for the segment table. - -In the end, I put in significant work to make Cap'n Proto as fast as possible, but there were too -many issues for me to feel comfortable using it long-term. - -# Part 2: Flatbuffers - -This is the new kid on the block. After a -[first attempt](https://github.com/google/flatbuffers/pull/3894) didn't pan out, official support -was [recently launched](https://github.com/google/flatbuffers/pull/4898). Flatbuffers intends to -address the same problems as Cap'n Proto: high-performance, polyglot, binary messaging. The -difference is that Flatbuffers claims to have a simpler wire format and -[more flexibility](https://google.github.io/flatbuffers/flatbuffers_benchmarks.html). - -On the whole, I enjoyed using Flatbuffers; the [tooling](https://crates.io/crates/flatc-rust) is -nice, and unlike Cap'n Proto, parsing messages was actually zero-copy and zero-allocation. However, -there were still some issues. - -First, Flatbuffers (at least in Rust) can't handle nested vectors. This is a problem for formats -like the following: - -``` -table Message { - symbol: string; -} -table MultiMessage { - messages:[Message]; -} -``` - -We want to create a `MultiMessage` which contains a vector of `Message`, and each `Message` itself -contains a vector (the `string` type). I was able to work around this by -[caching `Message` elements](https://github.com/speice-io/marketdata-shootout/blob/e9d07d148bf36a211a6f86802b313c4918377d1b/src/flatbuffers_runner.rs#L83) -in a `SmallVec` before building the final `MultiMessage`, but it was a painful process that I -believe contributed to poor serialization performance. - -Second, streaming support in Flatbuffers seems to be something of an -[afterthought](https://github.com/google/flatbuffers/issues/3898). Where Cap'n Proto in Rust handles -reading messages from a stream as part of the API, Flatbuffers just sticks a `u32` at the front of -each message to indicate the size. Not specifically a problem, but calculating message size without -that tag is nigh on impossible. - -Ultimately, I enjoyed using Flatbuffers, and had to do significantly less work to make it perform -well. - -# Part 3: Simple Binary Encoding - -Support for SBE was added by the author of one of my favorite -[Rust blog posts](https://web.archive.org/web/20190427124806/https://polysync.io/blog/session-types-for-hearty-codecs/). -I've [talked previously]({% post_url 2019-06-31-high-performance-systems %}) about how important -variance is in high-performance systems, so it was encouraging to read about a format that -[directly addressed](https://github.com/real-logic/simple-binary-encoding/wiki/Why-Low-Latency) my -concerns. SBE has by far the simplest binary format, but it does make some tradeoffs. - -Both Cap'n Proto and Flatbuffers use [message offsets](https://capnproto.org/encoding.html#structs) -to handle variable-length data, [unions](https://capnproto.org/language.html#unions), and various -other features. 
In contrast, messages in SBE are essentially
-[just structs](https://github.com/real-logic/simple-binary-encoding/blob/master/sbe-samples/src/main/resources/example-schema.xml);
-variable-length data is supported, but there's no union type.
-
-As mentioned in the beginning, the Rust port of SBE works well, but is
-[essentially unmaintained](https://users.rust-lang.org/t/zero-cost-abstraction-frontier-no-copy-low-allocation-ordered-decoding/11515/9).
-However, if you don't need union types and can accept that schemas are XML documents, it's still
-worth using. SBE's implementation has the best streaming support of all the formats I tested, and
-doesn't trigger allocation during de/serialization.
-
-# Results
-
-After building a test harness
-[for](https://github.com/speice-io/marketdata-shootout/blob/master/src/capnp_runner.rs)
-[each](https://github.com/speice-io/marketdata-shootout/blob/master/src/flatbuffers_runner.rs)
-[format](https://github.com/speice-io/marketdata-shootout/blob/master/src/sbe_runner.rs), it was
-time to actually take them for a spin. I used
-[this script](https://github.com/speice-io/marketdata-shootout/blob/master/run_shootout.sh) to run
-the benchmarks, and the raw results are
-[here](https://github.com/speice-io/marketdata-shootout/blob/master/shootout.csv). All data reported
-below is the average of 10 runs on a single day of IEX data. Results were validated to make sure
-that each format parsed the data correctly.
-
-## Serialization
-
-This test measures, on a
-[per-message basis](https://github.com/speice-io/marketdata-shootout/blob/master/src/main.rs#L268-L272),
-how long it takes to serialize the IEX message into the desired format and write to a pre-allocated
-buffer.
-
-| Schema               | Median | 99th Pctl | 99.9th Pctl | Total  |
-| :------------------- | :----- | :-------- | :---------- | :----- |
-| Cap'n Proto Packed   | 413ns  | 1751ns    | 2943ns      | 14.80s |
-| Cap'n Proto Unpacked | 273ns  | 1828ns    | 2836ns      | 10.65s |
-| Flatbuffers          | 355ns  | 2185ns    | 3497ns      | 14.31s |
-| SBE                  | 91ns   | 1535ns    | 2423ns      | 3.91s  |
-
-## Deserialization
-
-This test measures, on a
-[per-message basis](https://github.com/speice-io/marketdata-shootout/blob/master/src/main.rs#L294-L298),
-how long it takes to read the previously-serialized message and perform some basic aggregation. The
-aggregation code is the same for each format, so any performance differences are due solely to the
-format implementation.
-
-| Schema               | Median | 99th Pctl | 99.9th Pctl | Total  |
-| :------------------- | :----- | :-------- | :---------- | :----- |
-| Cap'n Proto Packed   | 539ns  | 1216ns    | 2599ns      | 18.92s |
-| Cap'n Proto Unpacked | 366ns  | 737ns     | 1583ns      | 12.32s |
-| Flatbuffers          | 173ns  | 421ns     | 1007ns      | 6.00s  |
-| SBE                  | 116ns  | 286ns     | 659ns       | 4.05s  |
-
-# Conclusion
-
-Building a benchmark turned out to be incredibly helpful in making a decision; because a "union"
-type isn't important to me, I can be confident that SBE best addresses my needs.
-
-While SBE was the fastest in terms of both median and worst-case performance, its worst-case times
-were proportionately further above its median than those of any other format. It seems that
-de/serialization time scales with message size, but I'll need to do more research to understand
-exactly what's going on.
diff --git a/_posts/2019-12-14-release-the-gil.md
deleted file mode 100644
index 00b47a6..0000000
--- a/_posts/2019-12-14-release-the-gil.md
+++ /dev/null
@@ -1,370 +0,0 @@
----
-layout: post
-title: "Release the GIL"
-description: "Strategies for Parallelism in Python"
-category:
-tags: [python]
----
-
-Complaining about the [Global Interpreter Lock](https://wiki.python.org/moin/GlobalInterpreterLock)
-(GIL) seems like a rite of passage for Python developers. It's easy to criticize a design decision
-made before multi-core CPUs were widely available, but the fact that it's still around indicates
-that it generally works [Good](https://wiki.c2.com/?PrematureOptimization)
-[Enough](https://wiki.c2.com/?YouArentGonnaNeedIt). Besides, there are simple and effective
-workarounds; it's not hard to start a
-[new process](https://docs.python.org/3/library/multiprocessing.html) and use message passing to
-synchronize code running in parallel.
-
-Still, wouldn't it be nice to have more than a single active interpreter thread? In an age of
-asynchronicity and _M:N_ threading, Python seems lacking. The ideal scenario is to take advantage of
-both Python's productivity and the modern CPU's parallel capabilities.
-
-Presented below are two strategies for releasing the GIL's icy grip without giving up on what makes
-Python a nice language to start with. Bear in mind: these are just the tools; no claim is made about
-whether it's a good idea to use them. Very often, unlocking the GIL is an
-[XY problem](https://en.wikipedia.org/wiki/XY_problem); you want application performance, and the
-GIL seems like an obvious bottleneck. Remember that any gains from running code in parallel come at
-the expense of project complexity; messing with the GIL is ultimately messing with Python's memory
-model.
-
-```python
-%load_ext Cython
-from numba import jit
-
-N = 1_000_000_000
-```
-
-# Cython
-
-Put simply, [Cython](https://cython.org/) is a programming language that looks a lot like Python,
-gets [transpiled](https://en.wikipedia.org/wiki/Source-to-source_compiler) to C/C++, and integrates
-well with the [CPython](https://en.wikipedia.org/wiki/CPython) API. It's great for building Python
-wrappers to C and C++ libraries, writing optimized code for numerical processing, and tons more. And
-when it comes to managing the GIL, there are two special features:
-
-- The `nogil`
-  [function annotation](https://cython.readthedocs.io/en/latest/src/userguide/external_C_code.html#declaring-a-function-as-callable-without-the-gil)
-  asserts that a Cython function is safe to use without the GIL, and compilation will fail if it
-  interacts with Python in an unsafe manner
-- The `with nogil`
-  [context manager](https://cython.readthedocs.io/en/latest/src/userguide/external_C_code.html#releasing-the-gil)
-  explicitly unlocks the CPython GIL while active
-
-Whenever Cython code runs inside a `with nogil` block on a separate thread, the Python interpreter
-is unblocked and allowed to continue work elsewhere. We'll define a "busy work" function that
-demonstrates this principle in action:
-
-```python
-%%cython
-
-# Annotating a function with `nogil` indicates only that it is safe
-# to call in a `with nogil` block. It *does not* release the GIL.
-cdef unsigned long fibonacci(unsigned long n) nogil: - if n <= 1: - return n - - cdef unsigned long a = 0, b = 1, c = 0 - - c = a + b - for _i in range(2, n): - a = b - b = c - c = a + b - - return c - - -def cython_nogil(unsigned long n): - # Explicitly release the GIL while running `fibonacci` - with nogil: - value = fibonacci(n) - - return value - - -def cython_gil(unsigned long n): - # Because the GIL is not explicitly released, it implicitly - # remains acquired when running the `fibonacci` function - return fibonacci(n) -``` - -First, let's time how long it takes Cython to calculate the billionth Fibonacci number: - -```python -%%time -_ = cython_gil(N); -``` - ->
-> CPU times: user 365 ms, sys: 0 ns, total: 365 ms
-> Wall time: 372 ms
-> 
- -```python -%%time -_ = cython_nogil(N); -``` - ->
-> CPU times: user 381 ms, sys: 0 ns, total: 381 ms
-> Wall time: 388 ms
-> 
- -Both versions (with and without GIL) take effectively the same amount of time to run. Even when -running this calculation in parallel on separate threads, it is expected that the run time will -double because only one thread can be active at a time: - -```python -%%time -from threading import Thread - -# Create the two threads to run on -t1 = Thread(target=cython_gil, args=[N]) -t2 = Thread(target=cython_gil, args=[N]) -# Start the threads -t1.start(); t2.start() -# Wait for the threads to finish -t1.join(); t2.join() -``` - ->
-> CPU times: user 641 ms, sys: 5.62 ms, total: 647 ms
-> Wall time: 645 ms
-> 
- -However, if the first thread releases the GIL, the second thread is free to acquire it and run in -parallel: - -```python -%%time - -t1 = Thread(target=cython_nogil, args=[N]) -t2 = Thread(target=cython_gil, args=[N]) -t1.start(); t2.start() -t1.join(); t2.join() -``` - ->
-> CPU times: user 717 ms, sys: 372 µs, total: 718 ms
-> Wall time: 358 ms
-> 
- -Because `user` time represents the sum of processing time on all threads, it doesn't change much. -The ["wall time"](https://en.wikipedia.org/wiki/Elapsed_real_time) has been cut roughly in half -because each function is running simultaneously. - -Keep in mind that the **order in which threads are started** makes a difference! - -```python -%%time - -# Note that the GIL-locked version is started first -t1 = Thread(target=cython_gil, args=[N]) -t2 = Thread(target=cython_nogil, args=[N]) -t1.start(); t2.start() -t1.join(); t2.join() -``` - ->
-> CPU times: user 667 ms, sys: 0 ns, total: 667 ms
-> Wall time: 672 ms
-> 
- -Even though the second thread releases the GIL while running, it can't start until the first has -completed. Thus, the overall runtime is effectively the same as running two GIL-locked threads. - -Finally, be aware that attempting to unlock the GIL from a thread that doesn't own it will crash the -**interpreter**, not just the thread attempting the unlock: - -```python -%%cython - -cdef int cython_recurse(int n) nogil: - if n <= 0: - return 0 - - with nogil: - return cython_recurse(n - 1) - -cython_recurse(2) -``` - ->
-> Fatal Python error: PyEval_SaveThread: NULL tstate
-> 
-> Thread 0x00007f499effd700 (most recent call first):
->   File "/home/bspeice/.virtualenvs/release-the-gil/lib/python3.7/site-packages/ipykernel/parentpoller.py", line 39 in run
->   File "/usr/lib/python3.7/threading.py", line 926 in _bootstrap_inner
->   File "/usr/lib/python3.7/threading.py", line 890 in _bootstrap
-> 
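-
-For contrast, the same recursion is safe when the GIL is released exactly once, at the outermost
-level; here's a sketch of that pattern (names are illustrative):
-
-```python
-%%cython
-
-# A `nogil` function may call other `nogil` functions directly; there's no
-# need to re-enter `with nogil`, so the GIL is only released once.
-cdef int safe_recurse(int n) nogil:
-    if n <= 0:
-        return 0
-
-    return safe_recurse(n - 1)
-
-def run_safe_recurse(int n):
-    # Release the GIL exactly once, at the outermost call
-    with nogil:
-        result = safe_recurse(n)
-
-    return result
-
-run_safe_recurse(2)
-```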
- -In practice, avoiding this issue is simple. First, `nogil` functions probably shouldn't contain -`with nogil` blocks. Second, Cython can -[conditionally acquire/release](https://cython.readthedocs.io/en/latest/src/userguide/external_C_code.html#conditional-acquiring-releasing-the-gil) -the GIL, so these conditions can be used to synchronize access. Finally, Cython's documentation for -[external C code](https://cython.readthedocs.io/en/latest/src/userguide/external_C_code.html#acquiring-and-releasing-the-gil) -contains more detail on how to safely manage the GIL. - -To conclude: use Cython's `nogil` annotation to assert that functions are safe for calling when the -GIL is unlocked, and `with nogil` to actually unlock the GIL and run those functions. - -# Numba - -Like Cython, [Numba](https://numba.pydata.org/) is a "compiled Python." Where Cython works by -compiling a Python-like language to C/C++, Numba compiles Python bytecode _directly to machine code_ -at runtime. Behavior is controlled with a special `@jit` decorator; calling a decorated function -first compiles it to machine code before running. Calling the function a second time re-uses that -machine code unless the argument types have changed. - -Numba works best when a `nopython=True` argument is added to the `@jit` decorator; functions -compiled in [`nopython`](http://numba.pydata.org/numba-doc/latest/user/jit.html?#nopython) mode -avoid the CPython API and have performance comparable to C. Further, adding `nogil=True` to the -`@jit` decorator unlocks the GIL while that function is running. Note that `nogil` and `nopython` -are separate arguments; while it is necessary for code to be compiled in `nopython` mode in order to -release the lock, the GIL will remain locked if `nogil=False` (the default). - -Let's repeat the same experiment, this time using Numba instead of Cython: - -```python -# The `int` type annotation is only for humans and is ignored -# by Numba. -@jit(nopython=True, nogil=True) -def numba_nogil(n: int) -> int: - if n <= 1: - return n - - a = 0 - b = 1 - - c = a + b - for _i in range(2, n): - a = b - b = c - c = a + b - - return c - - -# Run using `nopython` mode to receive a performance boost, -# but GIL remains locked due to `nogil=False` by default. -@jit(nopython=True) -def numba_gil(n: int) -> int: - if n <= 1: - return n - - a = 0 - b = 1 - - c = a + b - for _i in range(2, n): - a = b - b = c - c = a + b - - return c - - -# Call each function once to force compilation; we don't want -# the timing statistics to include how long it takes to compile. -numba_nogil(N) -numba_gil(N); -``` - -We'll perform the same tests as above; first, figure out how long it takes the function to run: - -```python -%%time -_ = numba_gil(N) -``` - ->
-> CPU times: user 253 ms, sys: 258 µs, total: 253 ms
-> Wall time: 251 ms
-> 
- - -Aside: it's not immediately clear why Numba takes ~20% less time to run than Cython for code that should be -effectively identical after compilation. - - -When running two GIL-locked threads, the result (as expected) takes around twice as long to compute: - -```python -%%time -t1 = Thread(target=numba_gil, args=[N]) -t2 = Thread(target=numba_gil, args=[N]) -t1.start(); t2.start() -t1.join(); t2.join() -``` - ->
-> CPU times: user 541 ms, sys: 3.96 ms, total: 545 ms
-> Wall time: 541 ms
-> 
- -But if the GIL-unlocking thread starts first, both threads run in parallel: - -```python -%%time -t1 = Thread(target=numba_nogil, args=[N]) -t2 = Thread(target=numba_gil, args=[N]) -t1.start(); t2.start() -t1.join(); t2.join() -``` - ->
-> CPU times: user 551 ms, sys: 7.77 ms, total: 559 ms
-> Wall time: 279 ms
-> 
- -Just like Cython, starting the GIL-locked thread first leads to poor performance: - -```python -%%time -t1 = Thread(target=numba_gil, args=[N]) -t2 = Thread(target=numba_nogil, args=[N]) -t1.start(); t2.start() -t1.join(); t2.join() -``` - ->
-> CPU times: user 524 ms, sys: 0 ns, total: 524 ms
-> Wall time: 522 ms
-> 
-
-Finally, unlike Cython, Numba will unlock the GIL if and only if it is currently acquired;
-recursively calling `@jit(nogil=True)` functions is perfectly safe:
-
-```python
-from numba import jit
-
-@jit(nopython=True, nogil=True)
-def numba_recurse(n: int) -> int:
-    if n <= 0:
-        return 0
-
-    return numba_recurse(n - 1)
-
-numba_recurse(2);
-```
-
-# Conclusion
-
-Before finishing, it's important to address pain points that will show up if these techniques are
-used in a more realistic project:
-
-First, code running in a GIL-free context will likely also need non-trivial data structures;
-GIL-free functions aren't useful if they're constantly interacting with Python objects whose access
-requires the GIL. Cython provides
-[extension types](http://docs.cython.org/en/latest/src/tutorial/cdef_classes.html) and Numba
-provides a [`@jitclass`](https://numba.pydata.org/numba-doc/dev/user/jitclass.html) decorator to
-address this need.
-
-Second, building and distributing applications that make use of Cython/Numba can be complicated.
-Cython packages require running the compiler, (potentially) linking/packaging external dependencies,
-and distributing a binary wheel. Numba is generally simpler because the code being distributed is
-pure Python, but can be tricky since errors aren't detected until runtime.
-
-Finally, while unlocking the GIL is often a solution in search of a problem, both Cython and Numba
-provide tools to directly manage the GIL when appropriate. This enables true parallelism (not just
-[concurrency](https://stackoverflow.com/a/1050257)) that is impossible in vanilla Python.
diff --git a/_posts/2022-11-20-webpack-industrial-complex.md
deleted file mode 100644
index 33fe67a..0000000
--- a/_posts/2022-11-20-webpack-industrial-complex.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: post
-title: "The webpack industrial complex"
-description: "Reflections on a new project"
-category:
-tags: [webpack, react, vite]
----
-
-This started because I wanted to build a synthesizer. Setting a goal of "digital DX7" was ambitious, but I needed something unrelated to the day job. Beyond that, working with audio seemed like a good challenge. I enjoy performance-focused code, and performance problems in audio are conspicuous. Building a web project was an obvious choice because the Web Audio API is well documented and a browser app doesn't depend on a large Digital Audio Workstation (DAW).
-
-The project was soon derailed trying to sort out technical issues unrelated to the original purpose. Finding a resolution was a frustrating journey, and it's still not clear whether those problems were my fault. As a result, I'm writing this to try making sense of it, as a case study/reference material, and to salvage something from the process.
-
-## Starting strong
-
-The sole starting requirement was to write everything in TypeScript. Not because of project scale, but because guardrails help with unfamiliar territory. Keeping that in mind, the first question was: how does one start a new project? All I actually needed was "compile TypeScript, show it in a browser."
-
-Create React App (CRA) came to the rescue and the rest of that evening was a joy. My TypeScript/JavaScript skills were rusty, but the online documentation was helpful. I had never understood the appeal of JSX (why put a DOM in JavaScript?) until it made connecting an `onEvent` handler and a function easy.
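-
-As a rough sketch (illustrative code, not the project's actual source), this is the kind of component that made JSX click for me:
-
-```tsx
-import React from "react";
-
-// Clicking the button starts a one-second 440 Hz sine wave; JSX makes
-// wiring the DOM event to the handler trivial.
-export function PlayTone() {
-  const onClick = () => {
-    const ctx = new AudioContext();
-    const osc = ctx.createOscillator();
-    osc.type = "sine";
-    osc.frequency.value = 440; // A4
-    osc.connect(ctx.destination);
-    osc.start();
-    osc.stop(ctx.currentTime + 1);
-  };
-
-  return <button onClick={onClick}>Play A=440</button>;
-}
-```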
-
-Some quick dimensional analysis later and there was a sine wave oscillator playing A=440 through the speakers. I specifically remember thinking "modern browsers are magical."
-
-## Continuing on
-
-Now comes the first mistake: I began to worry about "scale" before encountering an actual problem. Rather than rendering audio in the main thread, why not use audio worklets and render in a background thread instead?
-
-The first sign something was amiss came from the TypeScript compiler errors showing the audio worklet API [was missing](https://github.com/microsoft/TypeScript/issues/28308). After searching out GitHub issues and (unsuccessfully) tweaking the `tsconfig.json` settings, I settled on installing a package and moving on.
-
-The next problem came from actually using the API. Worklets must load from separate "modules," but it wasn't clear how to guarantee the worklet code stayed separate from the application. I saw recommendations to use `new URL(..., import.meta.url)` and it worked! Well, kind of:
-
-![Browser error](/assets/images/2022-11-20-video_mp2t.png)
-
-That file has the audio processor code, so why does it get served with `Content-Type: video/mp2t`?
-
-## Floundering about
-
-Now comes the second mistake: even though I didn't understand the error, I ignored recommendations to [just use JavaScript](https://hackernoon.com/implementing-audioworklets-with-react-8a80a470474) and stuck by the original TypeScript requirement.
-
-I tried different project structures. Moving the worklet code to a new folder didn't help, nor did setting up a monorepo and placing it in a new package.
-
-I tried three different CRA tools - `react-app-rewired`, `craco`, `customize-react-app` - but got the same problem. Each has varying levels of compatibility with recent CRA versions, so it wasn't clear if I had the right solution but implemented it incorrectly. After attempting to eject the application and panicking after seeing the configuration, I abandoned that as well.
-
-I tried changing the webpack configuration: using [new](https://github.com/webpack/webpack/issues/11543#issuecomment-917673256) [loaders](https://github.com/popelenkow/worker-url), setting [asset rules](https://github.com/webpack/webpack/discussions/14093#discussioncomment-1257149), even [changing how webpack detects worker resources](https://github.com/webpack/webpack/issues/11543#issuecomment-826897590). In hindsight, entry points may have been the answer. But because CRA actively resists attempts to change its webpack configuration, and I couldn't find audio worklet examples in any other framework, I gave up.
-
-I tried so many application frameworks. Next.js looked like a good candidate, but added its own [bespoke webpack complexity](https://github.com/vercel/next.js/issues/24907) to the existing confusion. Astro had the best "getting started" experience, but I refuse to install an IDE-specific plugin. I first used Deno while exploring Lume, but it couldn't import the audio worklet types (maybe because of module compatibility?). Each framework was unique in its own way (shout-out to SvelteKit) but I couldn't figure out how to make them work.
-
-## Learning and reflecting
-
-I ended up using Vite and vite-plugin-react-pages to handle both "build the app" and "bundle worklets," but the specific tool choice isn't important. Instead, the focus should be on lessons learned.
-
-For myself:
-
-- I'm obsessed with tooling, to the point it can derail the original goal.
While it comes from a good place (for example: "types are awesome"), it can get in the way of more important work -- I tend to reach for online resources right after seeing a new problem. While finding help online is often faster, spending time understanding the problem would have been more productive than cycling through (often outdated) blog posts - -For the tools: - -- Resource bundling is great and solves a genuine challenge. I've heard too many horror stories of developers writing modules by hand to believe this is unnecessary complexity -- Webpack is a build system and modern frameworks are deeply dependent on it (hence the "webpack industrial complex"). While this often saves users from unnecessary complexity, there's no path forward if something breaks -- There's little ability to mix and match tools across frameworks. Next.js and Gatsby let users extend webpack, but because each framework adds its own modules, changes aren't portable. After spending a week looking at webpack, I had an example running with parcel in thirty minutes, but couldn't integrate it - -In the end, learning new systems is fun, but a focus on tools that "just work" can leave users out in the cold if they break down. \ No newline at end of file diff --git a/archive/index.html b/archive/index.html new file mode 100644 index 0000000..fc96043 --- /dev/null +++ b/archive/index.html @@ -0,0 +1 @@ +Archive | The Old Speice Guy \ No newline at end of file diff --git a/assets/css/fonts.css b/assets/css/fonts.css deleted file mode 100644 index e07ecfc..0000000 --- a/assets/css/fonts.css +++ /dev/null @@ -1,15 +0,0 @@ -@font-face { - font-family: 'JetBrains Mono'; - src: url('/assets/font/JetBrainsMono-Regular.woff2') format('woff2'), - url('/assets/font/JetBrainsMono-Regular.woff') format('woff'); - font-weight: normal; - font-style: normal; -} - -@font-face { - font-family: 'Lato'; - src: url('/assets/font/lato-regular-webfont.woff2') format('woff2'), - url('/assets/font/lato-regular-webfont.woff') format('woff'); - font-weight: normal; - font-style: normal; -} \ No newline at end of file diff --git a/assets/css/style.scss b/assets/css/style.scss deleted file mode 100644 index ea280e2..0000000 --- a/assets/css/style.scss +++ /dev/null @@ -1,119 +0,0 @@ ---- ---- - -// Import the theme rules -@import "theme"; - -body { - max-width: 100%; - overflow-x: hidden; - font-family: 'Lato', sans-serif; -} - -.navbar { - color: $gray; -} - -.separator { - margin-right: .45rem; - margin-left: .25rem; - color: #000; - &:after { - content: '\00a0/'; - } -} - -header { - padding-top: 80px; - padding-bottom: 0; -}; - -header h1,h2 { - color: #000; -} - -.post-description { - color: #555; -} - -.post-container a { - color: #555; - border-bottom-color: $gray; - border-bottom-style: dotted; - border-bottom-width: 1px; - - position: relative; - display: inline-block; - padding: 1px 1px; - transition: color ease 0.3s; - - &::after { - content: ''; - position: absolute; - z-index: -1; - width: 100%; - height: 0%; - left: 0; - bottom: 0; - background-color: $gray; - transition: all ease 0.3s; - } - - &:hover { - color: #fff; - border-bottom-style: solid; - &::after { - height: 100%; - } - } -} - -body pre { - font-size: 15px; -} - -pre.highlight, code { - font-family: 'JetBrains Mono', monospace; -} - -div.highlighter-rouge { - // Default theme uses `width: 100vw`, which while cool, does cause the page - // to exceed screen width and trigger horizontal scrolling. No bueno. 
- width: 99vw; -} - -.post-date { - // On the front page, make sure titles don't force wrapping the date box content - text-align: right; - white-space: nowrap; -} - -blockquote { - color: #555; - right: 100px; - margin-left: 0; - padding-left: 1.8rem; - border-left: 5px solid $gray; -} - -.post-nav { - /* Insert your custom styling here. Example: - - font-size: 14px; - */ - display: flex; - margin-top: 1em; - margin-bottom: 1em; -} -.post-nav div { - /* flex-grow, flex-shrink, flex-basis */ - flex: 1 1 0; -} -.post-nav-next { - text-align: right; -} - -th, td { - border-bottom: 1px solid $gray; - padding: 0.75em; -} diff --git a/assets/css/styles.ae6ff4a3.css b/assets/css/styles.ae6ff4a3.css new file mode 100644 index 0000000..9ed0982 --- /dev/null +++ b/assets/css/styles.ae6ff4a3.css @@ -0,0 +1 @@ +:root{--ifm-color-scheme:light;--ifm-dark-value:10%;--ifm-darker-value:15%;--ifm-darkest-value:30%;--ifm-light-value:15%;--ifm-lighter-value:30%;--ifm-lightest-value:50%;--ifm-contrast-background-value:90%;--ifm-contrast-foreground-value:70%;--ifm-contrast-background-dark-value:70%;--ifm-contrast-foreground-dark-value:90%;--ifm-color-primary:#3578e5;--ifm-color-secondary:#ebedf0;--ifm-color-success:#00a400;--ifm-color-info:#54c7ec;--ifm-color-warning:#ffba00;--ifm-color-danger:#fa383e;--ifm-color-primary-dark:#306cce;--ifm-color-primary-darker:#2d66c3;--ifm-color-primary-darkest:#2554a0;--ifm-color-primary-light:#538ce9;--ifm-color-primary-lighter:#72a1ed;--ifm-color-primary-lightest:#9abcf2;--ifm-color-primary-contrast-background:#ebf2fc;--ifm-color-primary-contrast-foreground:#102445;--ifm-color-secondary-dark:#d4d5d8;--ifm-color-secondary-darker:#c8c9cc;--ifm-color-secondary-darkest:#a4a6a8;--ifm-color-secondary-light:#eef0f2;--ifm-color-secondary-lighter:#f1f2f5;--ifm-color-secondary-lightest:#f5f6f8;--ifm-color-secondary-contrast-background:#fdfdfe;--ifm-color-secondary-contrast-foreground:#474748;--ifm-color-success-dark:#009400;--ifm-color-success-darker:#008b00;--ifm-color-success-darkest:#007300;--ifm-color-success-light:#26b226;--ifm-color-success-lighter:#4dbf4d;--ifm-color-success-lightest:#80d280;--ifm-color-success-contrast-background:#e6f6e6;--ifm-color-success-contrast-foreground:#003100;--ifm-color-info-dark:#4cb3d4;--ifm-color-info-darker:#47a9c9;--ifm-color-info-darkest:#3b8ba5;--ifm-color-info-light:#6ecfef;--ifm-color-info-lighter:#87d8f2;--ifm-color-info-lightest:#aae3f6;--ifm-color-info-contrast-background:#eef9fd;--ifm-color-info-contrast-foreground:#193c47;--ifm-color-warning-dark:#e6a700;--ifm-color-warning-darker:#d99e00;--ifm-color-warning-darkest:#b38200;--ifm-color-warning-light:#ffc426;--ifm-color-warning-lighter:#ffcf4d;--ifm-color-warning-lightest:#ffdd80;--ifm-color-warning-contrast-background:#fff8e6;--ifm-color-warning-contrast-foreground:#4d3800;--ifm-color-danger-dark:#e13238;--ifm-color-danger-darker:#d53035;--ifm-color-danger-darkest:#af272b;--ifm-color-danger-light:#fb565b;--ifm-color-danger-lighter:#fb7478;--ifm-color-danger-lightest:#fd9c9f;--ifm-color-danger-contrast-background:#ffebec;--ifm-color-danger-contrast-foreground:#4b1113;--ifm-color-white:#fff;--ifm-color-black:#000;--ifm-color-gray-0:var(--ifm-color-white);--ifm-color-gray-100:#f5f6f7;--ifm-color-gray-200:#ebedf0;--ifm-color-gray-300:#dadde1;--ifm-color-gray-400:#ccd0d5;--ifm-color-gray-500:#bec3c9;--ifm-color-gray-600:#8d949e;--ifm-color-gray-700:#606770;--ifm-color-gray-800:#444950;--ifm-color-gray-900:#1c1e21;--ifm-color-gray-1000:var(--ifm-color-black);--ifm-color-emphasis-0:var(--ifm-
color-gray-0);--ifm-color-emphasis-100:var(--ifm-color-gray-100);--ifm-color-emphasis-200:var(--ifm-color-gray-200);--ifm-color-emphasis-300:var(--ifm-color-gray-300);--ifm-color-emphasis-400:var(--ifm-color-gray-400);--ifm-color-emphasis-500:var(--ifm-color-gray-500);--ifm-color-emphasis-600:var(--ifm-color-gray-600);--ifm-color-emphasis-700:var(--ifm-color-gray-700);--ifm-color-emphasis-800:var(--ifm-color-gray-800);--ifm-color-emphasis-900:var(--ifm-color-gray-900);--ifm-color-emphasis-1000:var(--ifm-color-gray-1000);--ifm-color-content:var(--ifm-color-emphasis-900);--ifm-color-content-inverse:var(--ifm-color-emphasis-0);--ifm-color-content-secondary:#525860;--ifm-background-color:transparent;--ifm-background-surface-color:var(--ifm-color-content-inverse);--ifm-global-border-width:1px;--ifm-global-radius:.4rem;--ifm-hover-overlay:rgba(0,0,0,.05);--ifm-font-color-base:var(--ifm-color-content);--ifm-font-color-base-inverse:var(--ifm-color-content-inverse);--ifm-font-color-secondary:var(--ifm-color-content-secondary);--ifm-font-family-base:system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,Cantarell,Noto Sans,sans-serif,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";--ifm-font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--ifm-font-size-base:100%;--ifm-font-weight-light:300;--ifm-font-weight-normal:400;--ifm-font-weight-semibold:500;--ifm-font-weight-bold:700;--ifm-font-weight-base:var(--ifm-font-weight-normal);--ifm-line-height-base:1.65;--ifm-global-spacing:1rem;--ifm-spacing-vertical:var(--ifm-global-spacing);--ifm-spacing-horizontal:var(--ifm-global-spacing);--ifm-transition-fast:.2s;--ifm-transition-slow:.4s;--ifm-transition-timing-default:cubic-bezier(.08,.52,.52,1);--ifm-global-shadow-lw:0 1px 2px 0 rgba(0,0,0,.1);--ifm-global-shadow-md:0 5px 40px rgba(0,0,0,.2);--ifm-global-shadow-tl:0 12px 28px 0 rgba(0,0,0,.2),0 2px 4px 0 
rgba(0,0,0,.1);--ifm-z-index-dropdown:100;--ifm-z-index-fixed:200;--ifm-z-index-overlay:400;--ifm-container-width:1140px;--ifm-container-width-xl:1320px;--ifm-code-background:#f6f7f8;--ifm-code-border-radius:var(--ifm-global-radius);--ifm-code-font-size:90%;--ifm-code-padding-horizontal:.1rem;--ifm-code-padding-vertical:.1rem;--ifm-pre-background:var(--ifm-code-background);--ifm-pre-border-radius:var(--ifm-code-border-radius);--ifm-pre-color:inherit;--ifm-pre-line-height:1.45;--ifm-pre-padding:1rem;--ifm-heading-color:inherit;--ifm-heading-margin-top:0;--ifm-heading-margin-bottom:var(--ifm-spacing-vertical);--ifm-heading-font-family:var(--ifm-font-family-base);--ifm-heading-font-weight:var(--ifm-font-weight-bold);--ifm-heading-line-height:1.25;--ifm-h1-font-size:2rem;--ifm-h2-font-size:1.5rem;--ifm-h3-font-size:1.25rem;--ifm-h4-font-size:1rem;--ifm-h5-font-size:.875rem;--ifm-h6-font-size:.85rem;--ifm-image-alignment-padding:1.25rem;--ifm-leading-desktop:1.25;--ifm-leading:calc(var(--ifm-leading-desktop)*1rem);--ifm-list-left-padding:2rem;--ifm-list-margin:1rem;--ifm-list-item-margin:.25rem;--ifm-list-paragraph-margin:1rem;--ifm-table-cell-padding:.75rem;--ifm-table-background:transparent;--ifm-table-stripe-background:rgba(0,0,0,.03);--ifm-table-border-width:1px;--ifm-table-border-color:var(--ifm-color-emphasis-300);--ifm-table-head-background:inherit;--ifm-table-head-color:inherit;--ifm-table-head-font-weight:var(--ifm-font-weight-bold);--ifm-table-cell-color:inherit;--ifm-link-color:var(--ifm-color-primary);--ifm-link-decoration:none;--ifm-link-hover-color:var(--ifm-link-color);--ifm-link-hover-decoration:underline;--ifm-paragraph-margin-bottom:var(--ifm-leading);--ifm-blockquote-font-size:var(--ifm-font-size-base);--ifm-blockquote-border-left-width:2px;--ifm-blockquote-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-blockquote-padding-vertical:0;--ifm-blockquote-shadow:none;--ifm-blockquote-color:var(--ifm-color-emphasis-800);--ifm-blockquote-border-color:var(--ifm-color-emphasis-300);--ifm-hr-background-color:var(--ifm-color-emphasis-500);--ifm-hr-height:1px;--ifm-hr-margin-vertical:1.5rem;--ifm-scrollbar-size:7px;--ifm-scrollbar-track-background-color:#f1f1f1;--ifm-scrollbar-thumb-background-color:silver;--ifm-scrollbar-thumb-hover-background-color:#a7a7a7;--ifm-alert-background-color:inherit;--ifm-alert-border-color:inherit;--ifm-alert-border-radius:var(--ifm-global-radius);--ifm-alert-border-width:0px;--ifm-alert-border-left-width:5px;--ifm-alert-color:var(--ifm-font-color-base);--ifm-alert-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-alert-padding-vertical:var(--ifm-spacing-vertical);--ifm-alert-shadow:var(--ifm-global-shadow-lw);--ifm-avatar-intro-margin:1rem;--ifm-avatar-intro-alignment:inherit;--ifm-avatar-photo-size:3rem;--ifm-badge-background-color:inherit;--ifm-badge-border-color:inherit;--ifm-badge-border-radius:var(--ifm-global-radius);--ifm-badge-border-width:var(--ifm-global-border-width);--ifm-badge-color:var(--ifm-color-white);--ifm-badge-padding-horizontal:calc(var(--ifm-spacing-horizontal)*.5);--ifm-badge-padding-vertical:calc(var(--ifm-spacing-vertical)*.25);--ifm-breadcrumb-border-radius:1.5rem;--ifm-breadcrumb-spacing:.5rem;--ifm-breadcrumb-color-active:var(--ifm-color-primary);--ifm-breadcrumb-item-background-active:var(--ifm-hover-overlay);--ifm-breadcrumb-padding-horizontal:.8rem;--ifm-breadcrumb-padding-vertical:.4rem;--ifm-breadcrumb-size-multiplier:1;--ifm-breadcrumb-separator:url("data:image/svg+xml;utf8,");--ifm-breadcrumb-separator-filter:no
ne;--ifm-breadcrumb-separator-size:.5rem;--ifm-breadcrumb-separator-size-multiplier:1.25;--ifm-button-background-color:inherit;--ifm-button-border-color:var(--ifm-button-background-color);--ifm-button-border-width:var(--ifm-global-border-width);--ifm-button-color:var(--ifm-font-color-base-inverse);--ifm-button-font-weight:var(--ifm-font-weight-bold);--ifm-button-padding-horizontal:1.5rem;--ifm-button-padding-vertical:.375rem;--ifm-button-size-multiplier:1;--ifm-button-transition-duration:var(--ifm-transition-fast);--ifm-button-border-radius:calc(var(--ifm-global-radius)*var(--ifm-button-size-multiplier));--ifm-button-group-spacing:2px;--ifm-card-background-color:var(--ifm-background-surface-color);--ifm-card-border-radius:calc(var(--ifm-global-radius)*2);--ifm-card-horizontal-spacing:var(--ifm-global-spacing);--ifm-card-vertical-spacing:var(--ifm-global-spacing);--ifm-toc-border-color:var(--ifm-color-emphasis-300);--ifm-toc-link-color:var(--ifm-color-content-secondary);--ifm-toc-padding-vertical:.5rem;--ifm-toc-padding-horizontal:.5rem;--ifm-dropdown-background-color:var(--ifm-background-surface-color);--ifm-dropdown-font-weight:var(--ifm-font-weight-semibold);--ifm-dropdown-link-color:var(--ifm-font-color-base);--ifm-dropdown-hover-background-color:var(--ifm-hover-overlay);--ifm-footer-background-color:var(--ifm-color-emphasis-100);--ifm-footer-color:inherit;--ifm-footer-link-color:var(--ifm-color-emphasis-700);--ifm-footer-link-hover-color:var(--ifm-color-primary);--ifm-footer-link-horizontal-spacing:.5rem;--ifm-footer-padding-horizontal:calc(var(--ifm-spacing-horizontal)*2);--ifm-footer-padding-vertical:calc(var(--ifm-spacing-vertical)*2);--ifm-footer-title-color:inherit;--ifm-footer-logo-max-width:min(30rem,90vw);--ifm-hero-background-color:var(--ifm-background-surface-color);--ifm-hero-text-color:var(--ifm-color-emphasis-800);--ifm-menu-color:var(--ifm-color-emphasis-700);--ifm-menu-color-active:var(--ifm-color-primary);--ifm-menu-color-background-active:var(--ifm-hover-overlay);--ifm-menu-color-background-hover:var(--ifm-hover-overlay);--ifm-menu-link-padding-horizontal:.75rem;--ifm-menu-link-padding-vertical:.375rem;--ifm-menu-link-sublist-icon:url("data:image/svg+xml;utf8,");--ifm-menu-link-sublist-icon-filter:none;--ifm-navbar-background-color:var(--ifm-background-surface-color);--ifm-navbar-height:3.75rem;--ifm-navbar-item-padding-horizontal:.75rem;--ifm-navbar-item-padding-vertical:.25rem;--ifm-navbar-link-color:var(--ifm-font-color-base);--ifm-navbar-link-hover-color:var(--ifm-color-primary);--ifm-navbar-link-active-color:var(--ifm-link-color);--ifm-navbar-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-navbar-padding-vertical:calc(var(--ifm-spacing-vertical)*.5);--ifm-navbar-shadow:var(--ifm-global-shadow-lw);--ifm-navbar-search-input-background-color:var(--ifm-color-emphasis-200);--ifm-navbar-search-input-color:var(--ifm-color-emphasis-800);--ifm-navbar-search-input-placeholder-color:var(--ifm-color-emphasis-500);--ifm-navbar-search-input-icon:url("data:image/svg+xml;utf8,");--ifm-navbar-sidebar-width:83vw;--ifm-pagination-border-radius:var(--ifm-global-radius);--ifm-pagination-color-active:var(--ifm-color-primary);--ifm-pagination-font-size:1rem;--ifm-pagination-item-active-background:var(--ifm-hover-overlay);--ifm-pagination-page-spacing:.2em;--ifm-pagination-padding-horizontal:calc(var(--ifm-spacing-horizontal)*1);--ifm-pagination-padding-vertical:calc(var(--ifm-spacing-vertical)*.25);--ifm-pagination-nav-border-radius:var(--ifm-global-radius);--ifm-pagination-nav-co
lor-hover:var(--ifm-color-primary);--ifm-pills-color-active:var(--ifm-color-primary);--ifm-pills-color-background-active:var(--ifm-hover-overlay);--ifm-pills-spacing:.125rem;--ifm-tabs-color:var(--ifm-font-color-secondary);--ifm-tabs-color-active:var(--ifm-color-primary);--ifm-tabs-color-active-border:var(--ifm-tabs-color-active);--ifm-tabs-padding-horizontal:1rem;--ifm-tabs-padding-vertical:1rem}*{box-sizing:border-box}html{background-color:var(--ifm-background-color);color:var(--ifm-font-color-base);color-scheme:var(--ifm-color-scheme);font:var(--ifm-font-size-base)/var(--ifm-line-height-base)var(--ifm-font-family-base);-webkit-font-smoothing:antialiased;-webkit-tap-highlight-color:transparent;text-rendering:optimizelegibility;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%}body{word-wrap:break-word;margin:0}iframe{color-scheme:normal;border:0}.container{max-width:var(--ifm-container-width);padding:0 var(--ifm-spacing-horizontal);width:100%;margin:0 auto}.container--fluid{max-width:inherit}.row{margin:0 calc(var(--ifm-spacing-horizontal)*-1);flex-wrap:wrap;display:flex}.row--no-gutters{margin-left:0;margin-right:0}.row--no-gutters>.col{padding-left:0;padding-right:0}.row--align-top{align-items:flex-start}.row--align-bottom{align-items:flex-end}.row--align-center{align-items:center}.row--align-stretch{align-items:stretch}.row--align-baseline{align-items:baseline}.col{--ifm-col-width:100%;max-width:var(--ifm-col-width);padding:0 var(--ifm-spacing-horizontal);flex:1 0;width:100%;margin-left:0}.col[class*=col--]{flex:0 0 var(--ifm-col-width)}.col--1{--ifm-col-width:calc(1/12*100%)}.col--offset-1{margin-left:8.33333%}.col--2{--ifm-col-width:calc(2/12*100%)}.col--offset-2{margin-left:16.6667%}.col--3{--ifm-col-width:calc(3/12*100%)}.col--offset-3{margin-left:25%}.col--4{--ifm-col-width:calc(4/12*100%)}.col--offset-4{margin-left:33.3333%}.col--5{--ifm-col-width:calc(5/12*100%)}.col--offset-5{margin-left:41.6667%}.col--6{--ifm-col-width:calc(6/12*100%)}.col--offset-6{margin-left:50%}.col--7{--ifm-col-width:calc(7/12*100%)}.col--offset-7{margin-left:58.3333%}.col--8{--ifm-col-width:calc(8/12*100%)}.col--offset-8{margin-left:66.6667%}.col--9{--ifm-col-width:calc(9/12*100%)}.col--offset-9{margin-left:75%}.col--10{--ifm-col-width:calc(10/12*100%)}.col--offset-10{margin-left:83.3333%}.col--11{--ifm-col-width:calc(11/12*100%)}.col--offset-11{margin-left:91.6667%}.col--12{--ifm-col-width:calc(12/12*100%)}.col--offset-12{margin-left:100%}.margin--none{margin:0!important}.margin-top--none{margin-top:0!important}.margin-left--none{margin-left:0!important}.margin-bottom--none{margin-bottom:0!important}.margin-right--none{margin-right:0!important}.margin-vert--none{margin-top:0!important;margin-bottom:0!important}.margin-horiz--none{margin-left:0!important;margin-right:0!important}.margin--xs{margin:.25rem!important}.margin-top--xs{margin-top:.25rem!important}.margin-left--xs{margin-left:.25rem!important}.margin-bottom--xs{margin-bottom:.25rem!important}.margin-right--xs{margin-right:.25rem!important}.margin-vert--xs{margin-top:.25rem!important;margin-bottom:.25rem!important}.margin-horiz--xs{margin-left:.25rem!important;margin-right:.25rem!important}.margin--sm{margin:.5rem!important}.margin-top--sm{margin-top:.5rem!important}.margin-left--sm{margin-left:.5rem!important}.margin-bottom--sm{margin-bottom:.5rem!important}.margin-right--sm{margin-right:.5rem!important}.margin-vert--sm{margin-top:.5rem!important;margin-bottom:.5rem!important}.margin-horiz--sm{margin-left:.5rem!impo
rtant;margin-right:.5rem!important}.margin--md{margin:1rem!important}.margin-top--md{margin-top:1rem!important}.margin-left--md{margin-left:1rem!important}.margin-bottom--md{margin-bottom:1rem!important}.margin-right--md{margin-right:1rem!important}.margin-vert--md{margin-top:1rem!important;margin-bottom:1rem!important}.margin-horiz--md{margin-left:1rem!important;margin-right:1rem!important}.margin--lg{margin:2rem!important}.margin-top--lg{margin-top:2rem!important}.margin-left--lg{margin-left:2rem!important}.margin-bottom--lg{margin-bottom:2rem!important}.margin-right--lg{margin-right:2rem!important}.margin-vert--lg{margin-top:2rem!important;margin-bottom:2rem!important}.margin-horiz--lg{margin-left:2rem!important;margin-right:2rem!important}.margin--xl{margin:5rem!important}.margin-top--xl{margin-top:5rem!important}.margin-left--xl{margin-left:5rem!important}.margin-bottom--xl{margin-bottom:5rem!important}.margin-right--xl{margin-right:5rem!important}.margin-vert--xl{margin-top:5rem!important;margin-bottom:5rem!important}.margin-horiz--xl{margin-left:5rem!important;margin-right:5rem!important}.padding--none{padding:0!important}.padding-top--none{padding-top:0!important}.padding-left--none{padding-left:0!important}.padding-bottom--none{padding-bottom:0!important}.padding-right--none{padding-right:0!important}.padding-vert--none{padding-top:0!important;padding-bottom:0!important}.padding-horiz--none{padding-left:0!important;padding-right:0!important}.padding--xs{padding:.25rem!important}.padding-top--xs{padding-top:.25rem!important}.padding-left--xs{padding-left:.25rem!important}.padding-bottom--xs{padding-bottom:.25rem!important}.padding-right--xs{padding-right:.25rem!important}.padding-vert--xs{padding-top:.25rem!important;padding-bottom:.25rem!important}.padding-horiz--xs{padding-left:.25rem!important;padding-right:.25rem!important}.padding--sm{padding:.5rem!important}.padding-top--sm{padding-top:.5rem!important}.padding-left--sm{padding-left:.5rem!important}.padding-bottom--sm{padding-bottom:.5rem!important}.padding-right--sm{padding-right:.5rem!important}.padding-vert--sm{padding-top:.5rem!important;padding-bottom:.5rem!important}.padding-horiz--sm{padding-left:.5rem!important;padding-right:.5rem!important}.padding--md{padding:1rem!important}.padding-top--md{padding-top:1rem!important}.padding-left--md{padding-left:1rem!important}.padding-bottom--md{padding-bottom:1rem!important}.padding-right--md{padding-right:1rem!important}.padding-vert--md{padding-top:1rem!important;padding-bottom:1rem!important}.padding-horiz--md{padding-left:1rem!important;padding-right:1rem!important}.padding--lg{padding:2rem!important}.padding-top--lg{padding-top:2rem!important}.padding-left--lg{padding-left:2rem!important}.padding-bottom--lg{padding-bottom:2rem!important}.padding-right--lg{padding-right:2rem!important}.padding-vert--lg{padding-top:2rem!important;padding-bottom:2rem!important}.padding-horiz--lg{padding-left:2rem!important;padding-right:2rem!important}.padding--xl{padding:5rem!important}.padding-top--xl{padding-top:5rem!important}.padding-left--xl{padding-left:5rem!important}.padding-bottom--xl{padding-bottom:5rem!important}.padding-right--xl{padding-right:5rem!important}.padding-vert--xl{padding-top:5rem!important;padding-bottom:5rem!important}.padding-horiz--xl{padding-left:5rem!important;padding-right:5rem!important}code{background-color:var(--ifm-code-background);border-radius:var(--ifm-code-border-radius);font-family:var(--ifm-font-family-monospace);font-size:var(--ifm-code-font-size);paddi
ng:var(--ifm-code-padding-vertical)var(--ifm-code-padding-horizontal);vertical-align:middle;border:.1rem solid rgba(0,0,0,.1)}a code{color:inherit}pre{background-color:var(--ifm-pre-background);border-radius:var(--ifm-pre-border-radius);color:var(--ifm-pre-color);font:var(--ifm-code-font-size)/var(--ifm-pre-line-height)var(--ifm-font-family-monospace);margin:0 0 var(--ifm-spacing-vertical);padding:var(--ifm-pre-padding);overflow:auto}pre code{font-size:100%;line-height:inherit;background-color:transparent;border:none;padding:0}kbd{background-color:var(--ifm-color-emphasis-0);border:1px solid var(--ifm-color-emphasis-400);box-shadow:inset 0 -1px 0 var(--ifm-color-emphasis-400);color:var(--ifm-color-emphasis-800);font:80% var(--ifm-font-family-monospace);border-radius:.2rem;padding:.15rem .3rem}h1,h2,h3,h4,h5,h6{color:var(--ifm-heading-color);font-family:var(--ifm-heading-font-family);font-weight:var(--ifm-heading-font-weight);line-height:var(--ifm-heading-line-height);margin:var(--ifm-heading-margin-top)0 var(--ifm-heading-margin-bottom)0}h1{font-size:var(--ifm-h1-font-size)}h2{font-size:var(--ifm-h2-font-size)}h3{font-size:var(--ifm-h3-font-size)}h4{font-size:var(--ifm-h4-font-size)}h5{font-size:var(--ifm-h5-font-size)}h6{font-size:var(--ifm-h6-font-size)}img{max-width:100%}img[align=right]{padding-left:var(--image-alignment-padding)}img[align=left]{padding-right:var(--image-alignment-padding)}.markdown{--ifm-h1-vertical-rhythm-top:3;--ifm-h2-vertical-rhythm-top:2;--ifm-h3-vertical-rhythm-top:1.5;--ifm-heading-vertical-rhythm-top:1.25;--ifm-h1-vertical-rhythm-bottom:1.25;--ifm-heading-vertical-rhythm-bottom:1}.markdown:before{content:"";display:table}.markdown:after{clear:both;content:"";display:table}.markdown>:last-child{margin-bottom:0!important}.markdown h1:first-child{--ifm-h1-font-size:3rem;margin-bottom:calc(var(--ifm-h1-vertical-rhythm-bottom)*var(--ifm-leading))}.markdown>h2{--ifm-h2-font-size:2rem;margin-bottom:calc(var(--ifm-heading-vertical-rhythm-bottom)*var(--ifm-leading));margin-top:calc(var(--ifm-h2-vertical-rhythm-top)*var(--ifm-leading))}.markdown>h3{--ifm-h3-font-size:1.5rem;margin-bottom:calc(var(--ifm-heading-vertical-rhythm-bottom)*var(--ifm-leading));margin-top:calc(var(--ifm-h3-vertical-rhythm-top)*var(--ifm-leading))}.markdown>h4,.markdown>h5,.markdown>h6{margin-bottom:calc(var(--ifm-heading-vertical-rhythm-bottom)*var(--ifm-leading));margin-top:calc(var(--ifm-heading-vertical-rhythm-top)*var(--ifm-leading))}.markdown>pre,.markdown>ul,.markdown>p{margin-bottom:var(--ifm-leading)}.markdown li{word-wrap:break-word}.markdown li>p{margin-top:var(--ifm-list-paragraph-margin)}.markdown li+li{margin-top:var(--ifm-list-item-margin)}ul,ol{margin:0 0 var(--ifm-list-margin);padding-left:var(--ifm-list-left-padding)}ol ol,ul ol{list-style-type:lower-roman}ul ul,ul ol,ol ol,ol ul{margin:0}ul ul ol,ul ol ol,ol ul ol,ol ol ol{list-style-type:lower-alpha}table{border-collapse:collapse;margin-bottom:var(--ifm-spacing-vertical);display:block;overflow:auto}table thead tr{border-bottom:2px solid var(--ifm-table-border-color)}table thead{background-color:var(--ifm-table-stripe-background)}table tr{background-color:var(--ifm-table-background);border-top:var(--ifm-table-border-width)solid var(--ifm-table-border-color)}table tr:nth-child(2n){background-color:var(--ifm-table-stripe-background)}table th,table td{border:var(--ifm-table-border-width)solid var(--ifm-table-border-color);padding:var(--ifm-table-cell-padding)}table 
th{background-color:var(--ifm-table-head-background);color:var(--ifm-table-head-color);font-weight:var(--ifm-table-head-font-weight)}table td{color:var(--ifm-table-cell-color)}strong{font-weight:var(--ifm-font-weight-bold)}a{color:var(--ifm-link-color);-webkit-text-decoration:var(--ifm-link-decoration);text-decoration:var(--ifm-link-decoration);transition:color var(--ifm-transition-fast)var(--ifm-transition-timing-default)}a:hover{color:var(--ifm-link-hover-color);-webkit-text-decoration:var(--ifm-link-hover-decoration);text-decoration:var(--ifm-link-hover-decoration)}a:not([href]){text-decoration:none}p{margin:0 0 var(--ifm-paragraph-margin-bottom)}blockquote{border-left:var(--ifm-blockquote-border-left-width)solid var(--ifm-blockquote-border-color);box-shadow:var(--ifm-blockquote-shadow);color:var(--ifm-blockquote-color);font-size:var(--ifm-blockquote-font-size);margin:0 0 var(--ifm-spacing-vertical);padding:var(--ifm-blockquote-padding-vertical)var(--ifm-blockquote-padding-horizontal)}blockquote>:first-child{margin-top:0}blockquote>:last-child{margin-bottom:0}hr{background-color:var(--ifm-hr-background-color);height:var(--ifm-hr-height);margin:var(--ifm-hr-margin-vertical)0;border:0}.shadow--lw{box-shadow:var(--ifm-global-shadow-lw)!important}.shadow--md{box-shadow:var(--ifm-global-shadow-md)!important}.shadow--tl{box-shadow:var(--ifm-global-shadow-tl)!important}.text--primary{color:var(--ifm-color-primary)}.text--secondary{color:var(--ifm-color-secondary)}.text--success{color:var(--ifm-color-success)}.text--info{color:var(--ifm-color-info)}.text--warning{color:var(--ifm-color-warning)}.text--danger{color:var(--ifm-color-danger)}.text--center{text-align:center}.text--left{text-align:left}.text--justify{text-align:justify}.text--right{text-align:right}.text--capitalize{text-transform:capitalize}.text--lowercase{text-transform:lowercase}.text--uppercase{text-transform:uppercase}.text--light{font-weight:var(--ifm-font-weight-light)}.text--normal{font-weight:var(--ifm-font-weight-normal)}.text--semibold{font-weight:var(--ifm-font-weight-semibold)}.text--bold{font-weight:var(--ifm-font-weight-bold)}.text--italic{font-style:italic}.text--truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.text--break{word-wrap:break-word!important;word-break:break-word!important}.text--no-decoration,.text--no-decoration:hover{text-decoration:none}.clean-btn{color:inherit;cursor:pointer;background:0 
0;border:none;padding:0;font-family:inherit}.clean-list{padding-left:0;list-style:none}.alert--primary{--ifm-alert-background-color:var(--ifm-color-primary-contrast-background);--ifm-alert-background-color-highlight:rgba(53,120,229,.15);--ifm-alert-foreground-color:var(--ifm-color-primary-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-primary-dark)}.alert--secondary{--ifm-alert-background-color:var(--ifm-color-secondary-contrast-background);--ifm-alert-background-color-highlight:rgba(235,237,240,.15);--ifm-alert-foreground-color:var(--ifm-color-secondary-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-secondary-dark)}.alert--success{--ifm-alert-background-color:var(--ifm-color-success-contrast-background);--ifm-alert-background-color-highlight:rgba(0,164,0,.15);--ifm-alert-foreground-color:var(--ifm-color-success-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-success-dark)}.alert--info{--ifm-alert-background-color:var(--ifm-color-info-contrast-background);--ifm-alert-background-color-highlight:rgba(84,199,236,.15);--ifm-alert-foreground-color:var(--ifm-color-info-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-info-dark)}.alert--warning{--ifm-alert-background-color:var(--ifm-color-warning-contrast-background);--ifm-alert-background-color-highlight:rgba(255,186,0,.15);--ifm-alert-foreground-color:var(--ifm-color-warning-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-warning-dark)}.alert--danger{--ifm-alert-background-color:var(--ifm-color-danger-contrast-background);--ifm-alert-background-color-highlight:rgba(250,56,62,.15);--ifm-alert-foreground-color:var(--ifm-color-danger-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-danger-dark)}.alert{--ifm-code-background:var(--ifm-alert-background-color-highlight);--ifm-link-color:var(--ifm-alert-foreground-color);--ifm-link-hover-color:var(--ifm-alert-foreground-color);--ifm-link-decoration:underline;--ifm-tabs-color:var(--ifm-alert-foreground-color);--ifm-tabs-color-active:var(--ifm-alert-foreground-color);--ifm-tabs-color-active-border:var(--ifm-alert-border-color);background-color:var(--ifm-alert-background-color);border:var(--ifm-alert-border-width)solid var(--ifm-alert-border-color);border-left-width:var(--ifm-alert-border-left-width);border-radius:var(--ifm-alert-border-radius);box-shadow:var(--ifm-alert-shadow);color:var(--ifm-alert-foreground-color);padding:var(--ifm-alert-padding-vertical)var(--ifm-alert-padding-horizontal)}.alert__heading{font:bold var(--ifm-h5-font-size)/var(--ifm-heading-line-height)var(--ifm-heading-font-family);text-transform:uppercase;align-items:center;margin-bottom:.5rem;display:flex}.alert__icon{margin-right:.4em;display:inline-flex}.alert__icon svg{fill:var(--ifm-alert-foreground-color);stroke:var(--ifm-alert-foreground-color);stroke-width:0}.alert .close{color:var(--ifm-alert-foreground-color);margin:calc(var(--ifm-alert-padding-vertical)*-1)calc(var(--ifm-alert-padding-horizontal)*-1)0 0;opacity:.75}.alert .close:hover,.alert .close:focus{opacity:1}.alert a{-webkit-text-decoration-color:var(--ifm-alert-border-color);text-decoration-color:var(--ifm-alert-border-color)}.alert 
a:hover{text-decoration-thickness:2px}.avatar{column-gap:var(--ifm-avatar-intro-margin);display:flex}.avatar__photo{height:var(--ifm-avatar-photo-size);width:var(--ifm-avatar-photo-size);border-radius:50%;display:block;overflow:hidden}.avatar__photo--sm{--ifm-avatar-photo-size:2rem}.avatar__photo--lg{--ifm-avatar-photo-size:4rem}.avatar__photo--xl{--ifm-avatar-photo-size:6rem}.avatar__intro{text-align:var(--ifm-avatar-intro-alignment);flex-direction:column;flex:1;justify-content:center;display:flex}.avatar__name{font:bold var(--ifm-h4-font-size)/var(--ifm-heading-line-height)var(--ifm-font-family-base)}.avatar__subtitle{margin-top:.25rem}.avatar--vertical{--ifm-avatar-intro-alignment:center;--ifm-avatar-intro-margin:.5rem;flex-direction:column;align-items:center}.badge{background-color:var(--ifm-badge-background-color);border:var(--ifm-badge-border-width)solid var(--ifm-badge-border-color);border-radius:var(--ifm-badge-border-radius);color:var(--ifm-badge-color);font-size:75%;font-weight:var(--ifm-font-weight-bold);padding:var(--ifm-badge-padding-vertical)var(--ifm-badge-padding-horizontal);line-height:1;display:inline-block}.badge--primary{--ifm-badge-background-color:var(--ifm-color-primary);--ifm-badge-border-color:var(--ifm-badge-background-color)}.badge--secondary{--ifm-badge-background-color:var(--ifm-color-secondary);--ifm-badge-border-color:var(--ifm-badge-background-color);color:var(--ifm-color-black)}.badge--success{--ifm-badge-background-color:var(--ifm-color-success);--ifm-badge-border-color:var(--ifm-badge-background-color)}.badge--info{--ifm-badge-background-color:var(--ifm-color-info);--ifm-badge-border-color:var(--ifm-badge-background-color)}.badge--warning{--ifm-badge-background-color:var(--ifm-color-warning);--ifm-badge-border-color:var(--ifm-badge-background-color)}.badge--danger{--ifm-badge-background-color:var(--ifm-color-danger);--ifm-badge-border-color:var(--ifm-badge-background-color)}.breadcrumbs{margin-bottom:0;padding-left:0}.breadcrumbs__item{display:inline-block}.breadcrumbs__item:not(:last-child):after{background:var(--ifm-breadcrumb-separator)center;content:" ";filter:var(--ifm-breadcrumb-separator-filter);height:calc(var(--ifm-breadcrumb-separator-size)*var(--ifm-breadcrumb-size-multiplier)*var(--ifm-breadcrumb-separator-size-multiplier));margin:0 var(--ifm-breadcrumb-spacing);opacity:.5;width:calc(var(--ifm-breadcrumb-separator-size)*var(--ifm-breadcrumb-size-multiplier)*var(--ifm-breadcrumb-separator-size-multiplier));display:inline-block}.breadcrumbs__item--active 
.breadcrumbs__link{background:var(--ifm-breadcrumb-item-background-active);color:var(--ifm-breadcrumb-color-active)}.breadcrumbs__link{border-radius:var(--ifm-breadcrumb-border-radius);color:var(--ifm-font-color-base);font-size:calc(1rem*var(--ifm-breadcrumb-size-multiplier));padding:calc(var(--ifm-breadcrumb-padding-vertical)*var(--ifm-breadcrumb-size-multiplier))calc(var(--ifm-breadcrumb-padding-horizontal)*var(--ifm-breadcrumb-size-multiplier));transition-property:background,color;transition-duration:var(--ifm-transition-fast);transition-timing-function:var(--ifm-transition-timing-default);display:inline-block}.breadcrumbs__link:link:hover,.breadcrumbs__link:visited:hover,area[href].breadcrumbs__link:hover{background:var(--ifm-breadcrumb-item-background-active);text-decoration:none}.breadcrumbs__link:any-link:hover{background:var(--ifm-breadcrumb-item-background-active);text-decoration:none}.breadcrumbs--sm{--ifm-breadcrumb-size-multiplier:.8}.breadcrumbs--lg{--ifm-breadcrumb-size-multiplier:1.2}.button{background-color:var(--ifm-button-background-color);border:var(--ifm-button-border-width)solid var(--ifm-button-border-color);border-radius:var(--ifm-button-border-radius);color:var(--ifm-button-color);cursor:pointer;font-size:calc(.875rem*var(--ifm-button-size-multiplier));font-weight:var(--ifm-button-font-weight);padding:calc(var(--ifm-button-padding-vertical)*var(--ifm-button-size-multiplier))calc(var(--ifm-button-padding-horizontal)*var(--ifm-button-size-multiplier));text-align:center;-webkit-user-select:none;user-select:none;vertical-align:middle;white-space:nowrap;transition-property:color,background,border-color;transition-duration:var(--ifm-button-transition-duration);transition-timing-function:var(--ifm-transition-timing-default);line-height:1.5;display:inline-block}.button:hover{color:var(--ifm-button-color);text-decoration:none}.button--outline{--ifm-button-background-color:transparent;--ifm-button-color:var(--ifm-button-border-color)}.button--outline:hover{--ifm-button-background-color:var(--ifm-button-border-color)}.button--outline:hover,.button--outline:active,.button--outline.button--active{--ifm-button-color:var(--ifm-font-color-base-inverse)}.button--link{--ifm-button-background-color:transparent;--ifm-button-border-color:transparent;color:var(--ifm-link-color);-webkit-text-decoration:var(--ifm-link-decoration);text-decoration:var(--ifm-link-decoration)}.button--link:hover,.button--link:active,.button--link.button--active{color:var(--ifm-link-hover-color);-webkit-text-decoration:var(--ifm-link-hover-decoration);text-decoration:var(--ifm-link-hover-decoration)}.button.disabled,.button:disabled,.button[disabled]{opacity:.65;pointer-events:none}.button--sm{--ifm-button-size-multiplier:.8}.button--lg{--ifm-button-size-multiplier:1.35}.button--block{width:100%;display:block}.button.button--secondary{color:var(--ifm-color-gray-900)}.button.button--secondary.button--outline:not(.button--active):not(:hover){color:var(--ifm-font-color-base)}:where(.button--primary){--ifm-button-background-color:var(--ifm-color-primary);--ifm-button-border-color:var(--ifm-color-primary)}:where(.button--primary):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-primary-dark);--ifm-button-border-color:var(--ifm-color-primary-dark)}.button--primary:active,.button--primary.button--active{--ifm-button-background-color:var(--ifm-color-primary-darker);--ifm-button-border-color:var(--ifm-color-primary-darker)}:where(.button--secondary){--ifm-button-background-color:var(--ifm-color-s
econdary);--ifm-button-border-color:var(--ifm-color-secondary)}:where(.button--secondary):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-secondary-dark);--ifm-button-border-color:var(--ifm-color-secondary-dark)}.button--secondary:active,.button--secondary.button--active{--ifm-button-background-color:var(--ifm-color-secondary-darker);--ifm-button-border-color:var(--ifm-color-secondary-darker)}:where(.button--success){--ifm-button-background-color:var(--ifm-color-success);--ifm-button-border-color:var(--ifm-color-success)}:where(.button--success):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-success-dark);--ifm-button-border-color:var(--ifm-color-success-dark)}.button--success:active,.button--success.button--active{--ifm-button-background-color:var(--ifm-color-success-darker);--ifm-button-border-color:var(--ifm-color-success-darker)}:where(.button--info){--ifm-button-background-color:var(--ifm-color-info);--ifm-button-border-color:var(--ifm-color-info)}:where(.button--info):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-info-dark);--ifm-button-border-color:var(--ifm-color-info-dark)}.button--info:active,.button--info.button--active{--ifm-button-background-color:var(--ifm-color-info-darker);--ifm-button-border-color:var(--ifm-color-info-darker)}:where(.button--warning){--ifm-button-background-color:var(--ifm-color-warning);--ifm-button-border-color:var(--ifm-color-warning)}:where(.button--warning):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-warning-dark);--ifm-button-border-color:var(--ifm-color-warning-dark)}.button--warning:active,.button--warning.button--active{--ifm-button-background-color:var(--ifm-color-warning-darker);--ifm-button-border-color:var(--ifm-color-warning-darker)}:where(.button--danger){--ifm-button-background-color:var(--ifm-color-danger);--ifm-button-border-color:var(--ifm-color-danger)}:where(.button--danger):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-danger-dark);--ifm-button-border-color:var(--ifm-color-danger-dark)}.button--danger:active,.button--danger.button--active{--ifm-button-background-color:var(--ifm-color-danger-darker);--ifm-button-border-color:var(--ifm-color-danger-darker)}.button-group{gap:var(--ifm-button-group-spacing);display:inline-flex}.button-group>.button:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.button-group>.button:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.button-group--block{justify-content:stretch;display:flex}.button-group--block>.button{flex-grow:1}.card{background-color:var(--ifm-card-background-color);border-radius:var(--ifm-card-border-radius);box-shadow:var(--ifm-global-shadow-lw);flex-direction:column;display:flex;overflow:hidden}.card--full-height{height:100%}.card__image{padding-top:var(--ifm-card-vertical-spacing)}.card__image:first-child{padding-top:0}.card__header,.card__body,.card__footer{padding:var(--ifm-card-vertical-spacing)var(--ifm-card-horizontal-spacing)}.card__header:not(:last-child),.card__body:not(:last-child),.card__footer:not(:last-child){padding-bottom:0}.card__header>:last-child,.card__body>:last-child,.card__footer>:last-child{margin-bottom:0}.card__footer{margin-top:auto}.table-of-contents{padding:var(--ifm-toc-padding-vertical)0;margin-bottom:0;font-size:.8rem}.table-of-contents,.table-of-contents ul{padding-left:var(--ifm-toc-padding-horizontal);list-style:none}.table-of-contents 
li{margin:var(--ifm-toc-padding-vertical)var(--ifm-toc-padding-horizontal)}.table-of-contents__left-border{border-left:1px solid var(--ifm-toc-border-color)}.table-of-contents__link{color:var(--ifm-toc-link-color);display:block}.table-of-contents__link:hover,.table-of-contents__link:hover code,.table-of-contents__link--active,.table-of-contents__link--active code{color:var(--ifm-color-primary);text-decoration:none}.close{color:var(--ifm-color-black);float:right;font-size:1.5rem;font-weight:var(--ifm-font-weight-bold);opacity:.5;transition:opacity var(--ifm-transition-fast)var(--ifm-transition-timing-default);padding:1rem;line-height:1}.close:hover{opacity:.7}.close:focus{opacity:.8}.dropdown{font-weight:var(--ifm-dropdown-font-weight);vertical-align:top;display:inline-flex;position:relative}.dropdown--hoverable:hover .dropdown__menu,.dropdown--show .dropdown__menu{opacity:1;pointer-events:all;visibility:visible;transform:translateY(-1px)}.dropdown--right .dropdown__menu{left:inherit;right:0}.dropdown--nocaret .navbar__link:after{content:none!important}.dropdown__menu{background-color:var(--ifm-dropdown-background-color);border-radius:var(--ifm-global-radius);box-shadow:var(--ifm-global-shadow-md);opacity:0;pointer-events:none;left:0;top:calc(100% - var(--ifm-navbar-item-padding-vertical) + .3rem);visibility:hidden;z-index:var(--ifm-z-index-dropdown);transition-property:opacity,transform,visibility;transition-duration:var(--ifm-transition-fast);transition-timing-function:var(--ifm-transition-timing-default);min-width:10rem;max-height:80vh;padding:.5rem;list-style:none;position:absolute;overflow-y:auto;transform:translateY(-.625rem)}.dropdown__link{color:var(--ifm-dropdown-link-color);white-space:nowrap;border-radius:.25rem;margin-top:.2rem;padding:.25rem .5rem;font-size:.875rem;display:block}.dropdown__link:hover,.dropdown__link--active{background-color:var(--ifm-dropdown-hover-background-color);color:var(--ifm-dropdown-link-color);text-decoration:none}.dropdown__link--active,.dropdown__link--active:hover{--ifm-dropdown-link-color:var(--ifm-link-color)}.dropdown>.navbar__link:after{content:"";border:.4em solid transparent;border-top-color:currentColor;border-bottom:0 solid;margin-left:.3em;display:inline-block;position:relative;top:2px;transform:translateY(-50%)}.footer{background-color:var(--ifm-footer-background-color);color:var(--ifm-footer-color);padding:var(--ifm-footer-padding-vertical)var(--ifm-footer-padding-horizontal)}.footer--dark{--ifm-footer-background-color:#303846;--ifm-footer-color:var(--ifm-footer-link-color);--ifm-footer-link-color:var(--ifm-color-secondary);--ifm-footer-title-color:var(--ifm-color-white)}.footer__links{margin-bottom:1rem}.footer__link-item{color:var(--ifm-footer-link-color);line-height:2}.footer__link-item:hover{color:var(--ifm-footer-link-hover-color)}.footer__link-separator{margin:0 var(--ifm-footer-link-horizontal-spacing)}.footer__logo{max-width:var(--ifm-footer-logo-max-width);margin-top:1rem}.footer__title{color:var(--ifm-footer-title-color);font:bold var(--ifm-h4-font-size)/var(--ifm-heading-line-height)var(--ifm-font-family-base);margin-bottom:var(--ifm-heading-margin-bottom)}.footer__item{margin-top:0}.footer__items{margin-bottom:0}[type=checkbox]{padding:0}.hero{background-color:var(--ifm-hero-background-color);color:var(--ifm-hero-text-color);align-items:center;padding:4rem 
2rem;display:flex}.hero--primary{--ifm-hero-background-color:var(--ifm-color-primary);--ifm-hero-text-color:var(--ifm-font-color-base-inverse)}.hero--dark{--ifm-hero-background-color:#303846;--ifm-hero-text-color:var(--ifm-color-white)}.hero__title{font-size:3rem}.hero__subtitle{font-size:1.5rem}.menu{font-weight:var(--ifm-font-weight-semibold);overflow-x:hidden}.menu__list{margin:0;padding-left:0;list-style:none}.menu__list .menu__list{padding-left:var(--ifm-menu-link-padding-horizontal);flex:0 0 100%;margin-top:.25rem}.menu__list-item:not(:first-child){margin-top:.25rem}.menu__list-item--collapsed .menu__list{height:0;overflow:hidden}.menu__list-item--collapsed .menu__link--sublist:after,.menu__list-item--collapsed .menu__caret:before{transform:rotate(90deg)}.menu__list-item-collapsible{transition:background var(--ifm-transition-fast)var(--ifm-transition-timing-default);border-radius:.25rem;flex-wrap:wrap;display:flex;position:relative}.menu__list-item-collapsible:hover,.menu__list-item-collapsible--active{background:var(--ifm-menu-color-background-hover)}.menu__list-item-collapsible .menu__link:hover,.menu__list-item-collapsible .menu__link--active{background:0 0!important}.menu__link,.menu__caret{transition:background var(--ifm-transition-fast)var(--ifm-transition-timing-default);border-radius:.25rem;align-items:center;display:flex}.menu__link:hover,.menu__caret:hover{background:var(--ifm-menu-color-background-hover)}.menu__link{color:var(--ifm-menu-color);padding:var(--ifm-menu-link-padding-vertical)var(--ifm-menu-link-padding-horizontal);flex:1;line-height:1.25}.menu__link:hover{color:var(--ifm-menu-color);transition:color var(--ifm-transition-fast)var(--ifm-transition-timing-default);text-decoration:none}.menu__link--sublist-caret:after{content:"";background:var(--ifm-menu-link-sublist-icon)50%/2rem 2rem;filter:var(--ifm-menu-link-sublist-icon-filter);transition:transform var(--ifm-transition-fast)linear;width:1.25rem;min-width:1.25rem;height:1.25rem;margin-left:auto;transform:rotate(180deg)}.menu__link--active,.menu__link--active:hover{color:var(--ifm-menu-color-active)}.menu__link--active:not(.menu__link--sublist){background-color:var(--ifm-menu-color-background-active)}.menu__caret{padding:var(--ifm-menu-link-padding-vertical)var(--ifm-menu-link-padding-horizontal)}.menu__caret:before{content:"";background:var(--ifm-menu-link-sublist-icon)50%/2rem 2rem;filter:var(--ifm-menu-link-sublist-icon-filter);transition:transform var(--ifm-transition-fast)linear;width:1.25rem;height:1.25rem;transform:rotate(180deg)}html[data-theme=dark],.navbar--dark{--ifm-menu-link-sublist-icon-filter:invert(100%)sepia(94%)saturate(17%)hue-rotate(223deg)brightness(104%)contrast(98%)}.navbar{background-color:var(--ifm-navbar-background-color);box-shadow:var(--ifm-navbar-shadow);height:var(--ifm-navbar-height);padding:var(--ifm-navbar-padding-vertical)var(--ifm-navbar-padding-horizontal);display:flex}.navbar>.container,.navbar>.container-fluid{display:flex}.navbar--fixed-top{z-index:var(--ifm-z-index-fixed);position:sticky;top:0}.navbar__inner{flex-wrap:wrap;justify-content:space-between;width:100%;display:flex}.navbar__brand{color:var(--ifm-navbar-link-color);align-items:center;min-width:0;margin-right:1rem;display:flex}.navbar__brand:hover{color:var(--ifm-navbar-link-hover-color);text-decoration:none}.navbar__title{flex:auto}.navbar__toggle{margin-right:.5rem;display:none}.navbar__logo{flex:none;height:2rem;margin-right:.5rem}.navbar__logo 
img{height:100%}.navbar__items{flex:1;align-items:center;min-width:0;display:flex}.navbar__items--center{flex:none}.navbar__items--center .navbar__brand{margin:0}.navbar__items--center+.navbar__items--right{flex:1}.navbar__items--right{flex:none;justify-content:flex-end}.navbar__items--right>:last-child{padding-right:0}.navbar__item{padding:var(--ifm-navbar-item-padding-vertical)var(--ifm-navbar-item-padding-horizontal);display:inline-block}.navbar__item.dropdown .navbar__link:not([href]){pointer-events:none}.navbar__link{color:var(--ifm-navbar-link-color);font-weight:var(--ifm-font-weight-semibold)}.navbar__link:hover,.navbar__link--active{color:var(--ifm-navbar-link-hover-color);text-decoration:none}.navbar--dark,.navbar--primary{--ifm-menu-color:var(--ifm-color-gray-300);--ifm-navbar-link-color:var(--ifm-color-gray-100);--ifm-navbar-search-input-background-color:rgba(255,255,255,.1);--ifm-navbar-search-input-placeholder-color:rgba(255,255,255,.5);color:var(--ifm-color-white)}.navbar--dark{--ifm-navbar-background-color:#242526;--ifm-navbar-link-hover-color:var(--ifm-color-primary);--ifm-menu-color-background-active:rgba(255,255,255,.05);--ifm-navbar-search-input-color:var(--ifm-color-white)}.navbar--primary{--ifm-navbar-background-color:var(--ifm-color-primary);--ifm-navbar-link-hover-color:var(--ifm-color-white);--ifm-menu-color-active:var(--ifm-color-white);--ifm-navbar-search-input-color:var(--ifm-color-emphasis-500)}.navbar__search-input{appearance:none;background:var(--ifm-navbar-search-input-background-color)var(--ifm-navbar-search-input-icon)no-repeat .75rem center/1rem 1rem;color:var(--ifm-navbar-search-input-color);cursor:text;border:none;border-radius:2rem;width:12.5rem;height:2rem;padding:0 .5rem 0 2.25rem;font-size:1rem;display:inline-block}.navbar__search-input::placeholder{color:var(--ifm-navbar-search-input-placeholder-color)}.navbar-sidebar{background-color:var(--ifm-navbar-background-color);box-shadow:var(--ifm-global-shadow-md);opacity:0;visibility:hidden;width:var(--ifm-navbar-sidebar-width);transition-property:opacity,visibility,transform;transition-duration:var(--ifm-transition-fast);transition-timing-function:ease-in-out;position:fixed;top:0;bottom:0;left:0;overflow-x:hidden;transform:translate(-100%)}.navbar-sidebar--show .navbar-sidebar,.navbar-sidebar--show .navbar-sidebar__backdrop{opacity:1;visibility:visible}.navbar-sidebar--show .navbar-sidebar{transform:translate(0,0)}.navbar-sidebar__backdrop{opacity:0;visibility:hidden;transition-property:opacity,visibility;transition-duration:var(--ifm-transition-fast);background-color:rgba(0,0,0,.6);transition-timing-function:ease-in-out;position:fixed;inset:0}.navbar-sidebar__brand{box-shadow:var(--ifm-navbar-shadow);height:var(--ifm-navbar-height);padding:var(--ifm-navbar-padding-vertical)var(--ifm-navbar-padding-horizontal);flex:1;align-items:center;display:flex}.navbar-sidebar__items{height:calc(100% - var(--ifm-navbar-height));transition:transform var(--ifm-transition-fast)ease-in-out;display:flex;transform:translateZ(0)}.navbar-sidebar__items--show-secondary{transform:translate3d(calc((var(--ifm-navbar-sidebar-width))*-1),0,0)}.navbar-sidebar__item{width:calc(var(--ifm-navbar-sidebar-width));flex-shrink:0;padding:.5rem}.navbar-sidebar__back{background:var(--ifm-menu-color-background-active);font-size:15px;font-weight:var(--ifm-button-font-weight);text-align:left;width:calc(100% + 1rem);margin:0 0 .2rem -.5rem;padding:.6rem 
1.5rem;position:relative;top:-.5rem}.navbar-sidebar__close{margin-left:auto;display:flex}.pagination{column-gap:var(--ifm-pagination-page-spacing);font-size:var(--ifm-pagination-font-size);padding-left:0;display:flex}.pagination--sm{--ifm-pagination-font-size:.8rem;--ifm-pagination-padding-horizontal:.8rem;--ifm-pagination-padding-vertical:.2rem}.pagination--lg{--ifm-pagination-font-size:1.2rem;--ifm-pagination-padding-horizontal:1.2rem;--ifm-pagination-padding-vertical:.3rem}.pagination__item{display:inline-flex}.pagination__item>span{padding:var(--ifm-pagination-padding-vertical)}.pagination__item--active .pagination__link{background:var(--ifm-pagination-item-active-background);color:var(--ifm-pagination-color-active)}.pagination__item:not(.pagination__item--active):hover .pagination__link{background:var(--ifm-pagination-item-active-background)}.pagination__item--disabled,.pagination__item[disabled]{opacity:.25;pointer-events:none}.pagination__link{border-radius:var(--ifm-pagination-border-radius);color:var(--ifm-font-color-base);padding:var(--ifm-pagination-padding-vertical)var(--ifm-pagination-padding-horizontal);transition:background var(--ifm-transition-fast)var(--ifm-transition-timing-default);display:inline-block}.pagination__link:hover{text-decoration:none}.pagination-nav{grid-gap:var(--ifm-spacing-horizontal);gap:var(--ifm-spacing-horizontal);grid-template-columns:repeat(2,1fr);display:grid}.pagination-nav__link{border:1px solid var(--ifm-color-emphasis-300);border-radius:var(--ifm-pagination-nav-border-radius);line-height:var(--ifm-heading-line-height);padding:var(--ifm-global-spacing);transition:border-color var(--ifm-transition-fast)var(--ifm-transition-timing-default);height:100%;display:block}.pagination-nav__link:hover{border-color:var(--ifm-pagination-nav-color-hover);text-decoration:none}.pagination-nav__link--next{text-align:right;grid-column:2/3}.pagination-nav__label{font-size:var(--ifm-h4-font-size);font-weight:var(--ifm-heading-font-weight);word-break:break-word}.pagination-nav__link--prev .pagination-nav__label:before{content:"« "}.pagination-nav__link--next .pagination-nav__label:after{content:" »"}.pagination-nav__sublabel{color:var(--ifm-color-content-secondary);font-size:var(--ifm-h5-font-size);font-weight:var(--ifm-font-weight-semibold);margin-bottom:.25rem}.pills{gap:var(--ifm-pills-spacing);padding-left:0;display:flex}.pills__item{cursor:pointer;font-weight:var(--ifm-font-weight-bold);transition:background var(--ifm-transition-fast)var(--ifm-transition-timing-default);border-radius:.5rem;padding:.25rem 1rem;display:inline-block}.pills__item--active{background:var(--ifm-pills-color-background-active);color:var(--ifm-pills-color-active)}.pills__item:not(.pills__item--active):hover{background:var(--ifm-pills-color-background-active)}.pills--block{justify-content:stretch}.pills--block .pills__item{text-align:center;flex-grow:1}.tabs{color:var(--ifm-tabs-color);font-weight:var(--ifm-font-weight-bold);margin-bottom:0;padding-left:0;display:flex;overflow-x:auto}.tabs__item{border-radius:var(--ifm-global-radius);cursor:pointer;padding:var(--ifm-tabs-padding-vertical)var(--ifm-tabs-padding-horizontal);transition:background-color var(--ifm-transition-fast)var(--ifm-transition-timing-default);border-bottom:3px solid 
transparent;display:inline-flex}.tabs__item--active{border-bottom-color:var(--ifm-tabs-color-active-border);color:var(--ifm-tabs-color-active);border-bottom-right-radius:0;border-bottom-left-radius:0}.tabs__item:hover{background-color:var(--ifm-hover-overlay)}.tabs--block{justify-content:stretch}.tabs--block .tabs__item{flex-grow:1;justify-content:center}html[data-theme=dark]{--ifm-color-scheme:dark;--ifm-color-emphasis-0:var(--ifm-color-gray-1000);--ifm-color-emphasis-100:var(--ifm-color-gray-900);--ifm-color-emphasis-200:var(--ifm-color-gray-800);--ifm-color-emphasis-300:var(--ifm-color-gray-700);--ifm-color-emphasis-400:var(--ifm-color-gray-600);--ifm-color-emphasis-500:var(--ifm-color-gray-500);--ifm-color-emphasis-600:var(--ifm-color-gray-400);--ifm-color-emphasis-700:var(--ifm-color-gray-300);--ifm-color-emphasis-800:var(--ifm-color-gray-200);--ifm-color-emphasis-900:var(--ifm-color-gray-100);--ifm-color-emphasis-1000:var(--ifm-color-gray-0);--ifm-background-color:#1b1b1d;--ifm-background-surface-color:#242526;--ifm-hover-overlay:rgba(255,255,255,.05);--ifm-color-content:#e3e3e3;--ifm-color-content-secondary:#fff;--ifm-breadcrumb-separator-filter:invert(64%)sepia(11%)saturate(0%)hue-rotate(149deg)brightness(99%)contrast(95%);--ifm-code-background:rgba(255,255,255,.1);--ifm-scrollbar-track-background-color:#444;--ifm-scrollbar-thumb-background-color:#686868;--ifm-scrollbar-thumb-hover-background-color:#7a7a7a;--ifm-table-stripe-background:rgba(255,255,255,.07);--ifm-toc-border-color:var(--ifm-color-emphasis-200);--ifm-color-primary-contrast-background:#102445;--ifm-color-primary-contrast-foreground:#ebf2fc;--ifm-color-secondary-contrast-background:#474748;--ifm-color-secondary-contrast-foreground:#fdfdfe;--ifm-color-success-contrast-background:#003100;--ifm-color-success-contrast-foreground:#e6f6e6;--ifm-color-info-contrast-background:#193c47;--ifm-color-info-contrast-foreground:#eef9fd;--ifm-color-warning-contrast-background:#4d3800;--ifm-color-warning-contrast-foreground:#fff8e6;--ifm-color-danger-contrast-background:#4b1113;--ifm-color-danger-contrast-foreground:#ffebec}@media (min-width:1440px){.container{max-width:var(--ifm-container-width-xl)}}@media (max-width:996px){.col{--ifm-col-width:100%;flex-basis:var(--ifm-col-width);margin-left:0}.footer{--ifm-footer-padding-horizontal:0}.footer__link-separator{display:none}.footer__col{margin-bottom:calc(var(--ifm-spacing-vertical)*3)}.footer__link-item{width:max-content;display:block}.hero{padding-left:0;padding-right:0}.navbar>.container,.navbar>.container-fluid{padding:0}.navbar__toggle{display:inherit}.navbar__item{display:none}.navbar__search-input{width:9rem}.pills--block,.tabs--block{flex-direction:column}}@media (max-width:576px){.markdown h1:first-child{--ifm-h1-font-size:2rem}.markdown>h2{--ifm-h2-font-size:1.5rem}.markdown>h3{--ifm-h3-font-size:1.25rem}}@media (pointer:fine){.thin-scrollbar{scrollbar-width:thin}.thin-scrollbar::-webkit-scrollbar{height:var(--ifm-scrollbar-size);width:var(--ifm-scrollbar-size)}.thin-scrollbar::-webkit-scrollbar-track{background:var(--ifm-scrollbar-track-background-color);border-radius:10px}.thin-scrollbar::-webkit-scrollbar-thumb{background:var(--ifm-scrollbar-thumb-background-color);border-radius:10px}.thin-scrollbar::-webkit-scrollbar-thumb:hover{background:var(--ifm-scrollbar-thumb-hover-background-color)}}@media (prefers-reduced-motion:reduce){:root{--ifm-transition-fast:0s;--ifm-transition-slow:0s}}@media 
print{.table-of-contents,.footer,.menu,.navbar,.pagination-nav{display:none}.tabs{page-break-inside:avoid}}:root{--docusaurus-progress-bar-color:var(--ifm-color-primary)}#nprogress{pointer-events:none}#nprogress .bar{background:var(--docusaurus-progress-bar-color);z-index:1031;width:100%;height:2px;position:fixed;top:0;left:0}#nprogress .peg{box-shadow:0 0 10px var(--docusaurus-progress-bar-color),0 0 5px var(--docusaurus-progress-bar-color);opacity:1;width:100px;height:100%;position:absolute;right:0;transform:rotate(3deg)translateY(-4px)}:root{--ifm-container-width:1280px;--ifm-container-width-xl:1440px;--ifm-footer-padding-vertical:.5rem;--ifm-spacing-horizontal:.8rem}.header-github-link:hover{opacity:.6}.header-github-link:before{content:"";background:url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat;width:24px;height:24px;display:flex}[data-theme=dark] .header-github-link:before{background:url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat}body:not(.navigation-with-keyboard) :not(input):focus{outline:none}#__docusaurus-base-url-issue-banner-container{display:none}.skipToContent_fXgn{z-index:calc(var(--ifm-z-index-fixed) + 1);padding:calc(var(--ifm-global-spacing)/2)var(--ifm-global-spacing);color:var(--ifm-color-emphasis-900);background-color:var(--ifm-background-surface-color);position:fixed;top:1rem;left:100%}.skipToContent_fXgn:focus{box-shadow:var(--ifm-global-shadow-md);left:1rem}.closeButton_CVFx{padding:0;line-height:0}.content_knG7{text-align:center;padding:5px 0;font-size:85%}.content_knG7 a{color:inherit;text-decoration:underline}:root{--docusaurus-announcement-bar-height:auto}.announcementBar_mb4j{height:var(--docusaurus-announcement-bar-height);background-color:var(--ifm-color-white);color:var(--ifm-color-black);border-bottom:1px solid var(--ifm-color-emphasis-100);align-items:center;display:flex}html[data-announcement-bar-initially-dismissed=true] 
.announcementBar_mb4j{display:none}.announcementBarPlaceholder_vyr4{flex:0 0 10px}.announcementBarClose_gvF7{flex:0 0 30px;align-self:stretch}.announcementBarContent_xLdY{flex:auto}@media print{.announcementBar_mb4j{display:none}}@media (min-width:997px){:root{--docusaurus-announcement-bar-height:30px}.announcementBarPlaceholder_vyr4,.announcementBarClose_gvF7{flex-basis:50px}}.toggle_vylO{width:2rem;height:2rem}.toggleButton_gllP{-webkit-tap-highlight-color:transparent;transition:background var(--ifm-transition-fast);border-radius:50%;justify-content:center;align-items:center;width:100%;height:100%;display:flex}.toggleButton_gllP:hover{background:var(--ifm-color-emphasis-200)}[data-theme=light] .darkToggleIcon_wfgR,[data-theme=dark] .lightToggleIcon_pyhR{display:none}.toggleButtonDisabled_aARS{cursor:not-allowed}.darkNavbarColorModeToggle_X3D1:hover{background:var(--ifm-color-gray-800)}.themedComponent_mlkZ{display:none}[data-theme=light] .themedComponent--light_NVdE,[data-theme=dark] .themedComponent--dark_xIcU,html:not([data-theme]) .themedComponent--light_NVdE{display:initial}.iconExternalLink_nPIU{margin-left:.3rem}.dropdownNavbarItemMobile_S0Fm{cursor:pointer}.iconLanguage_nlXk{vertical-align:text-bottom;margin-right:5px}.navbarSearchContainer_Bca1:empty{display:none}@media (max-width:996px){.navbarSearchContainer_Bca1{right:var(--ifm-navbar-padding-horizontal);position:absolute}}@media (min-width:997px){.navbarSearchContainer_Bca1{padding:var(--ifm-navbar-item-padding-vertical)var(--ifm-navbar-item-padding-horizontal)}}.navbarHideable_m1mJ{transition:transform var(--ifm-transition-fast)ease}.navbarHidden_jGov{transform:translateY(calc(-100% - 2px))}@media (max-width:996px){.colorModeToggle_DEke{display:none}}.errorBoundaryError_a6uf{white-space:pre-wrap;color:red}.errorBoundaryFallback_VBag{color:red;padding:.55rem}.footerLogoLink_BH7S{opacity:.5;transition:opacity var(--ifm-transition-fast)var(--ifm-transition-timing-default)}.footerLogoLink_BH7S:hover{opacity:1}.anchorWithStickyNavbar_LWe7{scroll-margin-top:calc(var(--ifm-navbar-height) + .5rem)}.anchorWithHideOnScrollNavbar_WYt5{scroll-margin-top:.5rem}.hash-link{opacity:0;transition:opacity var(--ifm-transition-fast);-webkit-user-select:none;user-select:none;padding-left:.5rem}.hash-link:before{content:"#"}.hash-link:focus,:hover>.hash-link{opacity:1}html,body{height:100%}.mainWrapper_z2l0{flex-direction:column;flex:1 0 auto;display:flex}.docusaurus-mt-lg{margin-top:3rem}#__docusaurus{flex-direction:column;min-height:100%;display:flex}.sidebar_re4s{max-height:calc(100vh - (var(--ifm-navbar-height) + 2rem));top:calc(var(--ifm-navbar-height) + 2rem);position:sticky;overflow-y:auto}.sidebarItemTitle_pO2u{font-size:var(--ifm-h3-font-size);font-weight:var(--ifm-font-weight-bold)}.sidebarItemList_Yudw{font-size:.9rem}.sidebarItem__DBe{margin-top:.7rem}.sidebarItemLink_mo7H{color:var(--ifm-font-color-base);display:block}.sidebarItemLink_mo7H:hover{text-decoration:none}.sidebarItemLinkActive_I1ZP{color:var(--ifm-color-primary)!important}@media (max-width:996px){.sidebar_re4s{display:none}}.yearGroupHeading_rMGB{margin-top:1.6rem;margin-bottom:.4rem}.yearGroupHeading_QT03{margin:1rem .75rem .5rem}.title_f1Hy{font-size:3rem}@media (max-width:576px){.title_f1Hy{font-size:2rem}}.container_mt6G{font-size:.9rem}[data-theme=dark] .githubSvg_Uu4N{fill:var(--light)}[data-theme=light] .githubSvg_Uu4N{fill:var(--dark)}[data-theme=dark] .xSvg_y3PF{fill:var(--light)}[data-theme=light] 
.xSvg_y3PF{fill:var(--dark)}:root{--docusaurus-blog-social-icon-size:1rem}.authorSocials_rSDt{height:var(--docusaurus-blog-social-icon-size);line-clamp:1;-webkit-line-clamp:1;-webkit-box-orient:vertical;flex-wrap:wrap;align-items:center;line-height:0;display:flex;overflow:hidden}.authorSocialLink_owbf{height:var(--docusaurus-blog-social-icon-size);width:var(--docusaurus-blog-social-icon-size);margin-right:.4rem;line-height:0}.authorSocialIcon_XYv3{width:var(--docusaurus-blog-social-icon-size);height:var(--docusaurus-blog-social-icon-size)}.authorImage_XqGP{--ifm-avatar-photo-size:3.6rem}.author-as-h1_n9oJ .authorImage_XqGP{--ifm-avatar-photo-size:7rem}.author-as-h2_gXvM .authorImage_XqGP{--ifm-avatar-photo-size:5.4rem}.authorDetails_lV9A{flex-direction:column;justify-content:space-around;align-items:flex-start;display:flex}.authorName_yefp{flex-direction:row;font-size:1.1rem;line-height:1.1rem;display:flex}.author-as-h1_n9oJ .authorName_yefp{font-size:2.4rem;line-height:2.4rem;display:inline}.author-as-h2_gXvM .authorName_yefp{font-size:1.4rem;line-height:1.4rem;display:inline}.authorTitle_nd0D{line-clamp:1;-webkit-line-clamp:1;-webkit-box-orient:vertical;font-size:.8rem;line-height:1rem;display:-webkit-box;overflow:hidden}.author-as-h1_n9oJ .authorTitle_nd0D{font-size:1.2rem;line-height:1.6rem}.author-as-h2_gXvM .authorTitle_nd0D{font-size:1rem;line-height:1.3rem}.authorBlogPostCount_iiJ5{background:var(--ifm-color-secondary);color:var(--ifm-color-black);border-radius:var(--ifm-global-radius);margin-left:.3rem;padding:.1rem .4rem;font-size:.8rem;line-height:1.2}.authorListItem_n3yI{margin-bottom:2rem;list-style-type:none}.authorCol_Hf19{max-width:inherit!important}.imageOnlyAuthorRow_pa_O{flex-flow:wrap;display:flex}.imageOnlyAuthorCol_G86a{margin-left:.3rem;margin-right:.3rem}.codeBlockContainer_Ckt0{background:var(--prism-background-color);color:var(--prism-color);margin-bottom:var(--ifm-leading);box-shadow:var(--ifm-global-shadow-lw);border-radius:var(--ifm-code-border-radius)}.codeBlockContent_biex{border-radius:inherit;direction:ltr;position:relative}.codeBlockTitle_Ktv7{border-bottom:1px solid var(--ifm-color-emphasis-300);font-size:var(--ifm-code-font-size);padding:.75rem var(--ifm-pre-padding);border-top-left-radius:inherit;border-top-right-radius:inherit;font-weight:500}.codeBlock_bY9V{--ifm-pre-background:var(--prism-background-color);margin:0;padding:0}.codeBlockTitle_Ktv7+.codeBlockContent_biex .codeBlock_bY9V{border-top-left-radius:0;border-top-right-radius:0}.codeBlockStandalone_MEMb{padding:0}.codeBlockLines_e6Vv{font:inherit;float:left;padding:var(--ifm-pre-padding);min-width:100%}.codeBlockLinesWithNumbering_o6Pm{padding:var(--ifm-pre-padding)0;display:table}@media print{.codeBlockLines_e6Vv{white-space:pre-wrap}}.buttonGroup__atx{right:calc(var(--ifm-pre-padding)/2);top:calc(var(--ifm-pre-padding)/2);column-gap:.2rem;display:flex;position:absolute}.buttonGroup__atx button{background:var(--prism-background-color);color:var(--prism-color);border:1px solid var(--ifm-color-emphasis-300);border-radius:var(--ifm-global-radius);transition:opacity var(--ifm-transition-fast)ease-in-out;opacity:0;align-items:center;padding:.4rem;line-height:0;display:flex}.buttonGroup__atx button:hover{opacity:1!important}.buttonGroup__atx button:focus-visible{opacity:1!important}.theme-code-block:hover .buttonGroup__atx 
button{opacity:.4}:where(:root){--docusaurus-highlighted-code-line-bg:#484d5b}:where([data-theme=dark]){--docusaurus-highlighted-code-line-bg:#646464}.theme-code-block-highlighted-line{background-color:var(--docusaurus-highlighted-code-line-bg);margin:0 calc(-1*var(--ifm-pre-padding));padding:0 var(--ifm-pre-padding);display:block}.codeLine_lJS_{counter-increment:line-count;display:table-row}.codeLineNumber_Tfdd{text-align:right;padding:0 var(--ifm-pre-padding);background:var(--ifm-pre-background);overflow-wrap:normal;width:1%;display:table-cell;position:sticky;left:0}.codeLineNumber_Tfdd:before{content:counter(line-count);opacity:.4}.theme-code-block-highlighted-line .codeLineNumber_Tfdd:before{opacity:.8}.codeLineContent_feaV{padding-right:var(--ifm-pre-padding)}.theme-code-block:hover .copyButtonCopied_obH4{opacity:1!important}.copyButtonIcons_eSgA{width:1.125rem;height:1.125rem;position:relative}.copyButtonIcon_y97N,.copyButtonSuccessIcon_LjdS{fill:currentColor;opacity:inherit;width:inherit;height:inherit;transition:all var(--ifm-transition-fast)ease;position:absolute;top:0;left:0}.copyButtonSuccessIcon_LjdS{opacity:0;color:#00d600;top:50%;left:50%;transform:translate(-50%,-50%)scale(.33)}.copyButtonCopied_obH4 .copyButtonIcon_y97N{opacity:0;transform:scale(.33)}.copyButtonCopied_obH4 .copyButtonSuccessIcon_LjdS{opacity:1;transition-delay:75ms;transform:translate(-50%,-50%)scale(1)}.wordWrapButtonIcon_Bwma{width:1.2rem;height:1.2rem}.wordWrapButtonEnabled_EoeP .wordWrapButtonIcon_Bwma{color:var(--ifm-color-primary)}.details_lb9f{--docusaurus-details-summary-arrow-size:.38rem;--docusaurus-details-transition:transform .2s ease;--docusaurus-details-decoration-color:grey}.details_lb9f>summary{cursor:pointer;padding-left:1rem;list-style:none;position:relative}.details_lb9f>summary::-webkit-details-marker{display:none}.details_lb9f>summary:before{content:"";border-width:var(--docusaurus-details-summary-arrow-size);border-style:solid;border-color:transparent transparent transparent var(--docusaurus-details-decoration-color);transition:var(--docusaurus-details-transition);transform-origin:calc(var(--docusaurus-details-summary-arrow-size)/2)50%;position:absolute;top:.45rem;left:0;transform:rotate(0)}.details_lb9f[open]:not(.isBrowser_bmU9)>summary:before,.details_lb9f[data-collapsed=false].isBrowser_bmU9>summary:before{transform:rotate(90deg)}.collapsibleContent_i85q{border-top:1px solid var(--docusaurus-details-decoration-color);margin-top:1rem;padding-top:1rem}.collapsibleContent_i85q p:last-child,.details_lb9f>summary>p:last-child{margin-bottom:0}.details_b_Ee{--docusaurus-details-decoration-color:var(--ifm-alert-border-color);--docusaurus-details-transition:transform var(--ifm-transition-fast)ease;margin:0 0 var(--ifm-spacing-vertical);border:1px solid var(--ifm-alert-border-color)}.containsTaskList_mC6p{list-style:none}:not(.containsTaskList_mC6p>li)>.containsTaskList_mC6p{padding-left:0}.img_ev3q{height:auto}.admonition_xJq3{margin-bottom:1em}.admonitionHeading_Gvgb{font:var(--ifm-heading-font-weight)var(--ifm-h5-font-size)/var(--ifm-heading-line-height)var(--ifm-heading-font-family);text-transform:uppercase}.admonitionHeading_Gvgb:not(:last-child){margin-bottom:.3rem}.admonitionHeading_Gvgb code{text-transform:none}.admonitionIcon_Rf37{vertical-align:middle;margin-right:.4em;display:inline-block}.admonitionIcon_Rf37 
svg{fill:var(--ifm-alert-foreground-color);width:1.6em;height:1.6em;display:inline-block}.admonitionContent_BuS1>:last-child{margin-bottom:0}.iconEdit_Z9Sw{vertical-align:sub;margin-right:.3em}.lastUpdated_JAkA{margin-top:.2rem;font-size:smaller;font-style:italic}@media (min-width:997px){.lastUpdated_JAkA{text-align:right}}:root{--docusaurus-tag-list-border:var(--ifm-color-emphasis-300)}.tag_zVej{border:1px solid var(--docusaurus-tag-list-border);transition:border var(--ifm-transition-fast)}.tag_zVej:hover{--docusaurus-tag-list-border:var(--ifm-link-color);text-decoration:none}.tagRegular_sFm0{border-radius:var(--ifm-global-radius);padding:.2rem .5rem .3rem;font-size:90%}.tagWithCount_h2kH{border-left:0;align-items:center;padding:0 .5rem 0 1rem;display:flex;position:relative}.tagWithCount_h2kH:before,.tagWithCount_h2kH:after{content:"";border:1px solid var(--docusaurus-tag-list-border);transition:inherit;position:absolute;top:50%}.tagWithCount_h2kH:before{border-bottom:0;border-right:0;width:1.18rem;height:1.18rem;right:100%;transform:translate(50%,-50%)rotate(-45deg)}.tagWithCount_h2kH:after{border-radius:50%;width:.5rem;height:.5rem;left:0;transform:translateY(-50%)}.tagWithCount_h2kH span{background:var(--ifm-color-secondary);color:var(--ifm-color-black);border-radius:var(--ifm-global-radius);margin-left:.3rem;padding:.1rem .4rem;font-size:.7rem;line-height:1.2}.tags_jXut{display:inline}.tag_QGVx{margin:0 .4rem .5rem 0;display:inline-block}.tableOfContents_bqdL{max-height:calc(100vh - (var(--ifm-navbar-height) + 2rem));top:calc(var(--ifm-navbar-height) + 1rem);position:sticky;overflow-y:auto}@media (max-width:996px){.tableOfContents_bqdL{display:none}.docItemContainer_F8PC{padding:0 .3rem}}.algolia-docsearch-suggestion{border-bottom-color:#3a3dd1}.algolia-docsearch-suggestion--category-header{background-color:#4b54de}.algolia-docsearch-suggestion--highlight{color:#3a33d1}.algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--highlight{background-color:#4d47d5}.aa-cursor .algolia-docsearch-suggestion--content{color:#272296}.aa-cursor .algolia-docsearch-suggestion{background:#ebebfb}@media (min-width:768px){.algolia-docsearch-suggestion{border-bottom-color:#7671df}.algolia-docsearch-suggestion--subcategory-column{color:#4e4726;border-right-color:#7671df}}.searchbox{white-space:nowrap;box-sizing:border-box;width:200px;display:inline-block;position:relative;visibility:visible!important;height:32px!important}.searchbox .algolia-autocomplete{width:100%;height:100%;display:block}.searchbox__wrapper{z-index:999;width:100%;height:100%;position:relative}.searchbox__input{box-sizing:border-box;vertical-align:middle;white-space:normal;appearance:none;border:0;border-radius:16px;width:100%;height:100%;padding:0 26px 0 32px;font-size:12px;transition:box-shadow .4s,background .4s;display:inline-block;box-shadow:inset 0 0 0 1px #ccc;background:#fff!important}.searchbox__input::-webkit-search-decoration{display:none}.searchbox__input::-webkit-search-cancel-button{display:none}.searchbox__input::-webkit-search-results-button{display:none}.searchbox__input::-webkit-search-results-decoration{display:none}.searchbox__input:hover{box-shadow:inset 0 0 0 1px #b3b3b3}.searchbox__input:focus,.searchbox__input:active{background:#fff;outline:0;box-shadow:inset 0 0 0 1px 
#aaa}.searchbox__input::placeholder{color:#aaa}.searchbox__submit{vertical-align:middle;text-align:center;font-size:inherit;-webkit-user-select:none;user-select:none;top:0;right:inherit;background-color:rgba(69,142,225,0);border:0;border-radius:16px 0 0 16px;width:32px;height:100%;margin:0;padding:0;position:absolute;left:0}.searchbox__submit:before{vertical-align:middle;content:"";height:100%;margin-right:-4px;display:inline-block}.searchbox__submit:hover,.searchbox__submit:active{cursor:pointer}.searchbox__submit:focus{outline:0}.searchbox__submit svg{vertical-align:middle;fill:#6d7e96;width:14px;height:14px}.searchbox__reset{cursor:pointer;font-size:inherit;-webkit-user-select:none;user-select:none;fill:rgba(0,0,0,.5);background:0 0;border:0;margin:0;padding:0;display:block;position:absolute;top:8px;right:8px}.searchbox__reset.hide{display:none}.searchbox__reset:focus{outline:0}.searchbox__reset svg{width:8px;height:8px;margin:4px;display:block}.searchbox__input:valid~.searchbox__reset{animation-name:sbx-reset-in;animation-duration:.15s;display:block}@keyframes sbx-reset-in{0%{opacity:0;transform:translate(-20%)}to{opacity:1;transform:none}}.algolia-autocomplete .ds-dropdown-menu:before{content:"";z-index:1000;background:#373940;border-top:1px solid #373940;border-right:1px solid #373940;border-radius:2px;width:14px;height:14px;display:block;position:absolute;top:-7px;transform:rotate(-45deg)}.algolia-autocomplete .ds-dropdown-menu{box-shadow:0 1px rgba(0,0,0,.2),0 2px 3px rgba(0,0,0,.1)}@media (min-width:601px){.algolia-autocomplete.algolia-autocomplete-right .ds-dropdown-menu{right:0!important;left:inherit!important}.algolia-autocomplete.algolia-autocomplete-right .ds-dropdown-menu:before{right:48px}.algolia-autocomplete .ds-dropdown-menu{text-align:left;z-index:999;background:0 0;border:none;border-radius:4px;min-width:500px;max-width:600px;height:auto;margin:6px 0 0;padding:0;position:relative;top:-6px}}@media (max-width:600px){.algolia-autocomplete .ds-dropdown-menu{z-index:100;width:600px;max-width:calc(100% - 2rem);max-height:calc(100% - 5rem);display:block;position:fixed!important;top:50px!important;left:auto!important;right:1rem!important}.algolia-autocomplete .ds-dropdown-menu:before{right:6rem}}.algolia-autocomplete .ds-dropdown-menu .ds-suggestions{z-index:1000;position:relative}.algolia-autocomplete .ds-dropdown-menu .ds-suggestion{cursor:pointer}.algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-]{background:#fff;border-radius:4px;padding:0;position:relative;overflow:auto}.algolia-autocomplete .ds-dropdown-menu *{box-sizing:border-box}.algolia-autocomplete .algolia-docsearch-suggestion{padding:0;text-decoration:none;display:block;position:relative;overflow:hidden}.algolia-autocomplete .ds-cursor .algolia-docsearch-suggestion--wrapper{background:#f1f1f1;box-shadow:inset -2px 0 #61dafb}.algolia-autocomplete .algolia-docsearch-suggestion--highlight{background:#ffe564;padding:.1em .05em}.algolia-autocomplete .algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--category-header-lvl0 .algolia-docsearch-suggestion--highlight,.algolia-autocomplete .algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--category-header-lvl1 .algolia-docsearch-suggestion--highlight{color:inherit;background:inherit}.algolia-autocomplete .algolia-docsearch-suggestion--text .algolia-docsearch-suggestion--highlight{background:inherit;color:inherit;padding:0 0 1px;box-shadow:inset 0 -2px rgba(69,142,225,.8)}.algolia-autocomplete 
.algolia-docsearch-suggestion--content{float:right;cursor:pointer;width:70%;padding:5.33333px 0 5.33333px 10.6667px;display:block;position:relative}.algolia-autocomplete .algolia-docsearch-suggestion--content:before{content:"";background:#ececec;width:1px;height:100%;display:block;position:absolute;top:0;left:-1px}.algolia-autocomplete .algolia-docsearch-suggestion--category-header{letter-spacing:.08em;text-transform:uppercase;color:#fff;background-color:#373940;margin:0;padding:5px 8px;font-size:14px;font-weight:700;display:none;position:relative}.algolia-autocomplete .algolia-docsearch-suggestion--wrapper{float:left;background-color:#fff;width:100%;padding:8px 0 0}.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column{float:left;text-align:right;color:#777;word-wrap:break-word;width:30%;padding:5.33333px 10.6667px;font-size:.9em;display:none;position:relative}.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column:before{content:"";background:#ececec;width:1px;height:100%;display:block;position:absolute;top:0;right:0}.algolia-autocomplete .algolia-docsearch-suggestion.algolia-docsearch-suggestion__main .algolia-docsearch-suggestion--category-header,.algolia-autocomplete .algolia-docsearch-suggestion.algolia-docsearch-suggestion__secondary{display:block}.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column .algolia-docsearch-suggestion--highlight{background-color:inherit;color:inherit}.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-inline{display:none}.algolia-autocomplete .algolia-docsearch-suggestion--title{color:#02060c;margin-bottom:4px;font-size:.9em;font-weight:700}.algolia-autocomplete .algolia-docsearch-suggestion--text{color:#63676d;padding-right:2px;font-size:.85em;line-height:1.2em;display:block}.algolia-autocomplete .algolia-docsearch-suggestion--version{color:#a6aab1;padding-top:2px;padding-right:2px;font-size:.65em;display:block}.algolia-autocomplete .algolia-docsearch-suggestion--no-results{text-align:center;background-color:#373940;width:100%;margin-top:-8px;padding:8px 0;font-size:1.2em}.algolia-autocomplete .algolia-docsearch-suggestion--no-results .algolia-docsearch-suggestion--text{color:#fff;margin-top:4px}.algolia-autocomplete .algolia-docsearch-suggestion--no-results:before{display:none}.algolia-autocomplete .algolia-docsearch-suggestion code{color:#222;background-color:#ebebeb;border:none;border-radius:3px;padding:1px 5px;font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace;font-size:90%}.algolia-autocomplete .algolia-docsearch-suggestion code .algolia-docsearch-suggestion--highlight{background:0 0}.algolia-autocomplete .algolia-docsearch-suggestion.algolia-docsearch-suggestion__main .algolia-docsearch-suggestion--category-header{color:#fff;display:block}.algolia-autocomplete .algolia-docsearch-suggestion.algolia-docsearch-suggestion__secondary .algolia-docsearch-suggestion--subcategory-column{display:block}.algolia-autocomplete .algolia-docsearch-footer{z-index:2000;float:right;background-color:#fff;width:100%;height:30px;font-size:0;line-height:0}.algolia-autocomplete .algolia-docsearch-footer--logo{text-indent:-9000px;background-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 130 18'%3E%3Cdefs%3E%3ClinearGradient id='a' x1='-36.87%25' x2='129.43%25' y1='134.94%25' y2='-27.7%25'%3E%3Cstop stop-color='%252300AEFF' offset='0%25'/%3E%3Cstop stop-color='%25233369E7' offset='100%25'/%3E%3C/linearGradient%3E%3C/defs%3E%3Cg fill='none' 
fill-rule='evenodd'%3E%3Cpath fill='url(%2523a)' d='M59.4.02h13.3a2.37 2.37 0 0 1 2.38 2.37V15.6a2.37 2.37 0 0 1-2.38 2.36H59.4a2.37 2.37 0 0 1-2.38-2.36V2.38A2.37 2.37 0 0 1 59.4.02z'/%3E%3Cpath fill='%2523FFF' d='M66.26 4.56c-2.82 0-5.1 2.27-5.1 5.08 0 2.8 2.28 5.07 5.1 5.07 2.8 0 5.1-2.26 5.1-5.07 0-2.8-2.28-5.07-5.1-5.07zm0 8.65c-2 0-3.6-1.6-3.6-3.56 0-1.97 1.6-3.58 3.6-3.58 1.98 0 3.6 1.6 3.6 3.58a3.58 3.58 0 0 1-3.6 3.57zm0-6.4v2.66c0 .07.08.13.15.1l2.4-1.24c.04-.02.06-.1.03-.14a2.96 2.96 0 0 0-2.46-1.5c-.06 0-.1.05-.1.1zm-3.33-1.96l-.3-.3a.78.78 0 0 0-1.12 0l-.36.36a.77.77 0 0 0 0 1.1l.3.3c.05.05.13.04.17 0 .2-.25.4-.5.6-.7.23-.23.46-.43.7-.6.07-.04.07-.1.03-.16zm5-.8V3.4a.78.78 0 0 0-.78-.78h-1.83a.78.78 0 0 0-.78.78v.63c0 .07.06.12.14.1a5.74 5.74 0 0 1 1.58-.22c.52 0 1.04.07 1.54.2a.1.1 0 0 0 .13-.1z'/%3E%3Cpath fill='%2523182359' d='M102.16 13.76c0 1.46-.37 2.52-1.12 3.2-.75.67-1.9 1-3.44 1-.56 0-1.74-.1-2.67-.3l.34-1.7c.78.17 1.82.2 2.36.2.86 0 1.48-.16 1.84-.5.37-.36.55-.88.55-1.57v-.35a6.37 6.37 0 0 1-.84.3 4.15 4.15 0 0 1-1.2.17 4.5 4.5 0 0 1-1.6-.28 3.38 3.38 0 0 1-1.26-.82 3.74 3.74 0 0 1-.8-1.35c-.2-.54-.3-1.5-.3-2.2 0-.67.1-1.5.3-2.06a3.92 3.92 0 0 1 .9-1.43 4.12 4.12 0 0 1 1.45-.92 5.3 5.3 0 0 1 1.94-.37c.7 0 1.35.1 1.97.2a15.86 15.86 0 0 1 1.6.33v8.46zm-5.95-4.2c0 .9.2 1.88.6 2.3.4.4.9.62 1.53.62.34 0 .66-.05.96-.15a2.75 2.75 0 0 0 .73-.33V6.7a8.53 8.53 0 0 0-1.42-.17c-.76-.02-1.36.3-1.77.8-.4.5-.62 1.4-.62 2.23zm16.13 0c0 .72-.1 1.26-.32 1.85a4.4 4.4 0 0 1-.9 1.53c-.38.42-.85.75-1.4.98-.54.24-1.4.37-1.8.37-.43 0-1.27-.13-1.8-.36a4.1 4.1 0 0 1-1.4-.97 4.5 4.5 0 0 1-.92-1.52 5.04 5.04 0 0 1-.33-1.84c0-.72.1-1.4.32-2 .22-.6.53-1.1.92-1.5.4-.43.86-.75 1.4-.98a4.55 4.55 0 0 1 1.78-.34 4.7 4.7 0 0 1 1.8.34c.54.23 1 .55 1.4.97.38.42.68.92.9 1.5.23.6.35 1.3.35 2zm-2.2 0c0-.92-.2-1.7-.6-2.22-.38-.54-.94-.8-1.64-.8-.72 0-1.27.26-1.67.8-.4.54-.58 1.3-.58 2.22 0 .93.2 1.56.6 2.1.38.54.94.8 1.64.8s1.25-.26 1.65-.8c.4-.55.6-1.17.6-2.1zm6.97 4.7c-3.5.02-3.5-2.8-3.5-3.27L113.57.92l2.15-.34v10c0 .25 0 1.87 1.37 1.88v1.8zm3.77 0h-2.15v-9.2l2.15-.33v9.54zM119.8 3.74c.7 0 1.3-.58 1.3-1.3 0-.7-.58-1.3-1.3-1.3-.73 0-1.3.6-1.3 1.3 0 .72.58 1.3 1.3 1.3zm6.43 1c.7 0 1.3.1 1.78.27.5.18.88.42 1.17.73.28.3.5.74.6 1.18.13.46.2.95.2 1.5v5.47a25.24 25.24 0 0 1-1.5.25c-.67.1-1.42.15-2.25.15a6.83 6.83 0 0 1-1.52-.16 3.2 3.2 0 0 1-1.18-.5 2.46 2.46 0 0 1-.76-.9c-.18-.37-.27-.9-.27-1.44 0-.52.1-.85.3-1.2.2-.37.48-.67.83-.9a3.6 3.6 0 0 1 1.23-.5 7.07 7.07 0 0 1 2.2-.1l.83.16v-.35c0-.25-.03-.48-.1-.7a1.5 1.5 0 0 0-.3-.58c-.15-.18-.34-.3-.58-.4a2.54 2.54 0 0 0-.92-.17c-.5 0-.94.06-1.35.13-.4.08-.75.16-1 .25l-.27-1.74c.27-.1.67-.18 1.2-.28a9.34 9.34 0 0 1 1.65-.14zm.18 7.74c.66 0 1.15-.04 1.5-.1V10.2a5.1 5.1 0 0 0-2-.1c-.23.03-.45.1-.64.2a1.17 1.17 0 0 0-.47.38c-.13.17-.18.26-.18.52 0 .5.17.8.5.98.32.2.74.3 1.3.3zM84.1 4.8c.72 0 1.3.08 1.8.26.48.17.87.42 1.15.73.3.3.5.72.6 1.17.14.45.2.94.2 1.47v5.48a25.24 25.24 0 0 1-1.5.26c-.67.1-1.42.14-2.25.14a6.83 6.83 0 0 1-1.52-.16 3.2 3.2 0 0 1-1.18-.5 2.46 2.46 0 0 1-.76-.9c-.18-.38-.27-.9-.27-1.44 0-.53.1-.86.3-1.22.2-.36.5-.65.84-.88a3.6 3.6 0 0 1 1.24-.5 7.07 7.07 0 0 1 2.2-.1c.26.03.54.08.84.15v-.35c0-.24-.03-.48-.1-.7a1.5 1.5 0 0 0-.3-.58c-.15-.17-.34-.3-.58-.4a2.54 2.54 0 0 0-.9-.15c-.5 0-.96.05-1.37.12-.4.07-.75.15-1 .24l-.26-1.75c.27-.08.67-.17 1.18-.26a8.9 8.9 0 0 1 1.66-.15zm.2 7.73c.65 0 1.14-.04 1.48-.1v-2.17a5.1 5.1 0 0 0-1.98-.1c-.24.03-.46.1-.65.18a1.17 1.17 0 0 0-.47.4c-.12.17-.17.26-.17.52 0 .5.18.8.5.98.32.2.75.3 1.3.3zm8.68 1.74c-3.5 
0-3.5-2.82-3.5-3.28L89.45.92 91.6.6v10c0 .25 0 1.87 1.38 1.88v1.8z'/%3E%3Cpath fill='%25231D3657' d='M5.03 11.03c0 .7-.26 1.24-.76 1.64-.5.4-1.2.6-2.1.6-.88 0-1.6-.14-2.17-.42v-1.2c.36.16.74.3 1.14.38.4.1.78.15 1.13.15.5 0 .88-.1 1.12-.3a.94.94 0 0 0 .35-.77.98.98 0 0 0-.33-.74c-.22-.2-.68-.44-1.37-.72-.72-.3-1.22-.62-1.52-1C.23 8.27.1 7.82.1 7.3c0-.65.22-1.17.7-1.55.46-.37 1.08-.56 1.86-.56.76 0 1.5.16 2.25.48l-.4 1.05c-.7-.3-1.32-.44-1.87-.44-.4 0-.73.08-.94.26a.9.9 0 0 0-.33.72c0 .2.04.38.12.52.08.15.22.3.42.4.2.14.55.3 1.06.52.58.24 1 .47 1.27.67.27.2.47.44.6.7.12.26.18.57.18.92zM9 13.27c-.92 0-1.64-.27-2.16-.8-.52-.55-.78-1.3-.78-2.24 0-.97.24-1.73.72-2.3.5-.54 1.15-.82 2-.82.78 0 1.4.25 1.85.72.46.48.7 1.14.7 1.97v.67H7.35c0 .58.17 1.02.46 1.33.3.3.7.47 1.24.47.36 0 .68-.04.98-.1a5.1 5.1 0 0 0 .98-.33v1.02a3.87 3.87 0 0 1-.94.32 5.72 5.72 0 0 1-1.08.1zm-.22-5.2c-.4 0-.73.12-.97.38s-.37.62-.42 1.1h2.7c0-.48-.13-.85-.36-1.1-.23-.26-.54-.38-.94-.38zm7.7 5.1l-.26-.84h-.05c-.28.36-.57.6-.86.74-.28.13-.65.2-1.1.2-.6 0-1.05-.16-1.38-.48-.32-.32-.5-.77-.5-1.34 0-.62.24-1.08.7-1.4.45-.3 1.14-.47 2.07-.5l1.02-.03V9.2c0-.37-.1-.65-.27-.84-.17-.2-.45-.28-.82-.28-.3 0-.6.04-.88.13a6.68 6.68 0 0 0-.8.33l-.4-.9a4.4 4.4 0 0 1 1.05-.4 4.86 4.86 0 0 1 1.08-.12c.76 0 1.33.18 1.7.5.4.33.6.85.6 1.56v4h-.9zm-1.9-.87c.47 0 .83-.13 1.1-.38.3-.26.43-.62.43-1.08v-.52l-.76.03c-.6.03-1.02.13-1.3.3s-.4.45-.4.82c0 .26.08.47.24.6.16.16.4.23.7.23zm7.57-5.2c.25 0 .46.03.62.06l-.12 1.18a2.38 2.38 0 0 0-.56-.06c-.5 0-.92.16-1.24.5-.3.32-.47.75-.47 1.27v3.1h-1.27V7.23h1l.16 1.05h.05c.2-.36.45-.64.77-.85a1.83 1.83 0 0 1 1.02-.3zm4.12 6.17c-.9 0-1.58-.27-2.05-.8-.47-.52-.7-1.27-.7-2.25 0-1 .24-1.77.73-2.3.5-.54 1.2-.8 2.12-.8.63 0 1.2.1 1.7.34l-.4 1c-.52-.2-.96-.3-1.3-.3-1.04 0-1.55.68-1.55 2.05 0 .67.13 1.17.38 1.5.26.34.64.5 1.13.5a3.23 3.23 0 0 0 1.6-.4v1.1a2.53 2.53 0 0 1-.73.28 4.36 4.36 0 0 1-.93.08zm8.28-.1h-1.27V9.5c0-.45-.1-.8-.28-1.02-.18-.23-.47-.34-.88-.34-.53 0-.9.16-1.16.48-.25.3-.38.85-.38 1.6v2.94h-1.26V4.8h1.26v2.12c0 .34-.02.7-.06 1.1h.08a1.76 1.76 0 0 1 .72-.67c.3-.16.66-.24 1.07-.24 1.43 0 2.15.74 2.15 2.2v3.86zM42.2 7.1c.74 0 1.32.28 1.73.82.4.53.62 1.3.62 2.26 0 .97-.2 1.73-.63 2.27-.42.54-1 .82-1.75.82s-1.33-.27-1.75-.8h-.08l-.23.7h-.94V4.8h1.26v2l-.02.64-.03.56h.05c.4-.6 1-.9 1.78-.9zm-.33 1.04c-.5 0-.88.15-1.1.45-.22.3-.34.8-.35 1.5v.08c0 .72.12 1.24.35 1.57.23.32.6.48 1.12.48.44 0 .78-.17 1-.53.24-.35.36-.87.36-1.53 0-1.35-.47-2.03-1.4-2.03zm3.24-.92h1.4l1.2 3.37c.18.47.3.92.36 1.34h.04l.18-.72 1.37-4H51l-2.53 6.73c-.46 1.23-1.23 1.85-2.3 1.85-.3 0-.56-.03-.83-.1v-1c.2.05.4.08.65.08.6 0 1.03-.36 1.28-1.06l.22-.56-2.4-5.94z'/%3E%3C/g%3E%3C/svg%3E");background-position:50%;background-repeat:no-repeat;background-size:100%;width:110px;height:100%;margin-left:auto;margin-right:5px;display:block;overflow:hidden}html[data-theme=dark] .algolia-docsearch-suggestion--category-header,html[data-theme=dark] .algolia-docsearch-suggestion--wrapper,html[data-theme=dark] .algolia-docsearch-footer{background:var(--ifm-background-color)!important;color:var(--ifm-font-color-base)!important}html[data-theme=dark] .algolia-docsearch-suggestion--title{color:var(--ifm-font-color-base)!important}html[data-theme=dark] .ds-cursor .algolia-docsearch-suggestion--wrapper{background:var(--ifm-background-surface-color)!important}mark{background-color:#add8e6} \ No newline at end of file diff --git a/assets/font/JetBrainsMono-Regular.woff b/assets/font/JetBrainsMono-Regular.woff deleted file mode 100644 index 
dc1d85f5706143d7b21492f7d5c296ec3db13b9f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 59368
[59368 bytes of base85-encoded binary data for the deleted JetBrainsMono-Regular.woff omitted]
z669G$N=1{nlP!gt3M3rhL=!(3R4(y#msXUf{k)R-3oH=?)Uz-fD;11HxDw=4gK0kz zZPa2AE%2wwuxbXHXaHuNXJrylm;a+Wu9QM8ErhfrS`u;`^twnW2qSNnT!0!)x*6nB z^gttE5I>8U`v?RzQjafpjaEN4`kdp|9e2FBSfuaUfIMy*S|6tGt3FkJYzFb>0& zC}xNzkn7DnZ{APLW`5OX;dnF8;0I%P7rNxO?s(Ou9X0snTk^SVTPh*Pq6~frz@|Dg z(xuRIV+dJ28_I5AQXjkopw-{gu+EF3?0E;(pFHj9y3CV&A z*W1(G)zO|x1X`MXjwZ8KP{wZdvn9;ZEam?YyG3QTwjtz0^m16W4{pJLhlY33E@gu} z$~_0G^`Bd@<^X0pUMgbNj|sDWfSedW-7(X~ z)FKOLNQ#lo?UJ16hf}R=Rt!Zzeaowu*J3$YbY~?wayNb7lOOxolht3`*_OX;P|Y@C zNoiN&824S=hOzcA#xx;Lj=Ys9@Md{V43_OWym5ZC4h9?B+_+_%r?fUYyf1C@xPBiE zmXvE~Lx01b)%|+oX=t;>c%aQwTAQuL18pAH@3Yx>WF-#_Aod;hH;WZ!_w8@Mnfm*( z7z#GNY{MM5*J-mE4l8Y*(%Ni_!yDUdhQmsm$Mt7jg2TWgPmn*MKjrM8o*5rEW1X3C znl7LT+ApTX6L;R3z3cAm-FFS#dDoZjyeoU>-2->uIdIoqKp5qV|G?$A{|BU7T-C|a zgpC?Vd&~ivkfDWg0zb!;1tW$xzq=uiwB}|`B_biXL7>guCSv??A*JUIqQI+``$&sa zart}wigqCH3Sm%5=Tss_X^SFMW&2Wy<3ihng&7Y;` z?K}taZwuL{L|TQ2#yJg4jXY^o!q?Tnp-sQ8#a%f;bn&|-xS>;Oi{Uz8TB(-ep|^0!@JF{FEm_t>-V{;;d+PO9{Ctyd;B%Pkqjx2?PQKT zSh6@leeI-15v?3PSfZ?Ai#<(5!6=Bvvo<@2`i2O|)`V4Rj-skLI&Yyyqv@!bnoJ`m zSf~Nrg8N5wTPewu%-7L#Z+kOD0@&`q7qDFsoOFs%hF zD5l+pkYg>3LQPorYXM(CX$oHDCdMz#w8}%qWpQe-y|>8@qPWA>otm8g=VT<95|+iq zkyMY(4sxc`-rqewy+@QIt!>lM_)G6e##&{%W2UQZxU{py;qv)hj+W)wyVp|Pt%1=Q z?aJ&#actKe#c;ekwI)9?JDXxPAz(cY0MB-E8CZ{<4Ln<+=ZFWp8t`M|^&1Y2J`?G%J1usT)e>?Q@4J5LnvQ#~*|moG;{$t(vC7x_1m+>y6)Lx zUH5FVuC?n6m#p`my7l16ak5Nb8K=DPyki-l&f8lLs+l_ zVhxi+6q8k2ET|T%UM7#gRlmI8lu#6z$#t&oPTY8MF_%6N>n~0jdt%+iDMRGCma!xY zb&ZbRkej>n_ziO5mX_)9^UdB#AN=w;)cu0-cx;_l8u#6X>o+!B4{FzI`;J|A>+Q9D z&0@UReJ-or2lmh&xX!CNDr2P)13<3X$Z}Lv38EfcO5qzdDvXe}R4fV|L!S0_MP}GM z?FUnY8!UZ)`Cmx;ss4(;d40Vu^+%tRTc49QS4Ag#K-b$&Z6X&d{EpN4;HiebPwM*I z%$9w9IoC?-=yAmc8ljuzk%mYlN#+)^p&=TzB+7SFBhTj zUEHe-AA88nn`BKTh>?iI$kOcU@zX>b(x=+CXvWg0ux*V|xFg}7?yk_vMwMSjWfljO73JdQSylg}#8B^`E0w9KEi$X#qsCFg8~3P^lc?&Qj3O6HLuS#$ z^9IzXxRZ5qupnresV)~GuBa;-34sXXhHjG79<#8(-MUIBN+ArdUJu1vR}dvf%{MDG zQRw^Vj#^a|TAH1GMX8MPONCw_ADIN1J^}puv69tB%_L;zOlEC$QaDhmWSTPiVK%FP zw}235Jc?ypv5bCLSc6OGs@cTuPtqz4demc z26AeXy`%7HPM6Cxx}@}dQn#_pY}wbR%X%{neV^6)Znoc*zDd2Ww%=EQc9|GWYgcL4 z9V#9SvRsA^lEuiA$*sl1hV3l zH4|eFiwPRH&JuKVr31~a;i<#H?x^JVTHU@)lLiOmRG`h%5-vIdF_*u`YvpT6gqzs0 zHluADV0SR zzD}$kr1W)i+?UnsO8Y^jy=3LS!X4nIze0%_DD@Y(X@yiPGCs%}IOndd=6vygGkF)= z%qeXyUUi8*L#rL(CVo;XjoQp2FEm*VJh2GmB(cB^EF6mf(tWlb4Of(!W}5D{~GHGai;Pa_$gnB^}s+(^ss!amw*Nz z{ah1Ok~^77f>927q2B=|1sp`8(RnrHAlT4E{}v4pt7?;#l1*}B!X${o4C}QBh%FK) zVG5qf%83(~q)%2RxlaL|>EASmk~^@O4RkYGnH zzfk%DPJOXKAN1+ZnyJwj^wDeW={&8`ofNkY*XNb%>H7OWtIz8ad{=8L#kdqPV{XgF zRL~6-whCI}XRPwV6V!fxbx6(eRBKrJS#1c3)v&}ni2{eZD0-?C8%2&^u-hrIOLocW zuvtVSD56cY3FGCHO4_EHyUW9R(o%8a?w_7ISv+>AJU&c6Uta&s(JRZ`+4T%769n{f z_ToX1t2L1AbT39Z^OP54E{tHjHS^X)zce!>8f5o9Sk1EV`U*2uuI&a(=WD&d>Hf zUHPT1u~l!YscsJ~6?0gGrE5AHdWcJ{zr?)@H8<4YM)W#{u(nu>OTpP5hC82cBdXnW zAN?o9Q@YcFxG<&bI!>&&0_}Sn=+|X%J;$yuHeP4BuPM00``F*daebM;DZ|=5`DaQy zmdDEE^758)`5HXK&z_NKct#ssPcfQWZn%y$xyHDv(p~~N{_Xz?^pw&sT>Mv^p5k-Q zvNqatpT5}8O1)3rbIF-irLTwO;IZfa4EQuT10&YxnS#TqlH6oN^h7E}w6t!(=_6|S z8?TGS^A%vZ3to{`1`|EYm1kIauw|MbqDQMgxV?OP^|d7$9jeRs^^)0A6Z)zI1J3~k z)M%!cY@Yq=MoE8T^6!)WSXwQzi;vfMe`*wMHIm#$oge?Wk^p@3%};VKRe$=X@|*si zr2&6>R8w#0uv>csWLq)UY(_c0@fbW=igJB=;-l?hl==Us6!DwK0&A1EI#adajFRbsv z-3uF;e#@8fG;7t(h*E=NAIo8mkk!xeSgd?bGb5qTN692(^%CnxjV3%brBO}Py`+hf zjuRn~WGtP~Y6N<<6ztl(^g;t5n=cM6AH&&oEXiU8=IO^wQze-P)0i8)-e|OMtc3T>^qxJ_iB4OSQK06>KK$Xws=pG9 zO}5S}G94Wmd|6H$pjRF^5t2MD<)t4k{bs4$;*mm6XGcb|^h$iM^L2EwsO6>7qKR6t z3akM`O*yRCde%lk95sMAYPNECc#D#(4K$Azjw1eI0`W*s_sI5m^jhr(gTZd=>Da#T&xvRtAuL4hv$y-_@x#0-v&jrk^IQ6< zS?)K((|sH4lTNIgr}V9(r>Ee0oYB*L4cC=tJ*7SCQrnRJtf1Cc*S(lefsM1UCEaIv 
z;B1`x8^)>heM;-Qsotja4QhRLy>05^HmuL5^sV#J%)@mfo8$h5>l>f78PC}GENzb6 zaL-N#&q3v$9_4xwt~ask2bAmmYJ1eF*s}&Ve-_XZ%r&3nUe)w2B3h`4h$cnBF|$mz zOY_bfRWFu=E?Lz=Ivcal@c@9jMD!o@SElIqmg!%v}KB+%(ie*)6n;udfz#%FSoI8hu(Ltu5Y*&o1pWO4SiK!vQpzEtnZ*I^9(6E)~vR72QhwI zZ$CewTxazS8Ei+k+$&1_0>buS!(LIYW8cv&`zrSQpk~kC(D!kz@1}7RHQ8@$OB%p( z&>0*KH_Cpj@3UH8&?zlj^i67g>8xd=95}YM&4~34>V4Us#q`uG_{&|1^$W_q2*A$I{5Zp;)|1+29LAWYr9v~Sz4 za&dX7`gSH?X>`t6PZxC>KM&XI=%Ox5cf<8M+NQTJGW(QH+m!2yY)l_#wJR`-db9FL zSUHJhQ8!jjs;`w)UmywYc?b7i`W-%qbt_m5taFRXhW^I-3oO z1wMH9ki}*Yc-|n`%)`oa>VysD9z^+$g~7U;7Z%P<@xfs;ySrd$ zG7sI2xkUVa`dR*gKQ5Q(fmt@LbKLvs_jw9{o|wjNhGb#hG%Z3{nxj zvZ~|!-0VO{M}9WYGnPn<^#pidJdub;`V($w+m`~P~-U0|d!5yOy@*V#_1At>Hk7?OoLswMgnsVh>N?+n6CiCgqEl$opYmG<3 zf#Sm8wxq=z>h}6tihb?jVFtT3z?=UBya|${wR#7E&FURsZ38TK(596;Ab+shf!U2! zuHqOWq2#NwI>AihR0|z&ip;q-JP_|n#C_i83HB-Ghfmz6hR0eWE#ZL2qkT~J?HTU9 zjNe=i;H?t!6sI^2%8>`ISK^MZ3#G6_=X*x?cmJ%g% zN2u$_%_dfN1Zc0-bmj_JMhoL`ODKG|J=c6D)jGnFI2z1 zymz%sJE~pu?@66c0eBZZ!D!eMq>*0B=yi|qoM2E2v4M8KW@Mxq1J(jKrl779N0_oN z6IpU6V(kgZEQZnE?bS|KYS4-0xPv7#G=nX{aux7xeJT0x>M~K zm{$ECnttqf+fcUp4elLkyvmwpBg67OXd_c;Gx~uYK;yU5Te$<;x@s~mT<4JXYxY_e zL)%?vzeRW)sKGi7DrWI`@T&dUc3bVHk(CzIBZI8f{ZiP1FCz8J@kV0nb=XYV;@2 zH`n|LEXVMj>iH7$N^(jI!Dy?000Y0btrVHuz-Wyh_aPLqc~-^StO85 zZ<;DsZ!W*@ch4E9(`;#)4|k?cFVem1xBTPz^Z&?YtH0lSz1-GZM?cvd+u0m3Uuh3J zuOnZ|+L)X>$78t?qd<)(SuL=@yahdoxXp=jCL=3k32#<0%4%x4**?+_(}3{_x8Hnt zt*6|3<>52uF1x<3+;^Iup$DqZ(;@cyboD)Sx%%Jn1!Kqojq`9XDRre}C7;uPbtgGI z$B$wqo3ohUDJo$NjOIl3gZx_AL4JLtuQ%aJ#1e5N;)S{c4|Ng5E7#6It3TW={eI)( z*yLh)`|9D*nO$R}GZl<7#8jJI#qEpn@s+{s@>qiZVt#bT@W_s(?S;XK$)Voad>D&J zZ{H^v4VTUL&MSYC`Wom_%-;0sb@2nN>O1U`Fi5Bd5v#T#gXz=ie>EKH0i1bxA3#79 zMWaLfn;Z6_4H;{UIm~~315hAepf}+Sw0L`6^xuG0|18`qd7OT~C*!Y%YI+pz^R-L- zhU0Agdo>=;_PvAbP`H8*EtCqaN0dLRZ~8#p7jOx`Wj@)UVPWO-vIH7}fat1Hep#jW z9#>bI8tJW-ZwXv^zkhM@2>tGQhP$)z$Ie zSkFxXgi_B<+_aus+wu39tsrdRS!ov2Eyt?=d1M9_?;&5C)8%xy9c_Mka6R)kplS49 z{28m!8)T<dDNj{S#zn{nmBWD8xw+6pB*wK`pZXJY?d@$| z33y+NBzY`iR0+q*WxaZ2Ay5_!3`xRPkzmKfKz5g4&-6#~^Syfky(5vrT<_kVdDvR_ zPYw=FR-eFk`oA)Zh3LdYw6K`@3AUP;zz#ndo2{Csi^XaBtJ$#{-(fHbx2;=aeAgaY(Pn=@tGSaXtrbNp{b&raY&X*g;Kjm%B5C8+N=Jm zFkuly6TN&r^Fyo8Y%!aSM$6TQs9E8uN*!ICW3m)Cf`<9V<_G^`{5@}(zG+W2#lZyM z%$;X1)u)a0Qh}X=oibyj#=RNRA~p=VALEV z!IrPr*zmPqXM7ld_ydg%15x-v_1DOY0UtEjq2E4;yoo3oC~G`sv@kiI9x)k|U60Q9 zV2j74yWN}8F|*jr;`Mx*Y)4_%HR+1j*%_isyNbnK)z_4@o&nm#G5^#=X$%(DC>TXNE>=1I6N_tc zcsQ9_`#h=_Gj7#RV*=eK?n*MQ=+-=awfr09fHJjU&y4kk!zXXN@uYf^+~|&9|9Yxe z*kR=Shfbb4R{hGcQ_4AVXUf;&L2?XFJZk)67^v1sg=)2GRZC3uGz(;LrdYJ{%E|Of z69zwQ$MmtNs-9t6!kVs>q-Z3Sh;&9fC2xCDa+*b@dUkH(`PDEHNv8V2Yv{=G?7J$E zne6D8%mlRGSM8%`x^ua1cmc@&?iQ`EO!dRsr~9gpapy;~)emP!N70ovs_PmmpKL~1 zjY4^D2U1<%?H9`gjeogHCB4eC)(7cXjOYyd6YL9`fBjBkwq&jjt4@PH&(dCfqK zJl5M%OZEqps9h{p*@|_iJKE%!-zzzrtQHfA(1=N7X8^G_qZX;|%iVIX!6K4* zq|zM=ZHYv?t_P$x=mDWWpa-Ous$Ez!Sh*Mh&>z$R{LEdY7q!pHqHy!+gO`n$ z$1gj0`sURmrE=*A*3SJI{8WEc{kPZPhn-LJ8<79HK^k2uEnn% zbLEg@+-58yIi{5IF>9FImX=#X!4@CLR83~A=F>}iEDAQK^;&{8T>vu;MR|-Lp$(`) zn+{xST@{PHLw$u@_C4?J$z-~V?L)>@A(oqLE3RB#8s1sD`k**79*FiN+j4Hj?(aCAZY>uYQKqM^S-1@*R#clj5kRH=vd1025hqivUs z1eJPh3SH?&ZrzsjdqsiGRXHyCtg;;HSi-1~2~2SLV~$yzI(0}e^9F;_AQ()-e&v`& zqe&DU#?syrXl*8wpd7Q9`X_7H=CIoxwy^b|V3>=}Hn-2`_qp2~_uubqcfm)Wt6jp= zCP^LsTyoZDNI9A_$ZwP!SsV{5O)q=Zvp!S*{_pEA{p@Fr`+ZNt{TpPXAQV8Q} z7mMTCr`~gKoeqV6@2u9&uS;z_zb;f0y=%{CIKPg*xVTttr!Q*$OJ;ArjPVNP?+dDq zG*IeeObu(JDN;s=NfN6466Mo~VWGXv?e+d)wMS6-8n3B6ctCVHvrq`@A834Z$5ipV z$lB;(%+7xJ;n2WL&j)*EbNDeg+jINW*at^UF49`f=A1JTyt9lR7=_BKYgo=vZ zW7JMw3Dqk;O-+O}d7C^=I}*!8OhPG7LPn^%+a$jxf4uy@%TAs+{Nu`d`wE3V`u*}Z 
z4=wFENWcH>FJ!v9GKAr`1~&`SGL*fKEO?D)$c=R#G(#&MjF~`fE6k4EP@jerE2;6g zvAvnYSDd))c2IEkR==Ie7K_;om-^sYtNot(@O`+beo(`%ZWB1E8pmoPFn2Cx){3-IpEuo@04p_o4NdUU-4NwX^SweWWf%(+zURyX*3z zQ7+iW@<2Mt^`C2^CQ*xNPBX)mlH_3^qRAkxDXNOe6x2`pi(op)<}FzT^M;lyC3|Ow zI~hx}ybwn8FUC9mVS3aj7~V zUkG4uyAl`k4@A%7lz>NWrypDfoqXh9?#@W*-=9>@?TuHMV17a6{!p{eE#L;yEB1}|xiEfmL~sopWT zQVNDbl!TI@L^K>|_JV3-!VsAt4Wfu%VCG`)rH;Z}(yJ+(O)U{>pwT)3Oa-Gr&HfBoy|BWz*^4?4az)A&@$ED%IWmtW@YJc9;(4 z9ce~kb}AUpjsR)=LOD*YCbeRbI+Ts+SC^UFZ|y0?@2A-d-oN#;LR&p6^v0f*T6;>d zExg|OzIA>|<$Pc9|BLf|Be1r^OuyUsJZ2Ax{Fk1W+Uj|H-8t_OlExD_hyaKo@K_>4 zNpy(T>iU9^&>E#AAxG1#=}1^R`du+>ZGQCoD2=hea7=^U{P_3k@6Un6Z+M*};BTNa zyT-1)c5IhE-VA^s1XOsl@x~g)8wpbqkA;(wWGL9|gYmklTN`iPF>rOmEnGU>6E~sZ z4K1H?imFe+FWPDnWJ^hjvd%9_zE+tMhq{W`-?ZUm4qlGI z?xH^~m(Nvyj<*!?mN*i;k4(P}mrHVKU}DFrV7@G39%XeGgB5guG1uoc;^!L2Zq(BD5MA*_L2ytcL<9-h)Tn z@t#1SC+^mM)647sg5PQ_jSzX5)n#tS8rTLS$K#Ee{y9Py9Cj?>;`7*pj$ks5N0}rW z3N)wdr2B!q*dXuIF9iMmAimBkzk>g$UAO^jm`>r_E$VN0yG8pDLC+Y_<==68xW5Nj z^kM}8vItV-a%m|;E!I9NShAD|{UEWmn7IG9xo?4wv#Jum_kPcLzcZOkCdtfXk~GgY z$s|piCewFm`j|pXn(!#0fiwk*EefpiC@P@4sFekDDMD5Ji!6(*))xXQtP&BF1*E?I zD6T8$DzNy1CiCTg&b{9^uOw|jcYpu1GxN$y z!^0$TIJow$+mu|)xF6@7wf*$XL#b3#s-Y>>l)$t!y_VBW;b_1NEN-2aN+knRLP|PZ za&l1mjaqrR#n-2oEs4d^MAu)*Dl33x%Wu#uWMp8u~B%{ zh&TL7_rw-CgArF-Y|$c-U*IjT=fCN?qvRh+6a5a4(OsLV_Ug<`7GFZXVrIs&vV0fx z?6UM0<>@T2{+prxZ}a*=O7v*u_45&<38R;IV>sGDS{CU0=CZKfP32L%DQdvz4Eqj@ zx=BzDOuxo`S<^^19Hyx+ej7PzrC*~-`V9=zjM01nruWh>@;nc?21QO)kx6ySW-C1U zR=S0fNZ9YyRdgIzM52P?bys$bHMn~9AiT6>vY{>se-hg;xDvNZc$-SKbi`MtT3S*_ zSLj#iVU`2lvpLh~K-F$O*)K*r;0Xq}2(%e<{rdb~#Sz8nutFE7X`30DIL9LoXMvX8 zJNtclKaKe$`h7x-D8{>IUp1n6ALgU8&(Ozd7UN|?mMT`K1S?z?y_iQ0fo9cNJndPQ)4Pw6Z82(2|p?vJR-n^5Ksz!d;Bz4 zl5s)ktg$F?{j6`@gWo{=N()cTx*3!NFl}xji zXCHvSo%r@FdwT0v7Y{8Shu06jVqQGGm6$P%QI}l>65y)Lpwq@Me=}h6ghOXC2y@)~ z({3;Le5Tbc)lotcb z{`F{E(w)mDU@ zgnOhD;c`A%u*@Grfp%N#qUNNmR<#5Qdw_@L70cm#cu1`gb`Q)0%V+hoJJzk+an_cu zCg`-w?Ofvx2M3xr@7z#xth*0btz;}#PuIPqMivCd~bZx%kD+{kQh@ z)~D)uwo|WNtu2^znZDo25|{5JE!pTw69GqdU9PwkRRf}^tlM>&>QXe-iD@C%(LFtj z7WLq(Kje0Y@C6drTK`~8eN)T8+A4Z~ZZUl*oQWmt>+5UR#*EYHtv|!04fxg&_W8oE zp`WSX$2{}^?9)Gik*3jl5~K{3Zc0&fx&rP*mDxcaVdMRF8wFNHQi(JnF4bK+hArRP z-rk;WPXo1z!LVaq2aKA|IR;BFbPN`wNl65VNyFZiknwCA;~L6Rd`mA!Se_QH|gC zowkqrsB+!*PYA~N*p}JSUe8n|P~wt}<4|E7h#r{T6$Abgo~Mo5j!0x%GD%6YBiYg1 zR1<@Z)(28+f+lQgiPVa!vbYa%grDGcxupOfEb8WAh_uzp^Mq-hjQ2aysII2Bz24sG zY+JtJ0{*=r?F%~X4QtL@_OoT@t!YTDIdA!;M$Yec1--GR2v@6p`dPon84Nl-{%47> z)l9NwY(I(N`5iHhC(Q*)@mseC&Nh%(Prj$_WQFF0jupKy}_=BFHrYUxwGacXTil&7; z^xczx`SawRu7IYiPM0I*F&H&GF^9{k>RQlM3ZG?}UfjDYJsQkxm@>%>>xs6@Hxw<4 zd>)+`iTWc;6yC!)mjGZw8-V!>NLCIQPTrg7Mn}YsaB=woEHC9@W{)^%B8!?vfQ&2j zr14|p$4}Y37RdPg*#SSd@H1Y3Q&ogwfNi=dO37)wbGWK5uLxBjoO;t?GXiacNN;zrnn>!@Kc6Ev>qos)kdboK#&%{>5{ zUd)RXL>?IMF&fZSnRXP=QG(B>D@5a*36>bba%9>G7P&Sz&b+d#vXvT5OVP7@$&Yz zM7$~_tCcOiMIbNU6Xu|-H5<5rgp^caJ_^|SH40Rt2$9W+QsEcl1>=P;-C5-lQ^hVW z;P9Pg7@oVO^f$rV4FL{aOsX-~3(|lPPj3d?Dmx8V`wiuSQ*|`%l~k_zN#cxXI7q5d zJ|NB9=i2C9k0rt`JJs!;ARPml#O~8|4b4$mmR-Ds8|Y}xpmE0_z0e-g?Kb9gJ8FH8 z3%n+qfZx0r77<7-72GEvMzK2tx`Te3!=`%+C0k*G;P@>>ED)E+*j*#JDJU{hwR4fW`Blwe`;|4 zb)HiUNFtgkZ6s#O9tyjoFdbxc>k?lLr%k;=quXmm@r z4`v5IX43;U);4I|ZV%e>fHB6l;G9D@HLoo{Z+oFnEtw{b`!IUk3_E1GfEH2gnpkxN zCM}F7rySY1KBAX~DPMbGGu=fwEl*agrI7=ZQC~0;iiG=9A9-)r zKkT>+V=5tIpL@^H5Pca~c8|;HaVJAYWvLGb zeFqVF3(n+y@{-O>2Z&j!qEOx|RGvv>qI_zFn!(o{$GrxourUPtcc>S3ruSc>_%wQ958e%AU?j%=pfP@ogq$Pp5n7#i!5J7sGc`{{a1ZihqBeyhh(k?-IG@ z$Z?{B?EAoRp+iLZ>@?V~14u)FZV4AYhDslF{+%e29MJg})+~q&iW;>-ttvo(DO1%W+n<}f_EOehyeABzj>F+`Cc~^R*T4@vB#;J=urz%WWS4fb0%LDi 
z*%=@j>^wlf8D}uj>X?Y&;ABkF(K~tD;tl|Fx?{!S6=>CqiLyoHA3HE*y+ZB5!?c#^ z?D*vN7I4tbv_{M10GPJMhX2#QerTX;olnxd?)HY_FNZ5yF(giPs=`M939n-Ha8+Az zWaEmV6&t%2S6BFJt;dPo>@% zZ>&QOO@tsQisHcK-I{$pD`GvH+V}C#p^kkRP2o?i8@i&WFYhs&cup_$9V^Ks3$qq) zf}BsrVdl?=yO$Cl&ipvcJdX``sTnhj$C+UN1ULZv00E-H6Peurj4_Zfg{aDeEHdQD zMQLZn@mD^CCYPr)VCc1EFNo;;)dk<_)f;~}y>)c!?hkH!_Knj=Prs1fek6PEz1bt# z&woA(5U_m6d?bXHVm`0O2F!=XiD9Wy&hPh!{Gnh#f~gLqLP|mT7gj*nNXJ7v=>5hM z&Y)%kUO)>vX%9%0_tQ6zjebnknMBc|T32r$Ic8jx#Ubb0_L3lKxxEga`>(WZzHg%5 zgkiI&<4su#!G^{Z2D`&nPp3W3Aa0VnFAQQF++a^18!t30NzU7IKh6U0HBEciIrR1g z&t+hDmY`CFou9jt-ad}M!80;E^W>>L^W+rAwd2O?^jzr)jL(k;6DH>^3Zn(?@$qr# ziRmU@b_&WKIzic~sVTFpL{iZIF5Z8QI1rD~G>zj1o{iF7N#QI?hjx;gu3i7!TtA zYp6WM|3n+dXQKd*BW4@i&xVgv20=hpHFY1oz&Jc)JV@6`gT{l{mP{frAA|Y%@ZbuzYqBmW9DwE}z)-a*JlW+|rxS-J z66=->(7^{7N~XA)51k63@{E1^TmmE9B z^kc_Hj~x>*pMpLd`d{gT81~#g@b%5HV^-=iX?1gaHfMtw^Jn;}ccbx8~ zJ-LV2J>&U0o_srX$hlY8Po@OzcpLpVG(~SM^keF6^@G=8^<#D#=^gJ!O(wd4wBhBP zJz5thkI&NG?x9m}S3E1)CC70e0h(>X ztW;`EKw!-f}-g9acBQ zzRLGIQHMCT%x9j5I!I|962@-!DP$IrfGy5lZ0@sW9eh40&l5D?e9$;PPR}(yffdM8 z7_G+a!=Y1r9)>^<85cb@#oH&=KL0kJH=DQCGd!;q;5eTS5O-#Nbb-^EL{LZv0=q3&agFVJ}WTl zCs7-SHUDJJxZlb=V`rhKPZdwIMMv}aH=mBxniY@ z1J>l0Av#W#*PF*j>?h(ODhoJ1n(BhN74xdo$NAtnI_4Tg#ea+O7frjUkMkxPUx#HY z+RXD_jzfI4fF#8?h62qo>w3~0GuMk_3bSDpmj~b5gkM)4e!x*Zg^SPx-(Lwotf~Zl zCU2zizF9_%NL#sa!?79g5U};4$e0IpN+CPbYa_jZbj$2%ACYcuBo)f9ztfnks>#MQ2O4`UQzX zS4hDr;SAo>7R}C&7o&Y3O;mX1SB{9lCr%()MB*jSOS}M#@igE1F5+Ynz&Q2S!;uFk z$7+G0D+Xf;9N*U1ajpf6Y~eQ45JF^fKQ@5pSgcu+_ZKT@38vHVPb(z}wkz3vZDC zjQ@|o$((~cKlfMFZ~AtM4ezP)S@_%b-4sq1nB&EnwgUXoe+3se_hYg6+j(GbvblT- zS=`UKV5tgqyM#w7_Fa*c8|EQ&7=%+$Iu~&X1+=bK7r>JT&{Lgx6zVi5ptv0z19Vlu z$TE~6s)QM!QDdWUjg4r)tEn;AJBkL86jRXoQ~`zjoh9}6X1dLBs0yD06$T;^S(SvN zyShu`#z}Jr!Wl(t<3nEYDO1!ln7RGU+m1QcFoI#MS&>v_qDUYrZzquMDq0YxY=rxv zg|Q{`^4HJ#tl%b1SWW%)br-MYr!0rZDoQ2`)$bf6SeR`T2jreHu`U6D@# zX`jqfq&aQ8ZJIQ9UCayrzm{I)amb`bjI4vb)o<#Lfy-ku53W3npOoxEuP0=83P#+h zZbt`wsFV4R7|$I!!WF})W+6kcSGi1`uxSTQMgOcrBTMwdLjkU>zv8yrjOT8)ZOB?%Bs3%2N`-7!_<^jzo%tu*@Rz-LkKqZ}SEy9^^1#1T7E&9>Zu(jvSJt08|hr7@Nkc z3H1T0zSA5i^zm__e}q1>d!_*R^cSWc5OJ0Bq89IMF}FbbWZnQ0J4r6lKncu&i zxl|^~Rd4g7M}Q|A6Z8}C+31@ZKMW0BJkH+0)}a4l9?3IYpHwV6!3;_$9`fZ&@~d}V z*$FTNw_t=eAUS<9rz3(0eea{Z-ejhZFOTa_r2EMS|4?wb0G;C84_7{O`oik~VnCh0 zGS`cQ(W@Ii4U1vDQ78SNM*r!D>g@4~?SdDi(E}5OD-%$}hT5tnwX;=RG`k z{P1B|=&#E^G8L>y{qT&{YDWU-d(eJF0j`H!FX`y5as%dYmiZ~140ttcqau`v%=i(q z2P13PWt+_1AUL75qk$DP%!PT}0-y)CjnmyQQ-`@W8?|bj;ILZ+S{3Oj&3#|t=N3ri zVAgm`b{ zbUO^x!qTv^TR0FUUjJix7{V%4Io5Hqb_2$x)0maXu?y-ABR5UM| zck>OCM^8dSIe_6Vz=k_u!5lu!w<{!n^cPdOaGthkAm1BsT*$fNM#E8^AkYdcUll5* zd9kF?)ZtLEy8;>9oxAfHL__)u+G?&(b3b!%AI9?iY>uUZl;6>ONvQI+@qOdB@>n)jIet$N4i( z)^R-SoWmw*e;hc59*%b7oW6^Z5sM!&aicTSVQB_bD`Eohv%{EuX%{Z-t-`**1fQ1I zmd8U1GALZwGx-ywZzkQ(>s=gZJ1OWJT4X4SX_>L09A5}R#7W&@Q;Za97%AU(0mJ@G zA7?YK*8#s_mNXzi=&Y$~!x-`kvvg}JhhPDId`Gma=yirGwUHA%WVDcA%ubEZ;#dS> z;$&}XyqJ#`c20aBP!$#VXuFMZxapJw4%58zND~}%bL^Z7Dm6>|5F3O^5NFM8!X%5h znoH$`0N?RAJTdm5dP5mhc?d$XK+{D5o=jvFFy<^+5uEdAE9cuqc>740Y|m`7QvmAq zE`?J|0JFo*cnU9x@>5gfLqY0tGCVgG$SZQ~Iekvy9zfzoj=ljdkx9INDNchB zFjHg}X;AJ?*p8mzh|EzM-tOVMv_%?<>>50{W~IWKE%Iz8JnmIICC-8^3pqCn`Hn9c zxn`5cgB}+^+O}`LdSuCXVsNC#xUWF3dEH!>1|t{49pS?r!xQ*zl(?OmN<7pfqrsOI zM5G;K=-BfWdg$#-My}p$+();gTSs13C(fWfd0pMf>mMk}wQ1*dVZ~5Cx{T0_@y$)w zK$|9P%=Ku&hMQ?F3>)eT)-Wp(I&7s_qsN2w!OHk10%;uKh0%GS@3Ut@-50`qR%SvN z&QJKo3R5sYHw6QsjV9B~mCe19l`d?Wscb5Ft|k+eQTX6CoCUXGG`8jD%g_;;9%6oY zT)Ge*mR>G?-h(oS$L)X}St7a_Ns{4a#MtHjpx?(&4AWgv=!FyrB^}U}fRcUj#jIiQ zcWI!>7^a_VGU|-FCi*#J7`9PsPENtz4s%?<=6iv!hElylg#Z0!hsu{izHozewYXRo!p**pCj{roNMy2 
z3&BwG#qmH5%3YkhrLd38eh5m`@VrrToY(V@LXY85ov5rU_lKggrVeQSvh-17pt$T6Cn|e|@xkJF zmK&a$roU#{+fGpSw%ba}4xOOv&``cCbZ24sQr_P}j|u4{{j-bB^1}WS^N>bKElH9_ z(!%3{^pfRdkgO)_$q?B}&VcoC7THC1lL>MWxs+T+t|0Fr*OK><8_7SAkC1;PA0vm! zUF6f`v*e%2=gAkzSIF1NH^^h;aoE35RhfoO2R>onNuzmc$h<}-jGDipR+O6GsR&(- z7MzX3pfkq!Hwk|<3!|B*0-pi+#L6&e6#q0U?=>q(i^tHmzSn#Ve@>e1_nNAI_XuYx`3216oUW(T@PnZG~hgewmT+ie6S;=@Y-iL>0g0)a@&;d%pHdU!`@P zqz{3)V>s15&)a_Bq}@Dmw*ppg7HYk$x;a?)KlL{kzyH>M^xT2B_lGOyE&bs-IHkYb zGbgR8eSsec<}owZPaj!9R+6>kG_sj&BRfDQIfsmq3&E`6T%a`5gHd@~`Ae<$P6ptb;x&9q z)_wR4!>0zn`{0#=Yl^>_ScYXJ_{8$~2}C~K2-g@DHb%v7_}OSakI(c%8K2b#e24m1 zuw)3Ez^d8K6XC*9{13GEHN3J&EZ(4dib`F{UuBqirJ%R3@&B~79~G1*n?azSob0xX zDF^Dv>7+Ti58{t+h@YhK0h7@7aJmbZZwShK6~A{CewqFyyqWZrzQ&2WMT||_3cQxo zVe+=8#-~ahoKI!06Y&GUJ202UJ(#=>=QS7Rb?CXsWSV@&0%Z$)hA7SJ2FdH>d=#&{ zNXKIFm82j7aHDN0j?XA`30;D^soX8_V1c*P@C<pyypc! zVt_wJVu*z+JT^>ILhL0Pkbs{{z}#V20zRuGa7?-=wX=o}#P?q_y4&H`bX8V$&F47( ze1{LeXqx7C&^7DnTI1{MjfdzJwOxs-%Wz$!v zeWQpTb2;&P+#c_NIvQ{}T>F1Ru@POd=@Ud{OdTPr%FZWDWv83* zXw25#2eJ8TSD^Zonw6?!QC8uou9yf>r=Py9)*aCLJBnKugFod#SNLUYch6gGHwf)s zF}L0SEuhy!^H-j*d0kNt{!g`kl95|Y(;NQQEsx+|X5_~|{^1Xw`0l-*o1E-u|MAa$ z{Ij3_#Bp$~uH#_O(q z&);2n`GI#_IKJzgGk0v;dfNJxnWgFW(T>r!*5;;$RKS-~D_X)lUVLvuV?$4GPer&w z>*?wYHvj2*NW)-FXZUqGWSvXo<0hhVH-}U`6{DBJxw?~w^V2vz?LLqNOg|{LYlI2)1 zu0*yEOne~R+8{{{^r!2W*Dq}dVqgxt(-Cn;d$x5iU*FdeX?G+leEy1PS7Rm9x9#4& z?I^yjw7DFKFeYg4?Cy}6g^XWCchmYA_j;k}|7ujsYC@W;>U$f&v3Rgfx-_~D>Ls*2#z3a06it`~}K zS?to4VJKGNk0|1BqX*+puAs@@Fh(}_P{0kJ)aBDgTT>s6#P;2L@4lwG!&T8Mue=iY zpsU+ecbuyO#-uJ)m#R-D;+SfW$BlE~>DvjT#-U+G2zV8Co$gjt#V)JL z(AfudkE*J6S(cQc;QuwArOS<<23q`~P^@`LkOqtjn&Mw8Bf%vV#_#EG*&1VPi2hdO z#d&}=NEh*l(@XK>cu@mWH6EK%Ayk><$7dqSt13zAuoWosa z)8WHQpTkM-j3i%b7;LMIv<;?Sf^u81H2dMNe+?ybzdCY6bJ%LbO-q(Eg==jNix)Z{ z@PiTci;U3D7N3oZ15A@O13u^P%`zuFXMtyv;_=Jj?QT&A+spQedj4$UBDO`bEeaE~ z6Wc0UjSiezSXa8gtAOjGt0T8pyiLYS?mM10uMm0lR2B$OD~G zjnF+r99gSXn_+$FwlQL}MQvN6kwk5zA==>g1${yE_10=&q@j39#L$8(9vTY|w=iF# zndgSn>Y1yQE02F*=BqF5eGtY=FEMVGH2L94dfnboCe#v))z`=HjULE0(Z=5?HYE%2 zdWHSVmBzL7IwP8hM(b*;qX{A#r;~B|JX_55rU6*U7ns?1O>05y{xF7X2YKTHOo1Vf z;X!yDv+pL0%=fX}1dudtX z9i{?CA%XlNu{|PUiDC+z0l(J+Pq+csG)(MMf@x`S*<(ffZG2}j-c4teFu$(!0RN`z zA$KVrX2~qStZ`|W(@zWgIL)2Di6jAc_7fNMvm?`56LO*Z3-+_<$ccB8TbmPUYs??; z1$m}-YT*X*3y$)5$5QTAjuG-VrkCrEhbLcNzV|_QNVBOtM%cp`Fb%O`NO!+N8v!$X zVGYv_YHG;)3XhFuIR6Ig({M_EUks<2%gfd$p}Aj~FyQTA&MrD{gu?SszRsCKfnA3C6^ zRNF%^mNRY@Xx^mCTcer!(&%^d7aPeqsv|sWpjXGW+@-}EIZ)!^q|{~3QM`an9W>6T zZ|1VSd*#P2LU3I)-G9;K%jc>#E#UGiYXW)9@*e+&&mGcrw*ST(5so+BNE>nDg1&HN z0l$JFoKYBe?hSG;(_xFmbD(K#cltGY zdp3I{n}s=hBOen8Ys5$;uo2)oVswwnqp9meo4}kz`j&v-GEw1ah*EJ_O!3|>oIL&l zpyb}5|7Rli2exwm=zjVxV|apja}Vts-DiCGIFAH`F@bOnjUUW@nJ!@u@pwS`mw;3< z*2$CPnDKr(&3HfXpQ|7$%(y#x&`E!5+Av)){^IC8GI|bZjLOPCkBt6u=P&1}8%Bh6 zmjv~Jb&suK-B8nS3$+^+?0h!9^F^$id$?+VvWlF`jlMWa)afH+oae)^&ZMHGksK2A zb&={6M)hvRB}opL5wBgA7;!Mi1vU*>7JbYETSZ(4I@^L}HH0sjfbLmOOn9lCIP8uK zU^g*K+koD-$EpGSbez>>yt*;g7!G-(z9`x^79xw7bM3>q4#m?xewH2&(K3=8D7wyl zEBA92XALZlpShnKzpu~JWd5SR&1Q{nX5p`dKtG4?_>J=s(?@1s0+@UOVDTQY1Wo4! zgy?=Jl^r7bvz@AlPQZh@EFVi!joFu4tm66&CoMgasfboZDutC&9v$6)j|5P z6pc?Gk#0L{`zX6Qw__vw)65G)(j_w&4(}Mw-N-(>G3SwLH_m*FHQaR5y363-jW^;4 z?4v%O1M(-pg{~*t$XVp}jDK{~BA^y@WN2k=bwqVYY?GM#PQ((-mw8a9RyGu#yjmW! 
z+Y}%~(ala5!<+=>SKMsd7kp?JTC$ptC}Vc2D0R5+R-U$Dc*llqr)^uiIy0E?w5=Ya%~u-#;LD?&+=pRJ_;lW?+8J5#@N$?4}9Y|hVghxr@(LSnGua8F0>*^U-|MJv;5-S*~{ zb@eM+E6>@osJpA7b;<9qef*Qdi)cOp(@R}gsXZ#27 zgE5ge5H)Dc_u?9^T<^r_(|v(H}cy?NktG)Q6UcMP&6K< zKflzpB2I`NipJwnc>RXI!%O<2b=7bFB>FQ<@0UL1!&FP9{h{O$sgB+*eZkbj=C!FJ z$w0EVi&l>A+b7lCjP=|IpqFDFRiEKw+(aJxJgREnC~M_)R zkGn15k9%bsn!N;3fzidschk`J=GLK|8#e5e?ps%%Sg~;!1Qtx&ZN@*JVfh&%OC(v+v?rFJs@pax;(5et^6v zb({MD=7eS30$x!uJ?Q0_;6nuZ@}r@)2q}ivq_3t^bsacWe``^8Pd{uR% zLigrLQ=b!dM?q#>+*o(;EyEbr^>>F(qdt0{m9B2PxHFvg>jJ_*T6owPun zkSW$hsBpF71f;Du_q~N_+X?C3PK(|d=EaO`-8ph1^7oe$kidn1`kqM>za>7_m|uk* zJMb&}U~g_P@f3X{nTA1&r3sZU0J571mq@u<*)m^%I$$SKq4)|WI0>e|`EhlRcpY5_ z`{X(@yAEZHI|^l9-?*$`I7Q6VQ}%l_ zxu-?&M*nE^<$g{dH~K*4#z>Nc^UFf^2$KuG?2&mWQ^E7p^Mk~9a2b0)>P}>$?BP*# z(LUddG!V@siY2f43yoxK)hKZY+0YU<(xpO1?hR=`NEb7YO4k(06f^ImzZKF%wpfTj zSbTlq3t#JJgkfoLR67K}OOyjv~La>$4f}~81uaB}ba{n?qN*}HcI~@+E zqsncpX0I3@9yk7R4|V(ODhO|i;?k+e=U{vs+FZ-q3?P+sIn7A%b4*YDyva0oGU35K zsZT^cHa^bI7#+>M$s3~&8>`(_a7io2>4BMt>0#cQ@dx$}T+Jp9}t_S2b% z`Cf6w>_3xdOc;dG!^hzt7zgNoML47B9NfBkMRTm zT4^Ld}}0+lL^ zCq>*E2oQptcc3Bw;NmB)NE{R&poy&%TIjuq5KD1vDZv%v^ilXfx&y$4P@|X6g_*RP z!%aSZ4fONIJlvv4y#R6oe`M9_rfPMQ3E-i4U;@ym0RU|rg1vJ9h&?=h4aco=Fe(r8 zPSUG5ie1t^KP;h_%!rbe^gr1dAZsQ;){OgoZjd#5xtQ6NRuLX46g=bU@>@-Gtvjgc zT#Ms2zK(6@{!o7Jj@vbrd&*E%yL05&__4FfZtyi$+J{8Ii}J1&IyV{W5u$V(E4z_u zf=0hyXppYuU6^x^UIopnDl6?r$qjMtkTr8*hk-j;oyoX$P99CI{XxQH4D4{wBzu!8 zg>UkRO7k(sBb9YEm8nRo-WROb(3aZFl>|f|qcDg}O6hPuz&Hvj+34am%`37lea8;n zm0i)iW^weVOINR6x?;^*I)aHJdpmdKezL0*F0!2;+&FmC;KmIr;SG7MwWN*Sz$Spl zs+qos^x|i(gY*Kw9iP1hy6Ht;vKON$Gx5o3=*@JkG{ARL*iX1ZkK42t9FP?rh1~Qv z)2g{aNE%)#7)3w5&bXE~0)74qaDM|c+q050Yw~_p-QHE~S&>Z{?rom^sCn;sGs80W z;iL46&Cr@N01+xod`$rh> zU7Xh)%zcYzKCHXLBeeD&Gk2yj24zNgnS|ja9w@W&`$R|pq6|@Hnt><&Dtj7eStIEp zgJd&}izLv|Bvs@DRh4=WUqF~)K%F$!Ky{nCDZ1dVcKCHfB)VNrP)oy*eE^n;Vs>br zX1q!y=yrf$9>;vk`WPMp)NST!8-A%AQQ4!$}kBnq0 z7d34+1B5{<>)Ay5`Z&l^Zv%x>~xnEKK&yf%^9LdU%O_4RG|GQ#4sEf7ucLl4|5jH7@9hd2%-NIU5#uZjs=Q8s}N zS_(dAIrLq0?i3D`&%s-)WiAfPUjaTf?=;4jbX0~3S(@(XU((;!(wM5L4ks!TFxU{x zXWXq}ezLg}np75=zLJBdwzAnX&Ym+@FXfM)*n4)A&DGGHiOJ^AqKfk!$6#0J$h4+n z3=eQXfGo~-EQ{>HlqBk`EXT_HS&lLP5^MmCR{p&59LHaj*Dl4B!T3IJisSDtU3uET zf1knMIZto+fX4h5=xHIYi%N?e<@c03%9GM`-3slJOr4`-_~`Ck2ieDR8~fSoGq3I4 zC*5b_LTVZW&;`ft=o8PbbzIA^|$$hRzGoFF_?QAjM zPu~#bA3#0vEXatT0~s+&E)p_gjS4IPal0HgnIAJa?M&B&$2P6Tg_GjW+d3UCE67Xe zyRbafL13zaDT$K2VmL1}dP;=GQo#{l)3aQ7%*&4j5z-VQAA5}NLDTeMG0xmi_;`}f zqAup6JTLmwB|a#h&Kz+&?Pw9m8tM~uH6E28rL;D-q?(h9;!U+)2Z==^;ZQ*J`H61= zhNLM9W@WFgs;meqet*=zWz(h&8`iAJWcvFzZQZnW%jOM38-_NXwr2gB_3PGVR%ceP zTG>C?KRB>r`Le#Hy|B`|I+rYlExom+c~Mhi1GnF(tF4Ju&!Z`CLx|+lHlW75>=RQv znCTr*gI@NTOu=QGzmoaa^heK2?Y=a%E_DQ6)LnkDV+?++UsrzwUdHR?7u)6MQpW2k zgTK>1!V~N#@HW#5Z&ZG4$)s)|NxJ zYhI`(3(zb;G54DyD84oi>|V1V#;$_j>GM#)cz+p`iXgPo!rz%EX8%lH5bJUJHLg>v z;W|Z$)XbiTB#`KD_L;CN_oMpv%=i6XfzdkaT-=U^asi#ga#>g3lBFHJZ9OgNMcs`- z4@o8CwTRn#2sWk(pNHeNhOotz0Qjv{R75MbjE|pp-q~mG+_`;wHoIxl_{8|cg%_N+ z`@G%fkDa~i>|N)cv-7N-XN`_*AKpHE=8o(c*)vYxCRPC~feo+#)&Tlv0Qvjh2KFyM z8TOyrDwtMeC|MpaFL<$CYfTw1r)Cb}yL2|bQv*@pZ0k+Cc%CIQ z&$207I_B~LI(H4w?Hf3qv(8JBcOY*y4D|0&&RbQ(Cf!apXEsLhka&}W0x_0!4O1#o z1({O?HZPlkB1oRUv3MH$mRPKALR0JUYa#-ak>s7@c5o#wf)6k;PTeqx*?0h%mjMeT+yTfL4*el)iVcH{^>AN4cKv zBICbwx1sWiE-K z9ozJkoR+&`R7arS?jI2$J0G#e_KDf^Og^mEn8e zMf@(bKP>E3yh-|<+�ugFeRdHg~_nPVzEO(Ld71%`z|fcy2SZ%u6C~Rqj`_cd+W& zmllj!pjEbDe4E^_&Dx|sE!>z~60D3ISw#;+XAeEUA zplQL26H@=op?#zKa_6&WCT1R&F4;f2pMA0<)0JpjoDsxO*Bf&)ncHw$hG0CsIx8{+ z3;L={4Z#8h3ox_@lT6d7nahM7*s-a+0hlN9@Z@)=n~E)^$ZXyM%vt*inavwcNz?xY zc>JiTCrlIjSGwv^S16s5u23q!kycJkdzgJ2>O{J{yaez6uc7}Bm-Ihb=XF8q0 
zj-oTlHzvfWPsQ+2CM7oJVRG@8gBmxB7sF2R=zH(rkVV*Yubdu^ik!}VnfbB<-KCe_m-TxX2tM|?3lx(5i?cC zs{$&|1dW;K5PR}n;)f%!co3a5iumN`(@@RlAvJ$@;^j8Y(?{haMg5x|lg~44QIC|{ zHy@v;!}RgFM$W}FjE|wQ#wW|CNW_qU!62d>mx@Nv6}Cp8CCkPZp4pi1M*#MT68|=<9!3NOi1aWTkf6IEMaAU#i-t%5#a>#C$}ov7tU`tq5G$VO+>2 zn^1tfQam~s+06aW_XE>U4i3uQ(;wPf1bE|@MdO2^;j)Ln8;ijH&Z1G_m`cas2fltF zvpgDf-~sI>4Q-DIMxrDlIxxQ>aqk|9@$=h2B9N#E!GnIjp+s6}xUByjG+Ba49W+-t zB?qT}_uZnNKls%W40@35pUuv`TGrR%@4|NCSNVQ-WLh26%9&f#t*G%p$A+*SXzioR4N}JHmaz$16&UY=%7ww>T!jRpt*{2KA;9wPLF2VuryH4gIFAX zKX}6OKxle&H-6`uAp69S>Mi*3AYTN@QTr7YR(Z&d4|+< zu4Vo5+Sajveclcl4wZRsKlnZTYuqXUwqeoYMa6fT=bYPowLB{;kByf1N$?|&tW$m@z+F0I)1_mgz~Y;z zP|Z;xO3S(9^l!@;;#`6rC})GSux@^xZ!>|uB#A0J+7N7#sTrP372(Nb%-p_9F+w*Dnz8Zy}2%6ECv*+a{T@1t@zFojRb2g=#T4=F}z|BoY`Dq$Gu<7M54b$7JL_Uhz0 zWYH!NZPm?J4Sn8d&V8@&&W*6pXMxi{gU@4IrrA$VDqi_Y#VZ~$w^XK5J~}#jd|#;; zmHR?Lcw+fBounG0dR9BxDeio}N-;i*i{j@r#Y|M%Ensc>{LHMd^X<= zDxjs;?Ud}4*(dQ(fZA+ygkMnG`5u=;vQZn5`lb0NnQlM$79JhR)Lj0qJrn1jch<=E z(>D*TTQj&~*;3pA@Xzb#x@oOOD(nH}pY!brrJpUd)OQS!>Jzbsn%W9qRb?bDNPcC+ zZ+&Y^Rz<3Oet1i?&DkY<(W+-^%$MWQ}`q_~3OZ&f~7cLKLm@JD;xv8bu0Y_~Y+ z<}u%!?vT+)sNlt)-eo&FSnBlcF+RQV$TSOFESf5{Krv0k|KSV#@Tr zfb*Irji8^DcD@ZB)?hY!^A9prKX~d-j%57n*R32}wp485sRo}DC(zm)PI2;>7Eb1V zWy!MjZ)0966ZLJ0qmwAZ3kvM1tg7^!6xvmZI^{oLA08(EN$;m$0N&`G|39-m#<0oL zAnwk~)0E#nMTQ0G(`_bwx}3b0adg3+x}t{_6AGQ_n1#(w2|&eRn`gB<9M)=gobU6v zBnNe1hz>NrUz&fM5ug@t;rYy<^{A5(=Rsn3Qm363SHYZi=3Kaj5g<74Jb;GyW4lIo zp0RDy#zJjHrTtG0hfm?s-rR$XGllfP;{y*FhQD4 zGiiqcv;)_HO{@dk`3@@fkly@*jOYhX{YjuY>((IEN%_NK7M%;s;&NIfg!Z;eCfe!~mA>eyfu7SJT95<(L#aztl>?T`WyfaUN&j6sovZ}tZY6`6{>__K zuS$2frjm)8XwZ(_p`Xy@^_VlI#ABJD!KhRxZNX#USarZJ^Mk8YsVD?5U!w@-lI{xe zqpVT~&{RWXC|qHNbHRhH5JrONpuvi8+D)gA(c6s|gcM{$DdkWswT%drj|cJ3IB?i76ik!wGPE z6X4cK`bY*Nd@M`VM47^9rn{>F;6{m!uvScsv&jrEh7rU?E>0qk5m~O2x8RX^B96dQ zP6>F)?s=irDDf~hVZ%8;v@Y1$KYSrOtGBHLIBPq)+PhBxoXukZ9Q}fA&CB)qU_sB& zJDUI=01F1N_#?ogi!1{?T9p|b9OzEfNBusxLv~0c9Soq+aSJ+HZnDCzF%fp{2Ete! zKm6$;U1&g1jn@Au7);T~E9PPiyovxJ0pyC&#u!`KE3o8@OXecU;%t%w0u9k6U_E_}w9!{VaCU%<(vCSRYHMO{ zms2IQp%T^*jL_%xsIr7;L1grOBgk^gA|y8|e(9XUK4Cp2tfo?>F4PT;ZF7_lxv%{9 z&j0@Jo%Ekji$HtwYK7yg;6}MhA(8#lpWy$GxT4@=1s{$ z*IVpg3D`@4o=WGw#rr31D*>_bgw?&#QgGfr!f_)5xN#6reGNI2EYB=GbKTlZPr5r? 
z;nS%TRdGAEpRu`bX>Ut&DqiQLrxngw$GBU{wk3;W)e<_S^oDDsaBn*mteO_a(0F0s zdOZ|n4vljO_^lCzwZf5H@etDQI;nz}1$uBhDil?8ZTd(i1*(Fct|F00X)z=v8y8F)ocimHR~6^fL|xJvLbJT3MJ$G+Tgew`^8v7dqS-sj{hhp2l_=pV+l1_XB!W=>+_S{b6SN zK3d7o$o>TFqF)HN^=U}faN{?xeXgxfs^0Ho^X>YyOQU5=6*@Hir?UP$MXPrWu>Ml= zQ)m2TR}1Xvr5*fF_UhfnFH0BItGU$&EW7KULVtr8f6DJt6&d*Ql8y$^6EWN(&DX7) zcJ;G0rCkUcf8Hex95G%fTg17+61$8Do8F>aIT&uu2cck;vJUV4DV zdBppV30H&|p{R?X(j@BhR81X6OS>^1979Hbg(Xm^i9i{uNC#nPrDYkb-pn5$qI?EJ z8_0yo2N!&#P#VNZZ(UtovM%WlBvXN8Jy#f-#`)YRzYERygzT<@(2fRe#)qSe*DUH8 zKWDh9HNB(wn*AzQO|Pq8-W;xPsqbirL<*Pt(AyU9gZzyCk$x4%KQ{D$k4Lb=gnmSs zBrsoeB%dl6W^7EQFlF1KFnCo$CG5euWmT&z&hEC6j6IM_`24Aa#>ZhQB6=`x3U@V= z#V8Elj&;edhT58VMO_2)8y&&5?TJqKo`ml^XydA_Tw%Q^k&UPLRYP4rG0!-DMqlOq z-y`~;Kxk7YL!VR(qks|lcn)HWr+^b6!2lLPM-^2qFZ=&X%ucB)Mh zJ8;@|=>m-0IR=PVt6J_fT|~0Wb_sEA!DoqVmrEYaEW=nt)P_DZ3q4I1co-1Zzbdn0 zd2dfL)tE{IK)!V5bx+KMR>+rZRL!shU7cavj4a?GKI73F4SFER%=MJu3GcX*b(&jbGHurTb+lobcA{{%Vj#Oe~B$1+Pm#+u;w|{hU zwEw_fWAphJpS3-E#@Xk%7Dt2YhWKsxxp&>lY|pZHulT!VKm+=K2HeHo;FhV2Gi`QS zL?0!}lNy&Nf%EyImgiv?o~Sf7h@i!)2bLs!pWS|#e%^>);YQUvlN8rg=g{lue;QHt zhVgp##VW5&*KOXa7ms8ChtHnpP`9_fVZ;W?`F2u5J?H<--diJ>$sWnh7WE(FQi1$&LX!rYrt{o$PN$WxFyETJ zthJ&+;VbvXXueU?Rhvk;@6F!KB-qP)xhKp|GxjjFo}dn|rAA#xbia@22QZ)@Q?*Gv z{0jvb@l&#g_=(T!_f1ABDuZFg>v1uM)9aWZHe$EgFTjYYRh!+^>5Mvo5Uj_F{JQVp zUs=;grhCnrrArABel<&1F9pVRa7F(z7(TicIJ+?*3`6ByWw)l z;u|QErP%NG(=9F8;jM?b`F|{u zjm92%zx7g{+;HvZ}G;XfJgwgyf=MBi=P zySHe>vt4|!M zKe7)!!Hp-xc=!;FCs>+~Ow8N+=zmI^P2Z-SnH|9ZlQ!1XGW|3>3vjy_w;%I)oD%Um zY?5%3qv(8tZm3ax%i_g^2zMsbbwNdWe;IQ?eqe)A=@Rdl0jW3Smbe>uI0RCdHEXH3 zn6upX#)>8_ZCkwQ!WE0R^wgm5#kC*z86UBJT1v-^$8i3v!L69w-tXDr=*eEN{E=nj zTY4NLb@yCdm`rob!Z!o6Je@|q{H1D)m$ykU5q&u7fP+yRTFu#rZX5AC8L9AjT(TY{ z`UJ7trCm7j009yVx_5=VjJtP5GXv$%x@`J{`LyH;5?9H?UJ8$46`?T_3PnReIGPFB zuy*ADIv|x2i`JH=RJ^vT(}ZXkqa>CJ;hK}VR(X&W=al|#ny0$!aCD8>2<{xBBY&)ti2(|lS_TpIuUy8ixktC#hQUsgV1 zTxxm7Fo69XJYPbL#L3poCZrBtm7y5B$K!KLn#;j-r>2{f)g%st6IQgDsjY!0=UX-Y zGKyfW7}#b4f*ve~pNnY$UoFk-_{JAEE9fOrim5e(HVi|Z*vhFOfv>s+%1*~L0gC|#gkd$I_8_>+Gq8gvx)<( zuu{6LvGhXu>C48-$0F}rwZ3`FrEB?0NpD&#A3r?*(7J9NzxT9jFWBG{3+3Lv-CKH` z)_CRI_kf4E3g!rNqP4)7Co>mvS^y*U!AL8pm-zf%-(<+6OC*R!2mu_lp9Jjwz@!+p zlXBOrcwKchAwAuT+Z$7LE%BD>+G?PB^ZL}zgVe=TZ;&Omg~A4u;BYY&d>tqQd$2q# zu!Xbvk4Na*xj}+uq!T6_;v@yL{|g~)brd6Q$QN|Un#XCEbU&c28{mWp>-XDs0VNQz zn+0X*As$WFJo>@8kE6fwf=`2la;gtHC_I9X2gXPs5DlP&(ws;T(%I3{lt?9jCdca* z0Dt+GTIN}r#t>=}cPv#nt*Ox5lzYyVFR46J(HYts8p#-Ia1KJsBA^yEiufqPMgBpz zUU{W)ZRaD8JX>&~o1H~GL^|zF5{<{-Mn9h#qGQyQ8sS4F<#J{mfJLO6EF~Y#c$ZLJ zj!>JvI9?-ZHp!GH+ld)&4!Knw8Dd~QG~G7AqXZBhdQQ_6koFZNssPYCPvHS5ichIy zAQRSsIGAxS?F~02{JwA^*u4P8;tx@w1z%Ue`=)YCp`_Tt1*UK zq-8~Ysm7@)$H_L!(+o+uI^d)CPr_2PH#!ppuw{IW;gd^Lfmd9?WB{uKXY9( z^?J~1k-6FR#7*7T6Q6F=eYS&$R_{mT3#y!toA6UlH~^H-03KGT40JgIy->swY0YY-CNXzKD4pwdGRlzo&^6&<2fss@(F#!zqtVV84s1lJAYYz zr@ZO-7m8TDJofD=iUi%1%|-;DK#O8;H)k4M3&x!mqACX6<_U+2gKp>5crPB0@EF_0 zF|`T#f8N^`gm|2dRKIW2KotHQr$3_74yYhN3@0R(Dsh|?QK^xTk|vdsrV(mUCl)up zHf~%yvYljOM*UAZGa@mwv9j_9up`94=j*j;LV)IU_V?bsd++{M0yymib8I6_m{jjaTv3{HpO9-ee^%Ay9^`MnR!;3bY`RpoRzzJOqg8 zi(%2W5ONBhw8V(hr#;}?rEkpG1U({`k)B!?%n+#6Cf|wL;d8B}JasfsWjyR%C^isLPS`!+~_g*h_lCjl7W*PT-1WkVxP6TB2ERRGDQS?R^pq zI5A1P9|p7!7<`H$**M&Q19>|19fBkJBX0H7Mu8If*_@lGus}^cXUkj{Da>T8+?;6z zyJ$0alQCPgODrU#*b4(uvTSR!(yCQ%7^VaZnouP@lge)Us9D&uOaIm=W<^bF7I)A% z&1zn`ziGu>`u`*BDcj*pRD{dubCs`8Ux<@CbmOc{FBXnSBFB~IPSh9S$sa8>kJTg# z@S0uhsXiTW2z012Qc8Ulou_Y4X^5G{QJw9nldP#IN!;i9h^cy>?+pf`GmM7p)gUkS z!XQqZAn^iG8~QTnhYag6L&91ywo&95t6||x@jSl8(?XY-&$%T@2Jv>WI3AC!GnpJK z?p8PCQk$_|#Uj>sbEs}%`qhQHqv_48vYx}ZD{FdBs&KYT%BoAy_PR2Nc^ij;DP4HHlQb*6yg{ 
zRv4WY{VM`-vEAHhbPgMZl)!H{6D`sJ0C=2*m<4!SM-sd}U_l-jx}y%*>2e?#XZemY!tyJAYsF-#5E& ze=`fS`$9tQ-(N@3W_e*M`%~dwD+?CbVguIh8giKLyAjlPt0+ zNFi#bFl|9w(pI!JZA07AcGNLHXa|~0^JqufiRRM+T1bm% zXIf0V(5|$EcB9>C589JDXfKM;MRX_~M)%O^^a&kK$I~%%E?q$v)6w)A9YUwjcl13S z#}#xWJwR{Kw{#v|Nk7sL^b=i3*U)412wh7{DM}|&jGiEe9;L_W8G4eQqNnLoT1L;) zv-BKYN1gNyokTCu3-ls&(dYCf9YxD2PAjOJ5|pI#DMc%(htiax9A#;5T1B7HYFbNc zXdl{-_NCkD0y=>9rvvFA`hvcqJK4vTJeaGvnrr9}`jdz7P#(s^=`SW8!6UhrnMZwn z1ymd{+b-4?DWyPx;rt0T?LYC!c`H51Batw><_RLH z=}!XT*FdR{!#aq8Bbmn~9h92Irn}bgtUDUBVD+ZCZHDlc^q!7Kz(K>4nRB{M=;Cox z2W_(pXY+}GGh$aF*!GkwXLRw5rY*;LmDcAqbb}Vengb>9VdPoQ-W9&dmdYan@g8-z zpOV01y%CZ=nun{MBHgN@L%d!-l-_O>{UFhrRh(tWa@OXHE11$bZ%vV^=qTlwTijKN z-j1<}Ld_)gIZLNp&BS6y#w={UBlT)603;31C0fP4W{~N zs&qxwY&#$gy5edO)JVmyNZ__1QnxD>h!96=bVVO+`yoxbzCS=7wr{U71LNt}!>@@0 zlj%45ukivCbJt6+DFah;Hy)19P*~sro|9n|K6t3+WDoTiJP15lL_Gr!AC1R)kS?n< zj|Y43FUt~*|M8HfmC+mTK}iL@y_yK{5DprkKj}x|1r6n%AW)Bj22D?9QG~R^E2rKl z$%o(4rfcHbrqGfr&NPuQWYtg)%kS$27S^QNUdr0^;iad`SeQ$ZtA`| zvQZEA)kQjTWDaW28=**^%c<4&rsjnd>37$KIhN)C3&U4=C>*t zNu00i97&+Hsvb$AwPsD{A+XBlO3b#d-DheaBAHkBN|UtG+>P%9FsZ)c;B~c1_fSde zwQjA95Ajg_@L__P^A$Jcpo+$2s!pB3%z(kX(q+n>Rgmc0XUV~vW}@=XQhGLBL}lZW z3pedV72{GiHhn~8G?J4xtwa?xQYJS&uEVn!ZHM&TnzEP?L!eX5Ho37hf=$`hcdt`b zH$Swhy-wrYRB2VIPIcHcXw|4rqux|(Rd!A_+|+GVcTN-E)M!;bO!eC|Y1O=c^^khI zg$a63ml(c94AQ1c>fgcxeaJ~H-J%5P<|I7~oSlB_VB;AaK8@<&s2SWl4eMYB4lbU? zb#NXH-kyH-XQLnNKaKR~K(E<84fSU?9h^Ol_2*m}ygCg=u}KdGZP{Kc1g5NPdSA=k zCxc(G>lP$a6>%hM>XiQ}SJA1U5|k&@>6vm7l#SJHv`;Socu_7Bs~c+{WBeEU(Jw`=&*bnNdBic5|6P2S(gckzaH@eD$6 z-6G5q;usJrkOB{hdHsaFJKDCNfQ+oGGB;utzVHNt1}vWx zWhQ3XaZpaq#M9eerlD6YHN$*3{tAYvb2-t~xw9R0kg^0as`D-5odA!)S zI{zC3i;igf(l_Y&`>*GIyhTv+%E)`I=-a7CB<(dd!&n=LcMc z+_GY6Y{O-Um@nU05CzjcmfVjEXg5W4dHFxx62uT}XzGUJ`#Iz;JN`9D0`swoGhD0s zNkx{HvaDw(5U0xnCa2f#+rqRKo)2ys_VRN!R2>aYI?HwsT8U*%kE1TC3yXr5fdVOD zZZ8o|_k<9KmxlN}t_ARe;ky$LUV8@bVd~f!=W`1cYWt4~(j?X7+&$=RI-!jL)nw{~ zZE2^U6#Z^b=ct*NB{js79%%luB`-xWUK<7}-RIuEVKF7ugovrg`@vSU4@?M36qzY) z?N-Qs2b7PS5;(UmbaJ;q)*zgn)ox%(kMX-am*^gRZ{Mrh@~b`BW>;zDrS*45h|uO5 zH9$UCMV(m$=P#8K!;owdVGd+VUo7eI%aS@whvTPJwqL?80B-4VaC%9TyIJD{4AuPE zS2gB8#ZLk>W-dSnPJDGW%K7?*tSy-2K+ujScGa1AsOO=Su6)V1k~;TXZIw*TuTxLm zqst!;Z;jWimMh3;W>VpMdSCSx9XwRj3UAXRnID3!_)U){@Q7>zqu_>1aW@5d#G7C4gCA-M76K(WsC6H4y{(sa8xu* zTu!Wq?2f9MmV3mnD)6%F!@F;g=5v!{p^Q%=L^ovTCW9i&uFKx}o9eiCZv5R>n--rW z#M{}^Vz$D$GE~HAAqB#Q$vmyTP;;%PzS<|h*ys9E4rUVV_9T?l)?(W%c(#6@?)0S@ zs`>&^?NKEpA3y61Dhs;kJoGp^f>N$3X@3iiU5F<}_FX&xQdy=?&8x0nO7v*c2x+#l zcgFd|d%`I>H+i8*GL2MtFLK5}Z=yJ)QmH~RNqJfl2X(nsA|@tAJBo~9EvRo+TD{SjTTa?`N3So-iCZ6kh{1-ILuGBGZEWKd zRdK4EgJUnOe4IkU|`R$FB-%+3;fJ2JIwNWJu#F)a9i%l`xZ| zCSZW%IF`bv0*|Z<%Ge0h<>u|$R+pb9N81!g2oQ`ay2FeFh_(u^e`0XI4gHBrR!E{9 zH3XAwz)dNH*x?yOalrVrBU1}G8nAQGU?%j@e!| z^Zj$dLh^I6^A|V#=K?qGQ&C+}LWSVBhOk+f?s?iz!aK5suzM=W;QPl;N~TH&JHYY! 
zog>(@bHb^w_jCvH4`00UgXE!+^GKE(xn7svw5;$m*H$B5JxHUAlhKpraPeMR#F%Tq z689nI)m*n>B6oVKziMdlpSt-8^La~AJ5h5{;BNC$Q_Lq(hb%#}(Z*B*hpMU>uRVep z>>1J-x;?=?INmyiJFz-RHU%y^E&igNx7^3{tL~gK zu>{ru8@=bQ=EV z8k-S4f8V*_(6q*VP>N;Vv2)+|=-3z87unizJr{Qt4Ws#9kfo)pwX0QMNUk_7|ERD@ z@gQP>)bmZb_+O7H$j?13?*hfFfnC=Uk@~Fbu@APP^?mwBPsI$goyNQn8VF~E6av#W zt$q8~QEUQaDY6qKj#77ZL?|tN zJ`}rpeWiTm)X{%8mvr#?bpIsl;;99}_~lN_PSj56pq=0O*PZ%t?DYX+$(KF-R{h%j z4Ui6+GmR0?n8;SjGwVv8rsv-+DlD4&4x=Sy6cLt3z4{wibGa=`EnzK)^*D$z^TzGT z7_t&sjvV!&S)mIO>Ea0#V5Iy`>PBKu()4OyQ$(Rp63@GTuHPz#y?=wZ(b6kMRICIz z!e*oD!D91D_Z5jlB$S0td0*3CMCg%2|8`sz?d`~1TCLaqS}jFCT+@xkdewDezo?{W zS(!D_IsOFJ((4M@$f3Hz8k=gVcAZT+7z7Re+3b9o;Lmm4=OZ8$;9o2fM?+5oc0eVZ zr1cS%9J8nY;D5za{My=YD3DJq(T&~OcBK?;QVGXpOvp-9I z_GWc$Az%TqY^jx}H#kk)E0?kos@-7Gr^QY1PdZF4PKu*VX&)5@{kXSt8`|t1Bow(B z)YODaUe9fsI9i|n-t4D@$E?Sg^;O3(Q&JpjQR?y0sYRwIiWhsSP&bKN_a_&AtX8KS zQ|pZwsT^q=i5oc_xmU}=UercRb(F<^^j1G$**N?7q@JYiv_9ilWG}r^uYSyGd8C`N zPQzVoI7?Twip-(3QBz!^3bN^%E+P}Rj>Xdor;@r2C z(YlDB>)vQB-G*mU@B=R;j06`unn2-*i&5&){_EOp#O5qQIx`5v;jI0vW>fkHJ-;;d zcLc=~Ien5e5VAU1W@8Ok|Az2>JWVD9Pyk3B{TSc*IQbfjpUKuEUIe=Cw#$ zp7)>k`J@g6NdyG=)CA)AwZNHf%g}((9tAoHj#F|nibv6619rgi!Gb*nX5@fHwMbP< z2Xc2EF@W4!cZXc5O1uKSH2$Df*J`CX$jc{Y=vMI}#rNc=Kzl=5Lsjxf_{7)*d}6$A zlaCL3`0tR|P}}g$@VhJSO^Pv-xrXD`(7DtAb7=Rt!frGNMI(<`6mj%xF5mY#+9sdi z-}<)>w!(*RhmJ%%6B=vwGKX4oxFkZZB^>8+))m-GCA zN_C=#wzY82-8IoJA4zI}Zu>UP?7+P#9hR(g7|oJhyCoK8Qzfh3pFk<&rf*|0TeYd6KJ*cjB@ z_mRK3q}GPwvS$13=zNBu;GEwcN1)DS`ClEw0lWV4F&)VY=cIA8rgJiJBBYys?BG33 zR|esXth)3)NUPx2)vIC&X!gDOYguutOO!CaAqF&drM(>2Z7x2XZp&%Ab$Dl6rEK`I zYkOA^nlOGZSUUT(l5uazpnqYhkr{G>wRnM}c;cVmK zP+Rx~`$h35LvT}9=e^k-CEwpFdV?eV@6hR)i${CZv)+}g*%vver@0~}K;4mdcs4-3`Sy3fW%G(g@t5^vp z#L7#lZ`v?5lgw*uK4UUvaFb4W5+R|ssfGS~5fnDyE`RwPlrXR-4`+0X7~rX0>Lvcn zI-v}R^!vz?0FobjmDt-_mN?+cWEim|yXnQ0`>HN@z*s&}MA#EOlDFx~#Pb{!J3uWD zXK{-k&|*?Hk|EKH?R}9rB?Izb@(|IwMs_?d$ z;Og`SUUp!h(7qk_3-_8R?`f@tNHxWmmZBXI;@FDl*-;8}ffH$kUbhg;w;#J$IZ#^p z>f1Jj7K=r5sXN<`yH%9z+|LSP-ehO8s5`5?Y}LwRU&&v3@ioETHkYIpFDpPmR`S*a zaJx2tRj~aj1N?4_ySFATWP7D>h;S4fUN^RV#fuDJAs!FI{m$5J9uYA+7z~xC^EacL zI>TPqgFeZ4FdPx{kFFa#>>*L4A98=b&NIHYqN+c8+F#N1Tsrps*-=U(#sheVrb2=M zvUeks8)^m!AYUjwWs^m4os-1k*KPw0(5%wA$^ZDt%*4+E;YchR;&1*8b!0#3W zP*$Q3iWVU--xkvuEV{!|WWL5RhG7e-8DJUjp?oOagtbGZu)A_d!-vdjYFpHxF|_U- z^OU~7&J315N;umxML%ZdB6y4`rl{WM?N)*bxoM%6xvn+2DK=%Je#}S5kAJvS#PIUR z)*b4|2i6^uJ%xg3f>*e@OHYpTDj!LpyobcJ5hBLYJ7PWW+Q!1Q>2{~DJ*miSbY`wS z1V<;!YRb^%Ria$H%T|WM;z~ZND{0d%@#Oh<}w~h<=V41701te~vD;NQj)lcHD z{?2RojVk`W;#R@&AFYwc5pYmR@HhBirl>D6FY17bTD?pgY=9V(7r>0Db@h32<|tvAl^AG{dO7zk zY8HnsU-F@oomnmGA+Fo*)c8rj{UrL!oJN2PW8TGbR5lUl{b*9wLrQ;{Yaly(Y%!tR zLVX_F$yYa~rp`jV;NI`8b3akB;LRRb6+XntM*fXVj;r7-@BH}XWRE3Ppc5-wK(g(d zDLnfaxREr=9%c5iB-Nv_i=(eylnQ@LYFB-B+2S&z)>|B-i6O2qVXx+2&(%3tei~wh z2^9#=_qu|MqcRUmo1aP+wae|TB{8l7AChsAud)A{#_UrY+^EF<1k*CBb<{>6@Dldc@Ky17TPv+V*wX8SJM3+=msQYl8c(M~4 z-##tH2%PB*-w$uSr93d6Bw`ENnMYw?+x4A&p?{M;=gqn=au-BGV$ccKQYfc5Y8Swd zxmuPkd;GVaWsgHIh%iuZhFWnPQ21LAX$%u)i{gVcMf!&ozPAJbXcXp2m9!qqyHcI2 zi#Iz(ad&8_iruxUGf%niHk5nsv<55yzzx%gG-CVvhh%h%rbM?APhCWWAY$qzp7QK< z3h)ICj9~C01v~F5{O;44Vc~?sS5M1M}r1=8*sU83$xJ{RyjLKiPCb~1GJUDJ;mkT9MU(5hb3nm-_P52 z`vvg#OX!1R6t#TR<<}JU*?iR$#+er_>hw$#qC41HL--}$n1)eJ?z)jH(=(MP%uo|V-sWgwBvM7o@vZTG8x z3`7C}CflYfNJB{!WhX#gfIUi3SLLbUVEM?Iwaw$X<$`AC3SQs6OKW}L%h2(*0LpW9 z-#9a}+^1HB66>Z(W4d>v$(Nvrdso<5{P5e9BDO}T-0R^>ZKxbpl*lH6kxIbRsW_0X z;H?8HESHROE38_?F|*o-C!W{|;wM-u6R=3Yml3OYNB+fC0A~2Cv#yvm>ap*z47K_` zsXQ~3*M0nLeXU;8xjZ`5qK<_(<^_iDg0*Ul^MhFEi7H{zlQhG=NmPOS=uyRhkeqPe zqX1OQx~(6ssXB-F*A%%{JqDuUn|g_m8sTYU8*RB6>VrzKGhHDLyx_ zUv*fMc?ub*mCaDbo99`qQnXnZ&3RP32oE 
zF0@uw;6g%Mt%*MYf!XPhy?@W{$WayZkp#bXAVjXE{8DQSMSjNVH7)6R_&QeWyV5uq zw1MrJK*h|s`}*ds4sj>iZ-TU_bYR}t91==gF2>iE7dB`u#b3m*IsYuLvC>gIQH2S+ zL#gMrki_CqMCuABjK;igqQ1XfiCJORzD&Ch;15ZO%PYKjH^YCFCS9;$0q1dmB}4z- zT004$@E2{m(q`K=h0=mC-#qcKej>mtb2W&ahi2DbN0-wtJK63RwOCM84$=`XQ&B{&~ly8Yn0*Cb?Ox7?+0`$RGS_w1*+(xA+5|J@m$#wk|xD2~@Uymfxv+CqMJom+j0! z^63YZFRt#>20|fO>K-SXr{D>n0V(let{YohyuxN@l*$ahug)5!A zhm+Zp{Aj>{A=(?=qE}3j+JV2(J{uq#@}LDON`s)es-v%Uc;pK_J)Sm{s*nQmfL0ti zWo(CEyVBe3Z60)jPXp|CYN!qeeS6rF^+sW)f=k!|{NB?eakAYz3MXY{Y*HysyamV? z=c7A*&M{Wf$_a0LD~8c)0wN2(=%(HN6Wc$fv~zW|%XWxn|E`k4m;zWJ0n}tScqH!I zn0nv4J0G$N$(KhppS6GJP#)F%VVgtXAV;X88%}na8xU}r6!5veW&CoW_(m`lh*`DyHKt0H%{g?ug?Lx||j0}NbynmOkr2VtG zb^$oOHQw1MO6%;=+`d&!0%nGL0aDawhM+PF0)*+rpKE%H?VyM{=Z0$0?(71vf%3~v zeE~F+)Z4vlg8WdB%LDGW(;|gClZ9w9xb79V2(uw8?`IlO&x%HKGIGbLdTWHE>Fw5S z^>_`oo)|&whUnX(h}3jWzB)#0L7t#Xb994$6*lDGg=VjmcjVV24Tr>TDFRnHCiH`m z7v&PQ<7(?`)-XW#jKav|HT zltKCG(2Vy&t1UW)h+3jflBGS!l+<)w<(nWs*Kv2K+oO(wVeC$JLB&T`9Kao}`<%M5 z`(?qFD32M2z)p@hs=gXj)nqIK7~Q(<@3r(RhUv>hQX!_jt33E<`7n{96v9vv!MQ}t z41sVQY2W}6n2_n8By;V*(Fan?4-!cSr=Xd`mvX{SMO@{iUNYoiMrA@7H^TG?Sf$9m z=NO1|dikHK=xjFsulSJuzx);8H`Ajb{blB%bgYMr#*l>gbFqikO54h_+=?5;!Ez&n z^pk)|n_BtW&>+$d9Cpj%dgOa_PtpIK{x5f=@dM7u88_tK1cLbPdAY)pEBZ%{+FJ+t z(BB=Dk2nxA2PXU`y%GM^^1f(^=aKr3WCVjEv zIruyBj5d*=qd&NEDRU|Q%Twb!{m@Pm^Psm0l)1?3+bHP7N0;<1wz6PT5t79hmj*0u z(yT&qjqHCgN7^)f&brwW65{}BOcrXS){6XLW&WwkycR2b#Ztw|1q&JgU#<~*@uaoOt_j^;H zLu7)4Fj#w3-6&GJ{6@xiz3Pzd5u7A{j(LvkzCSbo4+&?CT6YuU^=_UYG0}_KInhnpoO;k?P80`8Z$Z0LelU)H( zQlcU5UwE>PYP;_NA<(A+fmsn(KaU5y?@w@ZOYKlN-@fauehj%YnQ-+1^0^tDhICGb zKG+!8=(}-Ea`AKNTM&hhK{~$l-kAVAcKTdO+?1W#$L%k>P;{JC;9*N}Pqurk-6CCtnWbBGW1Km_f8SXZ-3g- diff --git a/assets/font/JetBrainsMono-Regular.woff2 b/assets/font/JetBrainsMono-Regular.woff2 deleted file mode 100644 index fdf95dde6b002eb2317b4c17f534b307e5153932..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44688 zcmV)KK)SzoPew8T0RR910IrY#5dZ)H0u}540In^Skl%1nUhM$D&bc$Is~5sp5hJ5$TL~Y zt!P>5A~e}lQpB8uJ7(aKJEAAMOlnotvx%KxT&jpS?D1sCZPxEJ6lS01+JJ5ui6_|>t|-tK6B$szQz z2Jd}v0k0ew#$xcWqI0rG58?%YBFLO2jIAE0!|QDS9(b~ffmhB(Oh>c!z`u{aaTDKc z!fKv)>xrLAH5uJja=sZV$NaF)(ai^X<2X$N3n5OpfFoLZ`^N$YI@A(?&)qw^d+`^D zd=VUPxMfVKV|j^=4NGcnq5|?3Pr)V(!JhLeID4kPrd%-Kr=tjUq>oE2d-}9x^f1nu z)U}_Pe?zu1tJM*I@g%+AdCz;`O0E}7bms?tZoT(@>@SHt{-8IwK|3Oy*fpaYyD(}u zqcGuS46Cqa6kruLPEv*waRr1}ce)n{@Y;^25(Xcg3Y#l2;&vN%(`;$~2M8>!ybqqH z8*LS)L-k2h1~fVgf)mnTdZ}OSb(^EVeJsUdG?MX6re7v^sa0j}n>7w)Ku89m0-Ma< z0jBDKd$G8jhQ-43ET_SR4(AW>Yx7U?K%xXLiVKe8j=SdI-etdwdc+Y&90C#wsDL?> zZD24Ptm&9T9lBx6VpFGXhlMe}V}&`z?VBtA`%?8g=iKn#>jprDNI;~5WB7E^_k=7O zDw>{v;tGUAHCrp2lXj|}KaM!o?((;mQ zvf1*>^@=K881xEDX~Ls!PzVL~gwa%lO&B;|;s2Ye{e1u_$?uV7($D$JRyzSmL>#Kb zQeq`DV>^}Hh+An}T1xdVy{)F3bQ6zgJb~kJ9A{zB00SCHgI*v>CMSdUA9X1YQ_Ctm z^%ux4P{1V_uH}UFi&Dzz|tRSK;9?3B(zSOD?O?rOY}R_CW*YGbK8 zv(kNCORcogpuoJK;08%vDE568RsagZf&c&Rf6Jd0;Y{B-Bc9>!OA$n6_o4^f+t(&d zSQ6EU^7lC1u!CZS5GY4N{uz2T|nZIkrr z|KHMPclO^;`n%f;rBLmLovF#3-1lcl0*=m{&`G*`lDTd|W{oP;w z4tiE8{_XZw3iH&dzvw!`VpDRJg~ zmt#w8{;TFV8Pef7ko$y)KWokRD_t$H^O(6u5@Ql021OA{(=S<-rsB!4HL6cpV{Y?OJMuiJM(Ch z_PjfjS{huI$5EEuT@7)6g%m-J-7dd;hb1jaX*eKsW z)&1c!*;s5wZeRR&N#w;P>RXX*JMSC#66oedBHl5Qx;Q}m$eVQ#J-)^AT^z>yop4tX zarbwm{lWC-g)=<9K&U4YOHgPG7Kb+=5J_YTl|}~`Ad>~LIb0rJU}$7)A~Y3=B~qE0 zT%lB{%{5vWEhQ}jVz9Du@(PMd$||aA>Kd9jExfjluAaVup^-5}Ai^Xvg-W9{5GISw zamMBG1tvmMGjj_|D{C9)nlx+Cs!h8Y*DMG6?RxP6q|WG5@B ze5PbBGBGB>aPvCOjRl}ZsJ3)zV6SRYGR#_oVcjFtPm!)|{|9ONRd;{x0qSLD`= 
zG~H5LZSyPt3>3*jKH@vx!&`iY=NS$;;utfT;*_MKv(`&&Nh0Ya9toCEiRjrCc9nbiC2%Wid^!1ASd=MObr8u^I$>2KrA+BMX>k6cmFqS<&mzMkqhoW1 zaj~4{mnX0VzCJ}n)R(38S_jSU!d2-)c4uPVTX27W)PMH({{Fw(QMFU$_MI*|Erj&L z>FU#etL>fs|0&2Q@)}9W6VgIH%p0DPUvR8*H9EWAi4x$iCxvE{xj%*MYOS5AF_U7ZP5^ zNAOqp1n|@OH{QmV4RS^Xaf4ff;@|`Zy9QSWp9Gl@MuCtZJQKErqr&5<{3?2iSez4g zIHFm6DDjeBdLbQTd!!ucu?%veyq#K=`^rMzBVU(G6osObbBbQMqm-4iDBYu6S4vc& zRn=Q9)kmtWKByPfS9efLG1u53ISzw3$^_vFzno-7VXC zur6Bf5ILZUR44}BKzVeAPNG6oQ3E#EsrD_K*$)%i7W=7PFKCAa>O}s91&lMGx1S8JQAOcUnP_T1W8QtAQ8!d z#76od6;fTgO$nrv)2H-STAK9}WKr3@%*pm;cZ|dA-OO)xZD!AYW`z^X%JTjEMqbYE zCdl*h4~27)6^|w;#uhKdox)y@law*#gL1jNg7QFltNdO$SK|q)wCYL4D|2;T3adNS z&pOsgeW%XXN}W}k>&x{Us7vR0zL_WHM;V##n-|Q#HZt4zGEWSOCw{FL*$LTAg?XuI@u^6Dk??qSu|4;%AUeUp08}G(93fdrCw91 zIttL4)KS$=NyFrqoiwg-wQ0?HcBRl0^k@2-&cj(<)a}JKwgxjk?y$aWd^f$##(X9C z%~bQnoHXrZr*+$`mZgQX%e6z=4RlJEq$|{w=~jSlo9?`BTA!{5w29uXpR4cJ9|8T> z`Y8k4Kr`s1(cm_87`7Nr8txnZp1=qG=-V)-o!FNrsbv~)2%j5 znUl3b%5s#nz?P{c(B9`VdS3g%wuJ}mt zL^0m&q31CRwBS4-U}IV$&jxC+3M#560@T4RBSGX9ejLhMt1bU zVqHYwd7rs&dh@0hAVio5ncC=Su;E6VYNlnfZMMrkIZiw4qN^Ty<7*s!PIxljbo7#B zYN4zCMw)E8Ec0YDIpDC%?tAQ&681Ryg79Qiys1TrlctdlhMI1LHMTO@<*>`{xbIWS z`jRDZ6+kCSl1y#%HPCo7&9TfXW_hl%c@cYGk@2RJq#2{3CNpQ_#$o>X;6kvX)(X1k zo(vz)TNr>mP_4N_U{>Cqxe7q%<%A_H3X^ozeJ4RLLbxRkU>X%2!q5yKxPCRAZb6ep z35N@!(<2HZ0U2UBnFpB9)FMJzm=oB66z3W9A;^ul)aea2rhDbE6z5J~s6pz!B}yTz z8wnUjlzlxfZ_DTD>d!3QhA#UiO-LVq`^}z#)~zu)dzXV3v$qyBBBHK)3zxn+Eqytw zR3kRghwrbSx%5}(fXXLimSY^}?yL(7aM_k|G&I&vdw>tv0D2l-su9O;T)KXHZ*T-I zVvW$*KKx)^+zi#}6KEn<4PB&AA8v^<5_$TVjELn7oU7~?HEOY`s)|}~6{5yn1yi`X zs8yl8T>apJ ztIkO`CeF>?OLZP!pSU5A?(t+|)zC!>b9S@Oiiw{?yywXL=Z^9zSmR=N0Q;#fc4=cuY$xb!B2Q zG1;&%@a7F;Vv_l@?=ou!lYs_e?Wwy>dy4l}Xe@e@;|Mni&8JZRttk<10+W!;0iEs1 z>T|g>{)^Ff+m$@;vI1URFp6`p1x{6$%&h5ezX1Uxqyr}EN zo&l}jqG4&gxSD^!eR1F1--gr|c(SQvSJN_W2oKTSle_Cf^ zCG}X_+FP&t(1kDD?pbNS&~>bRcvoM=e?#~9oym?{eYNC0A1tuzKq%Gp&Sx6nqmS6K z^n^ZK_EMC~ei@C6EIGOl`BcwdQs+g@lg4@;$0v)ESQ+`c-^Rh(D`kr*zWa`qA5i79 zrL+-Wk=+KAX>OVJoogTtTts1&W zp}x1Oezaed*>84{T2$d5%*c^G7xh9y8GU6~Z6* z@6`20M6V_CRmgaa27-1Fw1c3XMQGNqiFDbxXZxiZ{PYu>e>WR3UIPyg&#N$BBQGzj zz6K7vYy3~s*W=H6{Mk;lT;oV5vsraG9H{1Aur6>p!5M#j`Ot5@enZw@_1U|n_vmoU z_JQBGPhI|{9m}yQ?7zp8|1j=9wAtHNt(fft|1N)BP8zN43VKZZzh3INLftjsa-w>= z?7}bnYG0T48lkh}sJ}0yE(g?YgFfm#SyJ8jsW^GB`Vi1X3U%6g7Z!Q?m@u*2pU(yy z_c+cjtU4!+V&LGkxtujZXU9=L7hacrn~lAho9CWm_e4mZ^QZ5q9{-b&yzm0U%U446 zcmurh3iG0o|13Go2 z9~|j3vg=XTe%?Xj5FN_qY4gFjemq?=rB3E3pz(vuk-=x5v1gJcb8lS$J^s24FP{ZO zXaCL$4`2Cajm6xx0baiI!v>`WJ2tJUkM%(m)QI;iSQ+{0C+2Ko{4MV6g@hk|^3xVM zF|aW3%P*e7DTh4qoLY2NtF5Q(z$P%+lDYE$e8$gR9Q@e9Ib^foAO7&yF89Q&MpWF# z`h9)%+?7T(adA0O3>+K|R0uMJBOJBSZ+{)M9`ED0pgMb`8OE>e-~pl$HLqbzAenp8 zL9yX`OrCXoVYS4YQxEXZZ5%jF&)}2EAbxEJc954B2EASZQK#c%Ih|E#Uk2Hxp!>XZL@L;78v1IihPb2!IwsEb|xI#5?ad-WftnJ`0I~MsBT7~+bR^ccJY>LXCK+?d^+w1%o z>$m?saJ=lUtlGkC9`Eh2Qhz_y;AsBzz=x-XfhCf7!q3%8K#&6q4{~y;6b4nukc3!8 zHUkoJ9*)EjBThx-o-$x~Sk!_+_!4m1h#i~xG?0)KV1kcj#k>t370MZ&4a&3dm^mAK zZsIF(oos>8Kt3=iikR*Kl2%XLLrXRbXTphn7JQnA)1Yb~JgNTORg7GK^vGr#@VP8G zwGLT|qPcWSB@4u?o>2<&;0yaDR$&dOKgwJ{dLY2w(`Eu{#!4wB zp5f5Ow2B^mTpS+UieHc{{4iuMD$P@rY-3`iNZLvmWizSF!&^3m-bi(c5$vqojQ_$g zY1KL>cT3_8;DUlMQ!U(G)HnRlt{!GjSTZb+*=3?917mWZ75~3L# zXE+U!-8MXKx9js1!v?|-`9N+JQjifx*N-o!Gbc%7#%j1H?B9DRMH^R4_VSTBx9sXiy9~$t&MCbZB=8U8;|wv zM&7)y5U#RU*SU!ve7}Iyl<5(JwF8>`X#yXw{ZLo2ac$;jDE4DYc>l z4gyO3o4BI`9pGRfQF%sps+}PULar+oU=v2{yiN+}fi8#}7-Q+kZgv$ZB~hw-PYq-5 zp7u!cw!M3E+TMNaC2zYr#nLz&GRY}U_F%g4Xva8#8Bm4-2&Zea9k6k5`LlguUnhbB z9CH`Oiq|?;DT0i(ipX`sB^rT2^H0H^Fwzhh$vG(@agfa-EEJe`;L}fRHXbm+8L(Hc z6tq$Rrl3>?U>m>yNUIGP1c(Fx=1c!1CDLkV(Begr%makekQo4^vw&q_|J_}u4zR0W 
z75zIk)C#EN(7sEodT&^f*r@!SX*)+OU75Mu%0;JAF}J?=sWsl^)e@izC72TrWtbNU zMaaX&_pYsn7P=7qdrI;?80@jnenW0K>bO%jIBl~t&f00rIbZw61?OG#t&J|b2pDo&I(dv|=e z`=gtqDd{or7lS-m^ako?p-yeu#npoz(WRw&bF~5hcr7l&V%_P8j)#ViLr6qSKuQJx z2{jFsDx3A_)o;Kcf)PYGzFbKF)H2{;4FH0n2H)hQLAg6RuV_a9o*U=6@?}PW-)xLU z>>(nK?tI`3SRernKwyIq#GnK#kO6rhKn^$z&KPb6Z$QH~#>P*OAP=3OE99UAH5kD2 z(1NpQ1>K{9|0P)#dI%4l;AbPAi+{YQvfc|p{AD3~fRMwR^%|c60#E?x8G)iS#~+1D zQXD>mTxj3pG7aLt1GwDOr=|b@zeJ)lb2$M19fzLV*TEKe6!vVtYy=K|0Jgrs$FK>0 z!5?ISo7<017PDm0PBZ7V#s;HiRIaKG4L*-`@PQ|bI*02 zS#``~ANTkt4ER`1-HVL@0BU>I!rb_r%`cA*N9BFr{;>n#^XB$P*ZQDw@TT@m%1Ut? zzkk@A6yRHy(0XNV(VApZ8f06lOq-onZQ4a;MnlIK|E0+>&9Qwve1hTGvto&4fX0+l zh*#u~(T8a16U%HJBNHbsLdE`}f z4UefbI=}##WkVq$=QG6SWX*RTU(gwg=@SEDrP~a5ApO@cq8=kvx2ss}&g05y!=^3g zPTl$(I)=f4#b-buAi*U?q(_2+#)gB(V`M@mWF{t}p{Amvq(!l6ikl17q!l*fJe)>Z z&^WNrO<)+qv|!1!8FQA+nzzW#>c;ERMF9f4aOT?Y$cb$`Lk_jSL-=iEY$7ympl;@C z*QGo=yap3;`K1rxCF*0V$|`CU;Go`^8A~yghm@pLJ}NZ z5|Wst1SLEXiA+?Y6O-7)rB3vTca|$$=_*&d#$#@}qK22X}C*B%?#1@W4175I9JsOKxy!!OU% zW3J!A1nIR`GLL6Q7SN7m&sa-z#G}#-ATYRs1Wu|+K?boP*dPb`1HN-%#??%T5L{+E zwVHKgF@$l9TT?Jh@6FV)r?{OCY%pMOZ2D{V!pI@ooqgmQmJ#BHRMR}^EXIp|Uxz~3 zY)FjY!JHmRKPsfgXKT=d{|7_lU&C5^Y{xBC@s`|mwk4g$v?2&GO1;uGW3ACW_*gsO5YaUa{(O|c+wmnl7RIDb5 zBAa#N&;X?B26gzEpsW-bqf=XW_C%We|DGZr&MqkOx({n6Zm1Wv3bqd(x#5X~#%cvM z=<(42CSqi6>?J24W-gL76H+@TsHe*D08Dcuj!%U&N(DM?rOkF|iih(tcx%GeJ+w-L z0?o5OeiE$uf62zQf5_ z(w>0a*IIye5sJ`BzmA@>FPU076MEB48)BlR4+F}Wqk%9nQ$|qLi0`Hu@RNo_;{HIY z_Z!YJlffFcJY8~V870SFqRbPfX3-jfRG$nEX0bVQZpInlf|6Z=%Nxkq!j`{DJHDj_ z3uNSvUZ^h+8VC#x1&&67K&2p2B`8!28tn)MjRmiF3lTy~s_N^O7nGzglOaRxZi9+q z5x@0&#bx~c&;&D#gV-LOan!r{MjH(irWiAfImQBGiLt_1V{9<)V3&QM(pnw74jfdd z#f7caa#d*I`-&SmL`4auOg|6F5r7dynwGdE-o5%(ZYW2`>?WO~xuSL}@o1W*?zaA^ zQfQp8rt0?0DewyGo-@pVvy|mrQs7bXQX0h}zD%fpCrkNMb2=ZAbD+Wq_XP2%B*VZ3 z%1hProG!aXNLjIU>=iPTy*ns{L(EKO;B)$QQ`)i-e$TkxvB?dWO1tMD@{!jd3ij8i z@SS!^y>r3W6eoU-JG#G7a#J-|U? z*Y4A=nGN-*w9JQ^J*w>a@YmlkYiECCT-Cil*N=kNG26t)z?(=DtMuzU(`C02R4jSO z(c?A>-PAMJ6OrM_$cD$!f(oT9!7h9P*`IOO{S2$~kBgB813lowuC8RsSY3%*#SuN( zi|a=mQJ?b26t_}Il*r`cr}fbUOjdD8;Vv? 
zqD!-yQ@iJ(%$-8_`|1S@K%?MABfV51;bqg=EA2Gdt39CCDkQva`uYtV0H`D0bW&F( z;VoCaT?2@Bob+y$g!f!$?=MG#ebC$K!wLx>na)0Lr`bN~0exB_L4alEHcsFUxC`!q z```h12p++6j4Tk|JgIF%4UwW}8N>TK%p^}PX;+bWnW#DoAyS7l;k*U?J>77xKGBd& z$F&$!#2m3itV5tJ(Vk2XD9#yk#oRH^5a>FuA9z{D3UF%2Jso3iP;WX@lO z0nd2I{p3LX?~#}RsCI%E005H=41gaCjClbp=^?U zn57%VnbC*<*GlzKSR4&!$}k>-EB$R`E<+5*aP0)QIfI7rTbwV1^M*kJ8{}5P^CA6C z#I@I%1Uda9zbwW6Q8d_7o44}J40`IY;qO&XR8 z9X@7A7+BlOVrr~WmoTrV(EPrRk{x8jMuH`CyjWHda3Pf&nCk(O+ks||)r&1g)a6}u~K zrA~`8BfUA&F`mqcvQ#rmXKK&w<|M__&ALTPwucw8Q+(PNDIRhQh(IEs*ik8!L4v%5 zMn7|=M+7N?bJ?|eTPrv?MX4eC8m#X;gL9mK6cGWO*)7}MO>57S7QVEjFJp;60-94X zrf`g(Iq9{*m{xS-$fVVZQrm4i?v^u{j0?u^Fd@;GHtSq?LgR5S=4wyWsh+?4MoHG^ zn|Yl~%Qh((WLU5*o7next`0TfBKKQ}>|158gDI*NFP96QA#5<-6d!DzwVq`~qf@Y| zI?Y<^)*B;!-BhMX%XuBkzM7gm$kvJ>k1W7R!tUrQAz|9zhYv z;h^9fI-YP3F1SV(s-oED3wgMX$<)~a9hTLS@9*bjwL)_FqGnTnRbN$WOyX}%>WQnp zskGMC0{N-jy)Ff&AYE~pwMv1bqJe9INOS-zC5u*6--#m0vD2>X9oM-CKeL_RzyI*<#CpZwonoyhcP_Grb((fqzUk0J>ppytPWvD_W{}It{f1 zoUPN$n&BC}N`b)PkRn!!uF+zNguF~|L9<~y{Tm-cq?!>Vv4o&5;1~9TCAktDF0?OE zHBBsn2W5P&#R!5z(-a0IIP5qX3MCjrpEom9WM`GaXSgiLMTX7g9G~zUG56oFn*v*- zZEK%@2rHIX)F?}xW1oo`=urJ_3aP(UA5Qs%rJT2Bv1y-2g7ne^C>vsqRt(aRQYKfq zR-(+JGJgy=c7C~NsT<5M+gCC$sc}Lc^@g1rRuxinSFsGQG0z8_wtMYW))SvNQSySF zQ#;mq?1afrVDylS$96}Of!akB$Rndz8x2D2Q&(|Wxu~s!*+%qUWKSYLnbjhtRyr8D z8#)RB`KDb$C#x-9utQbUz9dVLa-Z!XyxwMnHgB^}i1eJp$-KV5EmzEfJ0G(q` zPb2`qTN4?W?gfcSBbYD;ao-JBoL%~FeLp~(%4Tx0AtDL)){?7y z*-82x37cXJ(oLLX(x~1gHmnWmt~=|pNQu$oe3L++0~!CZ0Iq7d8%1&#+Tk#^F-H%mrL9)x#JO|xPST$9F*X%Vi46iCT1&F;fGhOHLvulss zJZO-VN!PLCw;n93B$*$2(hauG24u4uqRn$9j zouvEiz;<>C5+ObJh&o(g{YdT4$KWCMJ-CfdR7jPBjID3V0EjfHBW4(Tx8d6UBX35S zRaVjhc%sWWD!sjBbYvvB7^a>5mT})m&DU+}Nn_F-kb+gnBWrdB%1o;lL4X{pV(;H9Atp5jpDT%Jf{|Hm z*NCPwryfNV)sy26n%3C`c* zCASdoF_M%3PR^ zEJOETG=XVb+g0J(C+c1Id_wmGm9P5uGpWp6V6IQ@oWzN8RF+A)!FyG_Yxb3s=M$`wG}>;i6k?4T*$IJDc{=AjUVg1G|MLY@)_mmPzS5p2cz?p zlUD;i(G6KO(fg*lY-}*X#&bYW#-RH*1&s-r=OrzLVL}lEo(H6o(N>D^Bb8CrT4m#P zOMOO#V>6SK5A0=}Fw)V939D0zhSxScWGAI$YagpNwAFqy(nr3|QSCQUflYl&QZ^)$ z3ar+dlehme_(Y%Ol%AaWjRc&}K`yTrv^t5`boL!aDt~@RiQDCh>pRuqH}|vE!ts<( zN&3SyB7HX<{;4q&(D)af-@-~V_IFv|X?PwhhdMT6Ehr_Y z**h(l9�fI`z+*nJkn6Iz@+GVb{Wvm~3SF@bMQ-TedUwtoOt+B5P zC7T{ogZ_Fw;{+T+A2s$~E@8e#m4fs->xoAUPu|C3xZ%+3sM$k?&Mbz0w@BMgON+)* zb)l@zFE(N?4bSCi66!)AEo1_XeFm{+&d`kTAbC-8H@7i9&u`|7<}(bWUNl=yYn0D| zWBdrVv6=;#A|PYGH4QyP&@idLZen2U@iFkod)A~WNme?~z$*IQjn0Is&v@L$r82;x z;B@}D=EPi?uqin!#-VOfTmw#dG=9ftJfGUC4ZJwC zqq)*o?vsv~fz^DWJ!*2v*WWU~M}p&eZ0uV7t(LKAHk}@8p^=GNO=#s z>!@E3wyX>Q&t0|tg$j?8zd!kX7gmfoVXY`>SVcr>_$E@aObalJ0iR9&P9O$1=0c1h z1%_yWiKsf5<<$uDJ>>84mYp?Z3>hL|c zqDB@3%@ej`@ZdOIJ?|JDr=~Nvzl8YK8lb@ixgvtR?Mj!#@-2ej) zr6%)QqNRvw&COgPZ3U7KE&PHP>FD-6xG*~JR2A}Mb_fvgWtFN|UKyI>G<%~OHl}1U zvEt#li$EVJngGvJD#@`_e#(GzQg|tppv!VKV*Dc^w^XJq@q8HESrU{FmI^vL846b@ zjq2eCa#*(F(Y0yIzN)R}3wdxiNADdN(oVL>*^7nWrX);E-r0C zMWYedA~4Jf!yWS5q~@2iup*~{N!9g8_XS4DakOiDwq4baDtJ|CwB5fJB#M`xvLE%h z$Q>!Uq%g$5KT5 zo^Lg6F22!eG^2^!Nb$@@rFnoarwHY+B^<*t-Fp1Z!nVci*1gT_|JmTUoU4w^HDQXK zp*>n8fKV748jE^CjxjFiL@#R-74LL{z1`^z=a;9ouo9mYg6#>sbk2%s90I8amlAn<%6X<6U>_hx`b7Pa0XmP)#{uYeWU1R^84z) za=IRKCT%)MB8B2uq+vRsA4su5PHUZqrS<+OiUF&@TtyB?)y6AkuKYNWLy;>ONDoY$P%Wd}-A0NfVL+jJO` zm3};e@5WEvmWZQO^P)G|jB63a^{6W~PD%A9!*}8#j*O5?wS9EBeJ;9nsa==BHn^oP z{ETEOlw%FPRMnoHqMBAapjUxWjfPa3RSc`o27U6Bhz+tf=CERY zBUCu=&DhCJtx?9zQ)7;KTBeA1X!@tj^Kp&)gjunzz!Vdt{h7!YSPS{?scU9EUunfW z8?>yZ*NP-+k8X2@#!6mfMysqNyH*NtfkgQ+Hzs^O1=29gn&sGcM7l|-b^rg3edS$I z(Hsc(J3U8?VMcrcf|sUZzZXF^drskQskeR^&H| zP?SrhMplhoH359A?jWwSOC0h_c^YXr9F@t#na_(iPxzQ|JYPoig3D>_^{gN1ZUUp< 
z{{+&7fup^GNdEYnFe)iQfzd4(wR_0bZ|D$X4a;J?sP?g^$;of!#IS?hC8G}ZNP(?x zJ6r{rW>8U#bi!*X0I>MDo24{Ml+==!=wE&LBu->9VSEf^#d=YL*E$KfGLA8OLef3u zk*;BOFe?ov(0fbUqbjKUE95~Om~wsSq;)5f&#Tva4Ez}I;P5qDU-IM>&_2CB0e6x7Gu0Q$fd1 zG#Kn#S<#Ybp_Xp6g#+-A@ze_=QmtG}!-XD(?R#3#q-HxPXsB6$)dQe}0<9DDg5(oh zuAFwW@?Mn{*F~!q8LW?xG)?9~hkaL;NktURPCD-%nx&mG+;3mCD4ekZlX<=ywggTT zM0pQzB~OnnYjdue=0TE?mW~&eP$Brwjr*117?I3Y(1`fP-;Nse86D)}zoV9YWSU0F zGy|_1$?c^BC5q9x7C48|xFnrs#lT8-QiYzSJM!IFOnc_hY(1LT z8yC}mZRWRezjt~fYEW*(LIvxB&&2orq)p?eb5lOOdpe7<;pIBNk8zA(R=aO9*Jv8! z`#hAJ><$V#HCwu0F>m%FyW&AX3!YwKI+*Lt*^L;S#i-kX-`*EIM#dqq zEl9MsB_aHzDK_N-6aQCjl-GsJ!LR5d2ri0Nb;o$cRr4O(N>)?@3l+)>7^>WGvePr1 z=foQNj=0p72;fP=tievh8FE2d9z&ur3#EW{YA*pE%+VhwN`I%H14JBbg!PV#(ll%I z<$lC~aBh)^rcAdavRnwRz^G0+MMsu(nmC()KNv!uX+<%RT?TzhSk|Pfx z-Kgtj9uy_={CuBt@N2vfjyUiKB=!2s5>>iB*&wYLv}P22RLUJ3+owcJjrGkJswWa; zfmG7eo=c^r(lN)2=LcC+tWEF$+`6*(DBaeL3sL2%8lwvk|n=<%{;50a^3tgv{DXQ*F{XWwxmBgF7 zOADpa@9SibI6xY_Zu4{wT!!)-1V$0n-c7hsU;V< zzF>~NbGRp+_iRE3^32^M9P(A?YZ?$_(x6M7Un&ugJzs25H$OFQd;%)iLyjS?GLI!N zuo}3GNwnlvhI5Qw-47vIxn%YCsx`sW=@w_rSl%gU@ybGH`B=}TjeD~{pf4)-#R6wF4lDukX=W*8y((#>+4ZgwO8K(g!PO}my zsX58l>PcgJ0N+5~DdXeRIC+!K)rm^`;gxo%$eamb+d#_^9f3KNI@oSK)TZcQcRPj0 zXZx>R-T&}><9gY+zJL9OT)k$!zKJ4_nOHc8w(=Zf-UTg$gU4sy0MJ7H9F@SXtH;&MmmD$JXcb9_&i+B-Wv{H4aynn7I3{ zS-@fZ@RA}^9I*OlP!D27Z=1R#vKmCDj0J>PWM%W|2)MN+YM<7Rh3q9Pin3VV%FnA( zzouhs)@d8F&3+(0c4!R0ud*9xeYbj(c|B{-T<3FJ)Do9#h&~5=)lgVc>SV~}+E52|Bqz52%wZ%kcQLO2bRZ2D#N3}0% zQi8Tl)c2n`OoC=Tsr79ojm%ez+OZpH@a_}iMnrUUyX3(8K^+Itfa5wUh;X)bco>Mn z=2n8^g8e9r!lVq8T^WeOxVvm@zj;SL`$$m6bPFIKi9NvP0D-%38FuX+WVh-)<`REt zn{Z5#K#B*qNpKY45CWTD;33|06CiuO20Nu2Z^l3!9E7+7&cf3kxZX0jEY_iZX8