Initial commit

Shadowfacts 2022-12-10 13:15:32 -05:00
commit 358791d1a7
268 changed files with 29681 additions and 0 deletions

.cargo/config.toml Normal file (+2)

@@ -0,0 +1,2 @@
[env]
DATABASE_URL="sqlite://db.sqlite"

.gitattributes vendored Normal file (+2)

@@ -0,0 +1,2 @@
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text

.gitignore vendored Normal file (+5)

@@ -0,0 +1,5 @@
*.pem
/out
/target
.DS_Store
db.sqlite*

Cargo.lock generated Normal file (+2798)

File diff suppressed because it is too large.

Cargo.toml Normal file (+75)

@@ -0,0 +1,75 @@
[package]
name = "v6"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
activitystreams = "0.7.0-alpha.22"
activitystreams-ext = "0.1.0-alpha.3"
ammonia = "3.2"
anyhow = "1.0"
askama = "0.11.1"
axum = "0.5.6"
base64 = "0.13"
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "3.1", features = ["cargo"] }
env_logger = "0.9"
futures = "0.3"
html5ever = "0.26"
http-signature-normalization = "0.6"
hyper = "0.14"
log = "0.4"
markup5ever_rcdom = "0.2"
mime = "0.3"
notify = "5.0.0-pre.16"
notify-debouncer-mini = { version = "*", default-features = false }
once_cell = "1.13"
# NOTE: openssl version also needs to be updated in ios target config below
openssl = "0.10"
pulldown-cmark = "0.9"
regex = "1.5"
reqwest = { version = "0.11", features = ["json"] }
rsass = "0.25"
rss = { version = "2.0", features = ["atom"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
splash-rs = { version = "0.2.0", git = "https://git.shadowfacts.net/shadowfacts/splash-rs.git" }
sqlx = { version = "0.5", features = ["runtime-tokio-native-tls", "sqlite"] }
thiserror = "1.0"
tokio = { version = "1.18", features = ["full"] }
tokio-cron-scheduler = "0.8"
tokio-stream = { version = "0.1.8", features = ["fs"] }
toml = "0.5"
tower = "0.4"
tower-http = { version = "0.3", features = ["fs"] }
tree-sitter-bash = "0.20"
tree-sitter-c = "0.20"
tree-sitter-css = { version = "0.20", git = "https://github.com/tree-sitter/tree-sitter-css.git" }
tree-sitter-elixir = { version = "0.19", git = "https://github.com/elixir-lang/tree-sitter-elixir.git" }
tree-sitter-highlight = "0.20"
tree-sitter-html = "0.20"
tree-sitter-java = "0.20"
tree-sitter-javascript = "0.20"
tree-sitter-json = { version = "0.20", git = "https://github.com/tree-sitter/tree-sitter-json.git" }
tree-sitter-objc = { version = "1.0.0", git = "https://github.com/jiyee/tree-sitter-objc.git" }
tree-sitter-rust = "0.20"
unicode-normalization = "0.1.19"
url = "2.2"
uuid = { version = "1.1", features = ["v4"] }
[patch.crates-io]
tree-sitter-bash = { git = "https://git.shadowfacts.net/shadowfacts/tree-sitter-bash.git" }
tree-sitter-java = { git = "https://github.com/tree-sitter/tree-sitter-java.git" }
tree-sitter-html = { git = "https://git.shadowfacts.net/shadowfacts/tree-sitter-html.git" }
# openssl-src = { path = "../openssl-src-rs" }
[patch."https://github.com/tree-sitter/tree-sitter-css.git"]
tree-sitter-css = { git = "https://git.shadowfacts.net/shadowfacts/tree-sitter-css.git" }
[patch."https://github.com/jiyee/tree-sitter-objc.git"]
tree-sitter-objc = { git = "https://git.shadowfacts.net/shadowfacts/tree-sitter-objc.git" }
[target.'cfg(target_os = "ios")'.dependencies]
openssl = { version = "0.10", features = ["vendored"] }

README.md Normal file (+3)

@@ -0,0 +1,3 @@
# highlight-swift
A description of this package.

askama.toml Normal file (+2)

@@ -0,0 +1,2 @@
[general]
dirs = ["site"]

build.rs Normal file (+5)

@@ -0,0 +1,5 @@
// generated by `sqlx migrate build-script`
fn main() {
// trigger recompilation when a new migration is added
println!("cargo:rerun-if-changed=migrations");
}

@@ -0,0 +1,28 @@
create table if not exists articles (
id text primary key not null,
conversation text not null,
has_federated boolean not null,
article_object text not null
);
create table if not exists actors (
id text primary key not null,
actor_object text not null,
is_follower boolean not null default 0,
display_name text,
inbox text not null,
shared_inbox text,
icon_url text,
public_key_pem text
);
create table if not exists notes (
id text primary key not null,
content text not null,
in_reply_to text not null,
conversation text not null,
published text not null, -- RFC 3339 formatted date (e.g., 1985-04-12T23:20:50.52Z)
actor_id text not null,
digested boolean not null default 0,
foreign key (actor_id) references actors (id) on delete cascade
);

site/archive.html Normal file (+20)

@@ -0,0 +1,20 @@
{% extends "layout/default.html" %}
{% block title %}Archive{% endblock %}
{% block content %}
{% for (year, posts) in years %}
<h2>{{ year }}</h2>
<ul>
{% for post in posts %}
<li>
<a href="{{ post.permalink() }}">
{{ post.metadata.title }}
</a>
</li>
{% endfor %}
</ul>
{% endfor %}
{% endblock %}

site/css/auto.scss Normal file (+5)

@@ -0,0 +1,5 @@
@import "light.scss";
@media (prefers-color-scheme: dark) {
@import "dark.scss";
}

site/css/dark.scss Normal file (+59)

@@ -0,0 +1,59 @@
:root {
--accent-color: var(--dark-accent-color);
--content-background-color: var(--dark-content-background-color);
--shadow-color: var(--dark-shadow-color);
--ui-background-color: var(--dark-ui-background-color);
--ui-text-color: var(--dark-ui-text-color);
--secondary-ui-text-color: var(--dark-secondary-ui-text-color);
--content-text-color: var(--dark-content-text-color);
--aside-background: var(--dark-aside-background);
--aside-border: var(--dark-aside-border);
--aside-warning-background: var(--dark-aside-warning-background);
--aside-warning-border: var(--dark-aside-warning-border);
--webring-background: var(--dark-webring-background);
// Syntax highlighting
--atom-base: var(--dark-atom-base);
--atom-mono-1: var(--dark-atom-mono-1);
--atom-mono-2: var(--dark-atom-mono-2);
--atom-mono-3: var(--dark-atom-mono-3);
--atom-hue-1: var(--dark-atom-hue-1);
--atom-hue-2: var(--dark-atom-hue-2);
--atom-hue-3: var(--dark-atom-hue-3);
--atom-hue-4: var(--dark-atom-hue-4);
--atom-hue-5: var(--dark-atom-hue-5);
--atom-hue-5-2: var(--dark-atom-hue-5-2);
--atom-hue-6: var(--dark-atom-hue-6);
--atom-hue-6-2: var(--dark-atom-hue-6-2);
}
.theme-inverted {
--accent-color: var(--light-accent-color);
--content-background-color: var(--light-content-background-color);
--shadow-color: var(--light-shadow-color);
--ui-background-color: var(--light-ui-background-color);
--ui-text-color: var(--light-ui-text-color);
--secondary-ui-text-color: var(--light-secondary-ui-text-color);
--content-text-color: var(--light-content-text-color);
--aside-background: var(--light-aside-background);
--aside-border: var(--light-aside-border);
--aside-warning-background: var(--light-aside-warning-background);
--aside-warning-border: var(--light-aside-warning-border);
// Syntax highlighting
--atom-base: var(--light-atom-base);
--atom-mono-1: var(--light-atom-mono-1);
--atom-mono-2: var(--light-atom-mono-2);
--atom-mono-3: var(--light-atom-mono-3);
--atom-hue-1: var(--light-atom-hue-1);
--atom-hue-2: var(--light-atom-hue-2);
--atom-hue-3: var(--light-atom-hue-3);
--atom-hue-4: var(--light-atom-hue-4);
--atom-hue-5: var(--light-atom-hue-5);
--atom-hue-5-2: var(--light-atom-hue-5-2);
--atom-hue-6: var(--light-atom-hue-6);
--atom-hue-6-2: var(--light-atom-hue-6-2);
}

site/css/light.scss Normal file (+59)

@@ -0,0 +1,59 @@
:root {
--accent-color: var(--light-accent-color);
--content-background-color: var(--light-content-background-color);
--shadow-color: var(--light-shadow-color);
--ui-background-color: var(--light-ui-background-color);
--ui-text-color: var(--light-ui-text-color);
--secondary-ui-text-color: var(--light-secondary-ui-text-color);
--content-text-color: var(--light-content-text-color);
--aside-background: var(--light-aside-background);
--aside-border: var(--light-aside-border);
--aside-warning-background: var(--light-aside-warning-background);
--aside-warning-border: var(--light-aside-warning-border);
--webring-background: var(--light-webring-background);
// Syntax highlighting
--atom-base: var(--light-atom-base);
--atom-mono-1: var(--light-atom-mono-1);
--atom-mono-2: var(--light-atom-mono-2);
--atom-mono-3: var(--light-atom-mono-3);
--atom-hue-1: var(--light-atom-hue-1);
--atom-hue-2: var(--light-atom-hue-2);
--atom-hue-3: var(--light-atom-hue-3);
--atom-hue-4: var(--light-atom-hue-4);
--atom-hue-5: var(--light-atom-hue-5);
--atom-hue-5-2: var(--light-atom-hue-5-2);
--atom-hue-6: var(--light-atom-hue-6);
--atom-hue-6-2: var(--light-atom-hue-6-2);
}
.theme-inverted {
--accent-color: var(--dark-accent-color);
--content-background-color: var(--dark-content-background-color);
--shadow-color: var(--dark-shadow-color);
--ui-background-color: var(--dark-ui-background-color);
--ui-text-color: var(--dark-ui-text-color);
--secondary-ui-text-color: var(--dark-secondary-ui-text-color);
--content-text-color: var(--dark-content-text-color);
--aside-background: var(--dark-aside-background);
--aside-border: var(--dark-aside-border);
--aside-warning-background: var(--dark-aside-warning-background);
--aside-warning-border: var(--dark-aside-warning-border);
// Syntax highlighting
--atom-base: var(--dark-atom-base);
--atom-mono-1: var(--dark-atom-mono-1);
--atom-mono-2: var(--dark-atom-mono-2);
--atom-mono-3: var(--dark-atom-mono-3);
--atom-hue-1: var(--dark-atom-hue-1);
--atom-hue-2: var(--dark-atom-hue-2);
--atom-hue-3: var(--dark-atom-hue-3);
--atom-hue-4: var(--dark-atom-hue-4);
--atom-hue-5: var(--dark-atom-hue-5);
--atom-hue-5-2: var(--dark-atom-hue-5-2);
--atom-hue-6: var(--dark-atom-hue-6);
--atom-hue-6-2: var(--dark-atom-hue-6-2);
}

site/css/main.scss Normal file (+931)

@@ -0,0 +1,931 @@
@import "normalize.scss";
@import "syntax-highlighting.scss";
$light-accent-color: #0638d0;
$dark-accent-color: #f9c72f;
:root {
// Theme colors
--light-accent-color: #{$light-accent-color};
--dark-accent-color: #{$dark-accent-color};
--light-content-background-color: white;
--dark-content-background-color: #111;
--light-shadow-color: #f7f7f7;
--dark-shadow-color: #151515;
--light-ui-background-color: white;
--dark-ui-background-color: #111;
--light-ui-text-color: black;
--dark-ui-text-color: white;
--light-secondary-ui-text-color: #666;
--dark-secondary-ui-text-color: #999;
--light-content-text-color: #222;
--dark-content-text-color: #ddd;
--light-aside-background: #{lighten($light-accent-color, 50%)};
--dark-aside-background: #{darken($dark-accent-color, 50%)};
--light-aside-border: #{darken($light-accent-color, 10%)};
--dark-aside-border: #{darken($dark-accent-color, 10%)};
--light-aside-warning-background: #{lighten(#c6322f, 40%)};
--dark-aside-warning-background: #{darken(#c6322f, 40%)};
--light-aside-warning-border: #c6322f;
--dark-aside-warning-border: #c6322f;
--light-webring-background: linear-gradient(135deg, #855988, #6b4984, #483475, #2b2f77, #141852);
--dark-webring-background: linear-gradient(135deg, rgba(128, 224, 105, 1), rgba(128, 224, 105, 0.7));
// Syntax highlighting
--light-atom-base: #fafafa;
--dark-atom-base: #282c34;
--light-atom-mono-1: #383a42;
--dark-atom-mono-1: #abb2bf;
--light-atom-mono-2: #686b77;
--dark-atom-mono-2: #818896;
--light-atom-mono-3: #a0a1a7;
--dark-atom-mono-3: #5c6370;
--light-atom-hue-1: #0184bb;
--dark-atom-hue-1: #56b6c2;
--light-atom-hue-2: #4078f2;
--dark-atom-hue-2: #61aeee;
--light-atom-hue-3: #a626a4;
--dark-atom-hue-3: #c678dd;
--light-atom-hue-4: #50a14f;
--dark-atom-hue-4: #98c379;
--light-atom-hue-5: #e45649;
--dark-atom-hue-5: #e06c75;
--light-atom-hue-5-2: #c91243;
--dark-atom-hue-5-2: #be5046;
--light-atom-hue-6: #986801;
--dark-atom-hue-6: #d19a66;
--light-atom-hue-6-2: #c18401;
--dark-atom-hue-6-2: #e6c07b;
// Fonts
--ui-font: Avenir, Lucida Grande, Arial, sans-serif;
--content-font: Charter, Georgia, serif;
--monospace-font: SF Mono, monospace;
}
.theme-light {
--accent-color: var(--light-accent-color);
--content-background-color: var(--light-content-background-color);
--shadow-color: var(--light-shadow-color);
--ui-background-color: var(--light-ui-background-color);
--ui-text-color: var(--light-ui-text-color);
--secondary-ui-text-color: var(--light-secondary-ui-text-color);
--content-text-color: var(--light-content-text-color);
--aside-background: var(--light-aside-background);
--aside-border: var(--light-aside-border);
--aside-warning-background: var(--light-aside-warning-background);
--aside-warning-border: var(--light-aside-warning-border);
// Syntax highlighting
--atom-base: var(--light-atom-base);
--atom-mono-1: var(--light-atom-mono-1);
--atom-mono-2: var(--light-atom-mono-2);
--atom-mono-3: var(--light-atom-mono-3);
--atom-hue-1: var(--light-atom-hue-1);
--atom-hue-2: var(--light-atom-hue-2);
--atom-hue-3: var(--light-atom-hue-3);
--atom-hue-4: var(--light-atom-hue-4);
--atom-hue-5: var(--light-atom-hue-5);
--atom-hue-5-2: var(--light-atom-hue-5-2);
--atom-hue-6: var(--light-atom-hue-6);
--atom-hue-6-2: var(--light-atom-hue-6-2);
}
.theme-dark {
--accent-color: var(--dark-accent-color);
--content-background-color: var(--dark-content-background-color);
--shadow-color: var(--dark-shadow-color);
--ui-background-color: var(--dark-ui-background-color);
--ui-text-color: var(--dark-ui-text-color);
--secondary-ui-text-color: var(--dark-secondary-ui-text-color);
--content-text-color: var(--dark-content-text-color);
--aside-background: var(--dark-aside-background);
--aside-border: var(--dark-aside-border);
--aside-warning-background: var(--dark-aside-warning-background);
--aside-warning-border: var(--dark-aside-warning-border);
// Syntax highlighting
--atom-base: var(--dark-atom-base);
--atom-mono-1: var(--dark-atom-mono-1);
--atom-mono-2: var(--dark-atom-mono-2);
--atom-mono-3: var(--dark-atom-mono-3);
--atom-hue-1: var(--dark-atom-hue-1);
--atom-hue-2: var(--dark-atom-hue-2);
--atom-hue-3: var(--dark-atom-hue-3);
--atom-hue-4: var(--dark-atom-hue-4);
--atom-hue-5: var(--dark-atom-hue-5);
--atom-hue-5-2: var(--dark-atom-hue-5-2);
--atom-hue-6: var(--dark-atom-hue-6);
--atom-hue-6-2: var(--dark-atom-hue-6-2);
}
// General
html {
background-color: var(--content-background-color);
font-family: var(--ui-font);
font-size: 16px;
line-height: 1.6;
color: var(--ui-text-color);
}
body {
// prevent .article-content-wide from showing scroll bar
overflow-x: hidden;
}
.container {
margin: 0 auto;
padding: 0 15px;
}
.main {
max-width: 720px;
margin: 0 auto;
.page-heading {
max-width: 720px;
margin: 20px auto;
margin-bottom: 0;
color: var(--content-text-color);
}
.rss {
margin-top: 0;
font-size: 0.75rem;
font-weight: 300;
color: var(--secondary-ui-text-color);
}
}
article {
margin-bottom: 75px;
color: var(--content-text-color);
border-bottom: 1px solid var(--accent-color);
}
.article-title {
margin-top: 0.7em;
margin-bottom: 0;
font-size: 1.7rem;
&::before {
content: "#";
font-family: var(--monospace-font);
color: var(--accent-color);
user-select: none;
}
> a {
color: var(--content-text-color);
text-decoration: none;
transition: 0.3s ease all;
&:hover {
color: var(--accent-color);
}
&::before, &::after {
content: "";
}
}
}
.article-meta {
margin-top: 0;
font-size: 0.9rem;
font-weight: 300;
color: var(--secondary-ui-text-color);
a { text-decoration: underline; }
a::before, a::after { content: ""; }
}
.article-content {
position: relative;
font-family: var(--content-font);
font-size: 1.25rem;
word-wrap: break-word;
h1, h2, h3, h4, h5, h6 {
font-family: var(--ui-font);
.header-anchor {
font-family: var(--monospace-font);
color: var(--accent-color);
user-select: none;
text-decoration: none;
// hide link destination for header anchor links
&::before, &::after { content: ""; }
}
}
h1 { font-size: 1.6rem; }
h2 { font-size: 1.5rem; }
h3 { font-size: 1.4rem; }
hr {
border: none;
height: 1px;
background: var(--accent-color);
}
blockquote {
font-style: italic;
border-left: 3px solid var(--accent-color);
// use margin for vertical spacing so space is shared with elements that come before/after
// and padding for horizontal so that the space is between the border and the text
margin: 20px 0;
padding: 0 40px;
}
p code {
word-break: break-all;
}
aside {
background-color: var(--aside-background);
border: 1px solid var(--aside-border);
padding: 15px;
font-size: 1rem;
box-sizing: border-box;
&.warning {
background-color: var(--aside-warning-background);
border: 1px solid var(--aside-warning-border);
}
p:first-child { margin-top: 0; }
p:last-child { margin-bottom: 0; }
}
.footnotes {
.footnote-item {
display: flex;
flex-direction: row;
align-items: first baseline;
&:not(:last-child) > p:last-child {
margin-bottom: 0;
}
}
.footnote-marker {
margin-right: 6px;
width: 34px;
flex-shrink: 0;
text-align: right;
}
}
// Markdown decorations
@media screen and (min-width: 768px) {
a {
text-decoration: none;
&::before { content: "["; }
&::after { content: "](" attr(data-link) ")"; word-wrap: break-word; }
&::before, &::after {
color: var(--secondary-ui-text-color);
font-family: var(--monospace-font);
font-size: 0.75em;
word-break: break-all;
}
}
a[data-no-link-decoration] {
text-decoration: underline;
&::before { content: ""; }
&::after { content: ""; }
}
sup.footnote-reference > a::before,
sup.footnote-reference > a::after,
a.footnote-backref::before, a.footnote-backref::after {
content: "";
}
}
code::before, code::after {
content: "`";
font-family: var(--monospace-font);
color: var(--secondary-ui-text-color);
}
pre code::before,
pre code::after {
// we don't show the decorations for pre blocks, because it can interfere with scrolling horizontally
content: "";
display: none;
}
strong::before, strong::after {
content: "**";
font-family: var(--monospace-font);
color: var(--secondary-ui-text-color);
}
em::before, em::after {
content: "_";
font-family: var(--monospace-font);
color: var(--secondary-ui-text-color);
}
s::before, s::after {
content: "~~";
font-family: var(--monospace-font);
color: var(--secondary-ui-text-color);
}
code {
strong::before, strong::after,
em::before, em::after,
s::before, s::after {
content: "";
}
}
}
.article-content-wide {
width: 100vw;
position: relative;
left: 50%;
margin-left: -50vw;
padding: 10px 0;
margin-top: -10px;
margin-bottom: -10px;
p:first-child {
margin-top: 0;
}
p:last-child {
margin-bottom: 0;
}
}
.theme-dark, .theme-light, .theme-inverted {
color: var(--content-text-color);
background-color: var(--content-background-color);
}
#comments-container {
border-top: 1px solid var(--accent-color);
padding: 1rem 0;
#comments-container-title {
display: inline-block;
margin: 0;
cursor: pointer;
vertical-align: middle;
}
#comments-info {
margin: 0;
}
#remote-interact {
display: flex;
flex-direction: row;
align-items: baseline;
input {
margin-left: 4px;
}
input[type=text] {
flex-grow: 1;
padding: 0 4px;
background-color: var(--content-background-color);
border: 1px solid var(--accent-color);
font-size: 1rem;
line-height: 2rem;
color: var(--content-text-color);
}
input[type=submit] {
background-color: var(--ui-background-color);
border: 1px solid var(--accent-color);
color: var(--accent-color);
line-height: 2rem;
padding: 0 1rem;
text-decoration: none;
font-weight: bold;
text-transform: uppercase;
-webkit-transition: 0.3s ease-out;
transition: 0.3s ease-out;
&:hover {
background-color: var(--accent-color);
color: var(--ui-background-color);
cursor: pointer;
}
}
}
#comments-js-warning {
background-color: var(--atom-hue-5-2);
padding: 10px;
margin-top: 0;
border: 1px solid var(--atom-hue-5);
border-radius: 5px;
}
> .comments-list {
margin-bottom: 0;
}
.comment:not(:last-child) {
margin-bottom: 16px;
}
.comment-user-avatar {
width: 50px;
border-radius: 5px;
float: left;
margin-right: 10px;
}
@media (min-width: 768px) {
.comments-list {
padding-left: 0px;
}
.comment > .comments-list {
margin-top: 0px;
}
.comment-info {
margin-top: 0px;
margin-bottom: 5px;
}
.comment-children {
margin-left: 60px;
margin-top: 16px;
}
}
}
.tv-show {
details {
margin-bottom: 2em;
> p:first-of-type {
margin-top: 0;
}
p {
margin: 10px;
}
> :not(summary) {
font-family: var(--content-font);
font-size: 1.25rem;
word-wrap: break-word;
}
}
summary {
display: flex;
align-items: baseline;
&:hover {
cursor: pointer;
}
// needed for safari
&::-webkit-details-marker {
display: none;
}
&::before {
content: "";
align-self: baseline;
margin-right: 8px;
}
h2 {
margin: 0;
flex-grow: 1;
}
.episode-watched {
color: var(--secondary-ui-text-color);
font-size: 1rem;
}
}
details[open] summary::before {
transform: rotate(90deg);
}
}
.error-page {
margin: 0 auto;
text-align: center;
h3 {
position: relative;
color: var(--content-text-color);
line-height: 1.3;
text-align: center;
margin: 0;
}
input#q {
display: block;
width: 75%;
margin: 10px auto;
padding: 4px;
background-color: var(--content-background-color);
border: 1px solid var(--accent-color);
font-size: 1rem;
color: var(--content-text-color);
}
input[type=submit] {
display: block;
margin: 10px auto;
background-color: var(--ui-background-color);
border: 1px solid var(--accent-color);
color: var(--accent-color);
line-height: 2rem;
padding: 0 1rem;
text-decoration: none;
font-weight: bold;
text-transform: uppercase;
-webkit-transition: 0.3s ease-out;
transition: 0.3s ease-out;
&:hover {
background-color: var(--accent-color);
color: var(--ui-background-color);
cursor: pointer;
}
}
}
.icon > svg {
display: inline-block;
width: 16px;
height: 16px;
vertical-align: middle;
color: grey;
}
a {
color: var(--accent-color);
text-decoration: underline;
&.fancy-link {
position: relative;
color: var(--ui-text-color);
text-decoration: none;
transition: 0.3s ease all;
> span {
position: absolute;
top: -0.15em;
transition: 0.3s ease all;
font-family: var(--monospace-font);
color: transparent;
&:first-child {
left: 0.5em;
}
&:last-child {
right: 0.5em;
}
}
&:hover {
color: var(--accent-color);
> span {
color: var(--accent-color);
&:first-child {
left: -0.75em;
}
&:last-child {
right: -0.75em;
}
}
}
}
}
pre {
overflow-x: scroll;
-moz-tab-size: 4;
-o-tab-size: 4;
tab-size: 4;
word-wrap: normal;
font-family: var(--monospace-font);
}
code {
font-family: var(--monospace-font);
font-size: 0.8em;
}
img {
display: block;
margin: 0 auto;
max-width: 100%;
}
figure {
margin: 0;
figcaption {
font-family: var(--ui-font);
font-size: 1rem;
font-style: italic;
color: var(--secondary-ui-text-color);
text-align: center;
}
}
table {
width: 100%;
border-collapse: collapse;
border: 1px solid #bbb;
tr, td, th {
border: 1px solid #bbb;
}
td, th {
padding: 0 0.5em;
text-align: left;
}
thead > tr, tbody > tr:nth-child(even) {
background-color: #eee;
}
}
// Header
.site-header {
padding-top: 20px;
padding-bottom: 20px;
background-color: var(--ui-background-color);
font-size: 1rem;
> div {
display: flex;
flex-direction: row;
justify-content: space-between;
align-items: flex-end;
flex-wrap: wrap;
padding-bottom: 10px;
border-bottom: 3px solid var(--accent-color);
}
.site-title {
margin: 0;
font-size: 2em;
font-variant: small-caps;
}
.site-description {
color: var(--secondary-ui-text-color);
font-variant: small-caps;
margin: 0;
}
.site-nav ul {
padding: 0;
margin: 0;
display: inline-block;
position: relative;
li {
list-style: none;
display: inline;
font-variant: small-caps;
font-weight: bold;
&:not(:last-child) {
margin-right: 1em;
}
a.dropdown-link {
color: var(--ui-text-color);
text-decoration: none;
.arrow-down {
display: inline-block;
width: 0.5em;
height: 0.5em;
margin-bottom: 2px;
margin-left: 2px;
border-bottom: 2px solid var(--ui-text-color);
border-right: 2px solid var(--ui-text-color);
transform: rotate(45deg);
}
}
ul {
visibility: hidden;
opacity: 0;
min-width: 5rem;
position: absolute;
transition: 0.3s ease all;
right: -1em;
display: block;
padding: 1em;
background-color: var(--ui-background-color);
border: 1px solid var(--accent-color);
z-index: 10;
li {
width: 100%;
display: block;
margin-right: 0px;
text-align: right;
&:not(:last-child) {
margin-bottom: 1em;
}
}
}
&:hover > ul,
&:focus-within > ul,
ul:hover,
ul:focus {
visibility: visible;
opacity: 1;
display: block;
}
}
}
}
// Footer
.site-footer {
margin-top: 75px;
margin-bottom: 20px;
background-color: var(--ui-background-color);
font-size: 16px;
display: flex;
flex-direction: row;
justify-content: space-between;
flex-wrap: wrap;
align-items: baseline;
> * {
width: 50%;
}
.site-title {
margin: 0;
font-variant: small-caps;
font-size: 1.5em;
}
.ui-controls {
order: 1;
input {
display: none;
}
label {
color: var(--accent-color);
text-decoration: underline;
&:hover {
cursor: pointer;
}
}
input:checked + label {
color: var(--ui-text-color);
text-decoration: none;
font-weight: bold;
&:hover {
cursor: default;
}
}
}
.social-links ul {
padding: 0;
margin: 0;
text-align: right;
li {
list-style: none;
display: inline;
font-variant: small-caps;
font-weight: bold;
&:not(:last-child) {
margin-right: 1em;
}
}
}
.webring {
order: 2;
background: var(--webring-background);
background-clip: text;
font-size: 1.2em;
font-variant: small-caps;
font-weight: 900;
text-align: right;
a {
text-decoration: none;
color: transparent;
}
}
}
// Pagination
.pagination {
text-align: center;
p {
margin: 0;
}
.pagination-link {
color: var(--accent-color);
a {
text-decoration: none;
span:not(.arrow) {
text-decoration: underline;
}
}
.arrow-left {
display: inline-block;
width: 0.5em;
height: 0.5em;
margin-right: -5px;
border-left: 2px solid var(--accent-color);
border-bottom: 2px solid var(--accent-color);
transform: rotate(45deg);
}
.arrow-right {
display: inline-block;
width: 0.5em;
height: 0.5em;
margin-left: -5px;
border-right: 2px solid var(--accent-color);
border-bottom: 2px solid var(--accent-color);
transform: rotate(-45deg);
}
}
}
// Media Queries
@media (min-width: 540px) {
.container {
max-width: 540px;
}
article::after {
max-width: 540px;
}
}
@media (min-width: 768px) {
.container {
max-width: 720px;
}
article::after {
max-width: 720px;
}
}
@media (max-width: 768px) {
.site-footer {
display: block;
> * {
width: 100%;
}
.social-links ul, .webring {
text-align: unset;
}
}
}
// 720 + 30 + 720 + 15
// main content, l/r container padding, aside width (50% on each side), outer edge margin
// inner edge margin overlaps with container padding
@media (min-width: 1485px) {
article .article-content aside:not(.inline) {
width: 50%;
position: absolute;
left: 100%;
margin-left: 15px;
margin-right: 15px;
transform: translateY(-50%);
}
}

site/css/normalize.scss vendored Normal file (+349)

@@ -0,0 +1,349 @@
/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */
/* Document
========================================================================== */
/**
* 1. Correct the line height in all browsers.
* 2. Prevent adjustments of font size after orientation changes in iOS.
*/
html {
line-height: 1.15; /* 1 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/* Sections
========================================================================== */
/**
* Remove the margin in all browsers.
*/
body {
margin: 0;
}
/**
* Render the `main` element consistently in IE.
*/
main {
display: block;
}
/**
* Correct the font size and margin on `h1` elements within `section` and
* `article` contexts in Chrome, Firefox, and Safari.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/* Grouping content
========================================================================== */
/**
* 1. Add the correct box sizing in Firefox.
* 2. Show the overflow in Edge and IE.
*/
hr {
box-sizing: content-box; /* 1 */
height: 0; /* 1 */
overflow: visible; /* 2 */
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
pre {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/* Text-level semantics
========================================================================== */
/**
* Remove the gray background on active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* 1. Remove the bottom border in Chrome 57-
* 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
*/
abbr[title] {
border-bottom: none; /* 1 */
text-decoration: underline; /* 2 */
text-decoration: underline dotted; /* 2 */
}
/**
* Add the correct font weight in Chrome, Edge, and Safari.
*/
b,
strong {
font-weight: bolder;
}
/**
* 1. Correct the inheritance and scaling of font size in all browsers.
* 2. Correct the odd `em` font sizing in all browsers.
*/
code,
kbd,
samp {
font-family: monospace, monospace; /* 1 */
font-size: 1em; /* 2 */
}
/**
* Add the correct font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` elements from affecting the line height in
* all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sub {
bottom: -0.25em;
}
sup {
top: -0.5em;
}
/* Embedded content
========================================================================== */
/**
* Remove the border on images inside links in IE 10.
*/
img {
border-style: none;
}
/* Forms
========================================================================== */
/**
* 1. Change the font styles in all browsers.
* 2. Remove the margin in Firefox and Safari.
*/
button,
input,
optgroup,
select,
textarea {
font-family: inherit; /* 1 */
font-size: 100%; /* 1 */
line-height: 1.15; /* 1 */
margin: 0; /* 2 */
}
/**
* Show the overflow in IE.
* 1. Show the overflow in Edge.
*/
button,
input { /* 1 */
overflow: visible;
}
/**
* Remove the inheritance of text transform in Edge, Firefox, and IE.
* 1. Remove the inheritance of text transform in Firefox.
*/
button,
select { /* 1 */
text-transform: none;
}
/**
* Correct the inability to style clickable types in iOS and Safari.
*/
button,
[type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button;
}
/**
* Remove the inner border and padding in Firefox.
*/
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
border-style: none;
padding: 0;
}
/**
* Restore the focus styles unset by the previous rule.
*/
button:-moz-focusring,
[type="button"]:-moz-focusring,
[type="reset"]:-moz-focusring,
[type="submit"]:-moz-focusring {
outline: 1px dotted ButtonText;
}
/**
* Correct the padding in Firefox.
*/
fieldset {
padding: 0.35em 0.75em 0.625em;
}
/**
* 1. Correct the text wrapping in Edge and IE.
* 2. Correct the color inheritance from `fieldset` elements in IE.
* 3. Remove the padding so developers are not caught out when they zero out
* `fieldset` elements in all browsers.
*/
legend {
box-sizing: border-box; /* 1 */
color: inherit; /* 2 */
display: table; /* 1 */
max-width: 100%; /* 1 */
padding: 0; /* 3 */
white-space: normal; /* 1 */
}
/**
* Add the correct vertical alignment in Chrome, Firefox, and Opera.
*/
progress {
vertical-align: baseline;
}
/**
* Remove the default vertical scrollbar in IE 10+.
*/
textarea {
overflow: auto;
}
/**
* 1. Add the correct box sizing in IE 10.
* 2. Remove the padding in IE 10.
*/
[type="checkbox"],
[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Correct the cursor style of increment and decrement buttons in Chrome.
*/
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Correct the odd appearance in Chrome and Safari.
* 2. Correct the outline style in Safari.
*/
[type="search"] {
-webkit-appearance: textfield; /* 1 */
outline-offset: -2px; /* 2 */
}
/**
* Remove the inner padding in Chrome and Safari on macOS.
*/
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* 1. Correct the inability to style clickable types in iOS and Safari.
* 2. Change font properties to `inherit` in Safari.
*/
::-webkit-file-upload-button {
-webkit-appearance: button; /* 1 */
font: inherit; /* 2 */
}
/* Interactive
========================================================================== */
/*
* Add the correct display in Edge, IE 10+, and Firefox.
*/
details {
display: block;
}
/*
* Add the correct display in all browsers.
*/
summary {
display: list-item;
}
/* Misc
========================================================================== */
/**
* Add the correct display in IE 10+.
*/
template {
display: none;
}
/**
* Add the correct display in IE 10.
*/
[hidden] {
display: none;
}

site/css/syntax-highlighting.scss Normal file (+93)

@@ -0,0 +1,93 @@
/*
Atom One color scheme by Daniel Gamage
Modified to use colors from CSS vars, defined in theme.scss
*/
.highlight {
display: block;
overflow-x: auto;
padding: 0.5em;
color: var(--atom-mono-1);
background: var(--atom-base);
}
.highlight > code {
display: block;
}
.hl-cmt,
.hljs-quote {
color: var(--atom-mono-3);
font-style: italic;
}
.hljs-doctag,
.hl-kw,
.hljs-formula {
color: var(--atom-hue-3);
}
.hljs-section,
.hljs-name,
.hljs-selector-tag,
.hljs-deletion,
.hljs-subst,
.hl-punct-sp,
.hl-tag {
color: var(--atom-hue-5);
}
.hl-emb {
color: var(--atom-mono-1);
}
.hljs-literal,
.hl-attr,
.hl-mod,
.hl-key {
color: var(--atom-hue-1);
}
.hl-str,
.hljs-regexp,
.hljs-addition,
.hljs-meta-string,
.hl-prop {
color: var(--atom-hue-4);
}
.hl-builtin,
.hljs-class .hljs-title {
color: var(--atom-hue-6-2);
}
.hljs-attr,
.hljs-template-variable,
.hl-type,
.hljs-selector-class,
.hljs-selector-attr,
.hljs-selector-pseudo,
.hl-num {
color: var(--atom-hue-6);
}
.hljs-symbol,
.hljs-bullet,
.hljs-link,
.hljs-meta,
.hljs-selector-id,
.hl-fn {
color: var(--atom-hue-2);
}
.hljs-emphasis {
font-style: italic;
}
.hljs-strong {
font-weight: bold;
}
.hljs-link {
text-decoration: underline;
}

site/includes/article-listing.html Normal file (+26)

@@ -0,0 +1,26 @@
<article itemscope itemtype="https://schema.org/BlogPosting">
<h2 class="article-title" itemprop="headline">
<a href="{{ post.permalink() }}" itemprop="url mainEntityOfPage">
{% match post.metadata.html_title %}
{% when Some with (html) %}
{{ html|safe }}
{% when None %}
{{ post.metadata.title }}
{% endmatch %}
</a>
</h2>
{% include "includes/article-meta.html" %}
<div class="article-content" itemprop="description">
{% match post.excerpt %}
{% when Some with (excerpt) %}
{{ excerpt|safe }}
{% when None %}
{{ post.content.html()|safe }}
{% endmatch %}
</div>
{% if post.excerpt.is_some() %}
<p class="read-more-link">
<a href="{{ post.permalink() }}">Read more...</a>
</p>
{% endif %}
</article>

site/includes/article-meta.html Normal file (+27)

@@ -0,0 +1,27 @@
<p class="article-meta">
<span itemprop="author" itemscope="" itemtype="https://schema.org/Person">
<meta itemprop="name" content="Shadowfacts">
</span>
on
<span>
<time itemprop="datePublished" datetime="{{ post.metadata.date|iso_datetime }}" title="{{ post.metadata.date|pretty_datetime }}">
{{ post.metadata.date|pretty_date }}
</time>
</span>
{% match post.metadata.tags %}
{% when Some with (tags) %}
in
{% for tag in tags %}
<span itemprop="articleSection">
<a href="/{{ tag.slug }}/">{{ tag.name }}</a>{% if !loop.last %},{% endif %}
</span>
{% endfor %}
{% when None %}
{% endmatch %}
{% match post.word_count %}
{% when Some with (wc) %}
<span title="{{ wc }} words">{{ wc|reading_time }} min read</span>
{% when None %}
{% endmatch %}
</p>

site/includes/pagination.html Normal file (+29)

@@ -0,0 +1,29 @@
<div class="pagination" role="navigation">
<p>
<span class="pagination-link">
{% match pagination_info.prev_href %}
{% when Some with (href) %}
<a href="{{ href }}">
<span class="arrow arrow-left" aria-hidden="true"></span>
<span>Previous</span>
</a>
{% when None %}
<span class="arrow arrow-left" aria-hidden="true"></span>
<span>Previous</span>
{% endmatch %}
</span>
Page {{ pagination_info.page }} of {{ pagination_info.total_pages }}
<span class="pagination-link">
{% match pagination_info.next_href %}
{% when Some with (href) %}
<a href="{{ href }}">
<span>Next</span>
<span class="arrow arrow-right" aria-hidden="true"></span>
</a>
{% when None %}
<span>Next</span>
<span class="arrow arrow-right" aria-hidden="true"></span>
{% endmatch %}
</span>
</p>
</div>

site/index.html Normal file (+13)

@@ -0,0 +1,13 @@
{% extends "layout/default.html" %}
{% block title %}Shadowfacts{% endblock %}
{% block content -%}
{% for post in posts %}
{% include "includes/article-listing.html" %}
{% endfor %}
{% include "includes/pagination.html" %}
{%- endblock %}

site/layout/article.html Normal file (+76)

@@ -0,0 +1,76 @@
{% extends "layout/default.html" %}
{% block head -%}
{% match post.metadata.short_desc %}
{% when Some with (val) %}
<meta property="og:type" content="article">
<meta property="og:description" content="{{ val }}">
{% when None %}
<meta property="og:type" content="website">
<meta property="og:description" content="The outer part of a shadow is called the penumbra.">
{% endmatch %}
{%- endblock %}
{% block image %}
{% match post.metadata.card_image_path %}
{% when Some with (path) %}
<meta property="twitter:image" content="https://{{ Self::domain() }}{{ path }}">
<meta property="og:image" content="https://{{ Self::domain() }}{{ path }}">
{% when None %}
<meta property="twitter:image" content="https://{{ Self::domain() }}/shadowfacts.png">
<meta property="og:image" content="https://{{ Self::domain() }}/shadowfacts.png">
{% endmatch %}
{% endblock %}
{% block title %}{{ post.metadata.title }}{% endblock %}
{% block content -%}
<article itemprop="blogPost" itemscope itematype="https://schema.org/BlogPosting">
<meta itemprop="mainEntityOfPage" content="https://{{ Self::domain() }}{{ self.permalink() }}">
<h1 class="article-title" itemprop="name headline">
{% match post.metadata.html_title %}
{% when Some with (html) %}
{{ html|safe }}
{% when None %}
{{ post.metadata.title }}
{% endmatch %}
</h1>
{% include "includes/article-meta.html" %}
<div class="article-content" itemprop="articleBody">
{% match post.metadata.preamble %}
{% when Some with (html) %}
{{ html|safe }}
{% when None %}
{% endmatch %}
{{ post.content.html()|safe }}
</div>
<details id="comments-container">
<summary>
<h2 id="comments-container-title">Comments</h2>
</summary>
<p class="comments-info">
Comments powered by ActivityPub. To respond to this post, enter your username and instance below, or copy its URL into the search interface for client for Mastodon, Pleroma, or other compatible software.
<a href="/2019/reincarnation/#activity-pub">Learn more</a>.
</p>
<form id="remote-interact" action="/interact" method="POST">
<span>Reply from your instance:</span>
<input type="hidden" name="permalink" value="{{ post.comments_permalink() }}">
<!-- name needs to be exactly this to get the browser to use the same completions as mastodon -->
<input type="text" placeholder="Enter your user@domain" required id="acct" name="remote_follow[acct]">
<input type="submit" value="Interact">
</form>
<noscript>
<p id="comments-js-warning">JavaScript is required to display comments.</p>
</noscript>
</details>
</article>
<script>
const permalink = "{{ post.comments_permalink() }}";
</script>
<script src="/js/comments.js" async></script>
{%- endblock %}

site/layout/default.html Normal file (+121)

@@ -0,0 +1,121 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>{% block title %}Shadowfacts{% endblock %}</title>
<link rel="cannonical" href="https://{{ Self::domain() }}{{ self.permalink() }}">
<link rel="alternate" type="application/rss+xml" title="Shadowfacts" href="https://{{ Self::domain() }}/feed.xml">
<link rel="icon" href="/favicon.ico">
<link rel="apple-touch-icon-precomposed" href="/favicon-152.png">
<meta name="msapplication-TileColor" content="#F9C72F">
<meta name="msapplication-TileImage" content="/favicon-152.png">
<meta name="twitter:card" content="summary">
<meta name="twitter:creator" content="@ShadowfactsDev">
<meta property="og:title" content="{% block title %}{% endblock %}">
{% block image %}
<meta property="twitter:image" content="https://{{ Self::domain() }}shadowfacts.png">
<meta property="og:image" content="https://{{ Self::domain() }}shadowfacts.png">
{% endblock %}
<meta property="og:url" content="https://{{ Self::domain() }}{ self.permalink() }}">
<meta property="og:site_name" content="Shadowfacts">
{% block head %}{% endblock %}
<script>
(() => {
const theme = localStorage.getItem("theme") || "auto";
document.write(`<link rel="stylesheet" href="/css/${theme}.css?{{ Self::stylesheet_cache_buster() }}">`);
})();
</script>
<noscript>
<link rel="stylesheet" href="/css/auto.css?{{ Self::stylesheet_cache_buster() }}">
</noscript>
</head>
<body itemscope itemtype="https://schema.org/Blog">
<header class="site-header container">
<div>
<div>
<h1 class="site-title">{{ Self::fancy_link("Shadowfacts", "/", None)|safe }}</h1>
<p class="site-description">The outer part of a shadow is called the penumbra.</p>
</div>
<nav class="site-nav" role="navigation">
<ul>
<li>{{ Self::fancy_link("Archive", "/archive", None)|safe }}</li>
<li>{{ Self::fancy_link("Tutorials", "/tutorials", None)|safe }}</li>
<li>{{ Self::fancy_link("TV", "/tv", None)|safe }}</li>
<li>
<a href="#" class="dropdown-link" aria-haspopup="true">Other <span class="arrow arrow-down" aria-hidden="true"></span></a>
<ul aria-label="Other links">
<li>{{ Self::fancy_link("Gitea", "https://git.shadowfacts.net", None)|safe }}</li>
<li>{{ Self::fancy_link("RTFM", "https://rtfm.shadowfacts.net", None)|safe }}</li>
<li>{{ Self::fancy_link("Maven", "https://maven.shadowfacts.net", None)|safe }}</li>
<li>{{ Self::fancy_link("Type", "https://type.shadowfacts.net", None)|safe }}</li>
<li>{{ Self::fancy_link("Meme Machine", "https://mememachine.shadowdfacts.net", None)|safe }}</li>
</ul>
</li>
</ul>
</nav>
</div>
</header>
<div class="container main" role="main">
{% block content %}{% endblock %}
</div>
<footer class="site-footer container">
<h2 class="site-title">Shadowfacts</h2>
<span class="ui-controls">
Theme:
<input type="radio" name="theme" id="auto" value="auto">
<label for="auto">auto</label>
<input type="radio" name="theme" id="light" value="light">
<label for="light">light</label>
<input type="radio" name="theme" id="dark" value="dark">
<label for="dark">dark</label>
</span>
<nav class="social-links">
<ul>
<li>{{ Self::fancy_link("Email", "mailto:me@shadowfacts.net", Some("rel=me"))|safe }}</li>
<li>{{ Self::fancy_link("RSS", "/feed.xml", None)|safe }}</li>
<li>{{ Self::fancy_link("GitHub", "https://github.com/shadowfacts", Some("rel=me"))|safe }}</li>
<li>{{ Self::fancy_link("Twitter", "https://twitter.com/ShadowfactsDev", Some("rel=me"))|safe }}</li>
<li>{{ Self::fancy_link("Mastodon", "https://social.shadowfacts.net/users/shadowfacts", Some("rel=me"))|safe }}</li>
</ul>
</nav>
<aside class="webring">
<a href="https://metro.bieszczady.pl">Metro Bieszczady webring</a>
<a title="previous page in webring" href="https://metro.bieszczady.pl/cgi-bin/webring?action=previous&amp;from=shadowfacts"></a>
<a title="next page in webring" href="https://metro.bieszczady.pl/cgi-bin/webring?action=next&amp;from=shadowfacts"></a>
</aside>
<script>
(() => {
const theme = localStorage.getItem("theme") || "auto";
document.getElementsByName("theme").forEach((el) => {
el.checked = theme === el.value;
el.onchange = () => {
localStorage.setItem("theme", el.value);
window.location.reload();
};
});
})();
</script>
<noscript>
<style>
.ui-controls {
display: none;
}
</style>
</noscript>
</footer>
<script data-goatcounter="https://shadowfacts.goatcounter.com/count" async src="//gc.zgo.at/count.v3.js" crossorigin="anonymous" integrity="sha384-QGgNMMRFTi8ul5kHJ+vXysPe8gySvSA/Y3rpXZiRLzKPIw8CWY+a3ObKmQsyDr+a"></script>
</body>
</html>

@@ -0,0 +1,21 @@
{% extends "layout/default.html" %}
{% block title %}{{ series.name }}{% endblock %}
{% block content -%}
<h1 class="page-heading">{{ series.name }}</h1>
{% for post in series.posts %}
<article>
<h2 class="article-title">
<a href="/tutorials/{{ series.slug }}/{{ post.slug }}/">
{{ post.metadata.title }}
</a>
</h2>
{% include "includes/article-meta.html" %}
</article>
{% endfor %}
{%- endblock %}

site/layout/tutorial.html Normal file (+20)

@@ -0,0 +1,20 @@
{% extends "layout/default.html" %}
{% block title %}{{ post.metadata.title }}{% endblock %}
{% block content -%}
<article itemprop="blogPost" itemscope="" itemtype="https://schema.org/BlogPosting">
<meta itemprop="mainEntityOfPage" content="https://{{ Self::domain() }}{{ self.permalink() }}">
<h1 class="article-title" itemprop="name headline">
{{ post.metadata.title }}
</h1>
{% include "includes/article-meta.html" %}
<div class="article-content" itemprop="articleBody">
{{ post.content|safe }}
</div>
</article>
{%- endblock %}

@@ -0,0 +1,10 @@
```
title = "Hello, World!"
tags = ["meta"]
date = "2016-05-06 11:13:18 -0400"
old_permalink = ["/meta/2016/06/07/hello-world/", "/meta/2016/hello-world/"]
short_desc = "Hello again, world! Welcome to the third iteration of my website."
```
Hello again, world! Welcome to the third iteration of my website. Originally my site was hosted on GitHub Pages and only available at [shadowfacts.github.io](https://shadowfacts.github.io). I wrote a couple of tutorials on using [Forge](http://minecraftforge.net) to mod 1.6.4, but never really finished anything other than super basic setup/recipes. Later, after I got [shadowfacts.net](https://shadowfacts.net), I decided to set up a proper website using [WordPress](https://wordpress.org). I copied over all of the old tutorials from my old GitHub Pages site, but never really did anything else with it. After my website had been offline for almost a year, I've finally decided to switch back to GitHub for the simplicity (also I <3 [Jekyll](https://jekyllrb.com)). Using Jekyll, I've got a structure in place that I can use to easily publish tutorials in a structured format. There is one tutorial series that I'm currently writing, [Forge Mods in 1.9](/tutorials/forge-modding-19/), and hopefully more series will follow.

@@ -0,0 +1,46 @@
```
title = "1.9.4 Porting Spree"
tags = ["minecraft"]
date = "2016-05-21 17:47:18 -0400"
old_permalink = ["/mods/2016/05/21/194-porting-spree/", "/minecraft/2016/1-9-4-porting-spree/"]
short_desc = "Now that Forge for 1.9.4 is out, I've begun the log and arduous process of porting my mods to 1.9.4 (if by long and arduous, you mean short and trivial)."
```
Now that Forge for 1.9.4 is [out](http://files.minecraftforge.net/maven/net/minecraftforge/forge/index_1.9.4.html), I've begun the log and arduous process of porting my mods to 1.9.4 (if by long and arduous, you mean short and trivial).
<!-- excerpt-end -->
<div class="mod">
<h3 class="mod-name"><a href="http://minecraft.curseforge.com/projects/shadowmc">ShadowMC</a></h3>
<span class="mod-version"><a href="http://minecraft.curseforge.com/projects/shadowmc/files/2301829">3.3.0</a></span>
<p class="mod-desc">
The library mod required by all of my other mods.
</p>
</div>
<div class="mod">
<h3 class="mod-name"><a href="http://minecraft.curseforge.com/projects/sleeping-bag">Sleeping Bag</a></h3>
<span class="mod-version"><a href="http://minecraft.curseforge.com/projects/sleeping-bag/files/2301830">1.2.0</a></span>
<p class="mod-desc">
Adds a simple sleeping bag item that is usable anywhere and doesn't set your spawn, which makes it quite handy for bringing on adventures.
</p>
</div>
<div class="mod">
<h3 class="mod-name"><a href="http://minecraft.curseforge.com/projects/ye-olde-tanks">Ye Olde Tanks</a></h3>
<span class="mod-version"><a href="http://minecraft.curseforge.com/projects/ye-olde-tanks/files/2301852">1.7.0</a></span>
<p class="mod-desc">
Fluid stuff: Fluid barrels, creative fluid barrels, fluid barrel minecarts, infinite water buckets.
</p>
</div>
<div class="mod">
<h3 class="mod-name"><a href="http://minecraft.curseforge.com/projects/discordchat">DiscordChat</a></h3>
<span class="mod-version"><a href="http://minecraft.curseforge.com/projects/discordchat/files/2301839">1.2.0</a></span>
<p class="mod-desc">
Merges a Discord channel with Minecraft chat, primarily intended for servers.
</p>
</div>
<div class="mod">
<h3 class="mod-name"><a href="http://minecraft.curseforge.com/projects/shadowtweaks">ShadowTweaks</a></h3>
<span class="mod-version"><a href="http://minecraft.curseforge.com/projects/shadowtweaks/files/2302146">1.9.0</a></span>
<p class="mod-desc">A little tweaks mod with a variety of client/server tweaks.</p>
</div>

@@ -0,0 +1,14 @@
```
title = "Introducing RTFM"
tags = ["minecraft"]
date = "2016-06-29 12:00:00 -0400"
old_permalink = ["/meta/2016/06/29/introducing-rtfm/", "/minecraft/2016/introducing-rtfm/"]
short_desc = "RTFM is the brand new website that will contain the documentation for all of my projects, currently it only contains documentation for MC mods."
```
[RTFM](https://rtfm.shadowfacts.net/) is the brand new website that will contain the documentation for all of my projects, currently it only contains documentation for MC mods. Like this website, it is [hosted on GitHub](https://github.com/shadowfacts/RTFM) using GitHub pages.
<!-- excerpt-end -->
![XKCD #293 RTFM](https://imgs.xkcd.com/comics/rtfm.png)

@@ -0,0 +1,10 @@
```
title = "Forge Modding Tutorials for 1.10.2"
tags = ["minecraft"]
date = "2016-06-30 10:35:00 -0400"
old_permalink = ["/meta/2016/06/30/forge-1102-tutorials/", "/minecraft/2016/forge-modding-tutorials-for-1-10-2"]
short_desc = "The Forge modding tutorials have all the been updated to MC 1.10.2 as has the GitHub repo."
```
The Forge modding tutorials have all the been [updated to MC 1.10.2](/tutorials/forge-modding-1102/) as has the [GitHub repo](https://github.com/shadowfacts/TutorialMod/).

@@ -0,0 +1,152 @@
```
title = "Introducing Mirror"
tags = ["java"]
date = "2016-07-28 16:45:00 -0400"
old_permalink = ["/java/2016/07/28/introducing-mirror/", "/java/2016/introducing-mirror/"]
short_desc = "Allow me to introduce my latest project, Mirror. Mirror is a reflection library for Java designed to take advantage of the streams, lambdas, and optionals introduced in Java 8."
```
Allow me to introduce my latest project, Mirror. Mirror is a [reflection][] library for Java designed to take advantage of the streams, lambdas, and optionals introduced in Java 8.
<!-- excerpt-end -->
The source code is publicly available on [GitHub][source] under the MIT license and the JavaDocs are viewable [here][docs].
## Installation
All versions of Mirror are [available on my Maven][maven].
### Maven
```plaintext
<repositories>
<repository>
<id>shadowfacts</id>
<url>http://mvn.rx14.co.uk/shadowfacts/</url>
</repository>
</repositories>
<dependency>
<groupId>net.shadowfacts</groupId>
<artifactId>Mirror</artifactId>
<version>1.0.0</version>
</dependency>
```
### Gradle
```plaintext
repositories {
maven {
name "shadowfacts"
url "http://mvn.rx14.co.uk/shadowfacts/"
}
}
dependencies {
compile group: "net.shadowfacts", name: "Mirror", version: "1.0.0"
}
```
## Usage
A couple of simple examples for getting started with Mirror.
For more complex examples of everything possible with Mirror, you can look at the [unit tests][tests].
### General Overview
The `Mirror.of` methods are used to retrieve mirrors on which operations can be performed. The types of mirrors are:
- [`MirrorClass`][class]
- [`MirrorEnum`][enum]
- [`MirrorConstructor`][constructor]
- [`MirrorMethod`][method]
- [`MirrorField`][field]
The `Mirror.ofAll` methods are used to create mirror stream wrappers for a given stream/collection/array of reflection objects or mirrors.
These examples will use the following classes:
```java
public class Test {
public static String name = "Mirror";
public static String author;
public static String reverse(String str) {
return new StringBuilder(str).reverse().toString();
}
}
public class Test2 {
public static String name = "Test 2";
public static void doSomething() {
}
}
```
### Getting Fields
```java
// get the field
Optional<MirrorField> optional = Mirror.of(Test.class).field("name");
// unwrap the optional
MirrorField field = optional.get();
// get the value of the field
// we pass null as the instance because the field is static
field.get(null); // "Mirror"
```
### Setting Fields
```java
// get the field
Optional<MirrorField> optional = Mirror.of(Test.class).field("author");
// unwrap the optional
MirrorField field = optional.get();
// set the value of the field
// we once again pass null as the instance because the field is static
field.set(null, "Shadowfacts");
```
### Invoking Methods
```java
// get the method using the name and the types of the arguments it accepts
Optional<MirrorMethod> optional = Mirror.of(Test.class).method("reverse", String.class);
// unwrap the optional
MirrorMethod method = optional.get();
// invoke the method
method.invoke(null, "Mirror"); // "rorriM";
```
### Class Streams
```java
Mirror.ofAllUnwrapped(Test.class, Test2.class) // create the stream of classes
.unwrap() // map the MirrorClasses to their Java versions
.toArray(); // [Test.class, Test2.class]
```
### Field Streams
```java
Mirror.ofAllUnwrapped(Test.class, Test2.class) // create the stream of classes
.flatMapToFields() // flat map the classes to their fields
.get(null) // get the value of the fields on null
.toArray(); // ["Mirror", "Shadowfacts", "Tesst 2"]
```
### Method Streams
```java
Mirror.ofAllUnwrapped(Test.class, Test2.class) // create the stream of classes
.flatMapToMethods() // flat map the classes to their methods
.filter(m -> Arrays.equals(m.parameterTypes(), new MirrorClass<?>[]{Mirror.of(String.class)})) // keep only the methods that accept a single String
.invoke(null, "Shadowfacts") // invoke them all on a null instance, passing in "Shadowfacts"
.toArray(); // ["stcafwodahS"]
```
[reflection]: https://en.wikipedia.org/wiki/Reflection_(computer_programming)
[source]: https://github.com/shadowfacts/Mirror/
[docs]: https://shadowfacts.net/Mirror/
[maven]: http://mvn.rx14.co.uk/shadowfacts/net/shadowfacts/Mirror
[tests]: https://github.com/shadowfacts/Mirror/tree/master/src/test/java/net/shadowfacts/mirror
[class]: https://shadowfacts.net/Mirror/net/shadowfacts/mirror/MirrorClass.html
[enum]: https://shadowfacts.net/Mirror/net/shadowfacts/mirror/MirrorEnum.html
[constructor]: https://shadowfacts.net/Mirror/net/shadowfacts/mirror/MirrorConstructor.html
[method]: https://shadowfacts.net/Mirror/net/shadowfacts/mirror/MirrorMethod.html
[field]: https://shadowfacts.net/Mirror/net/shadowfacts/mirror/MirrorField.html

@@ -0,0 +1,40 @@
```
title = "Kotlin and Minecraft Forge"
tags = ["minecraft"]
date = "2016-08-06 16:45:30 -0400"
old_permalink = ["/forge/2016/08/06/kotlin-and-forge/", "/minecraft/2016/kotlin-and-minecraft-forge/"]
short_desc = "So, you wanna use Kotlin in your Forge mod? Well there's good news, I've just released Forgelin, a fork of Emberwalker's Forgelin, a library that provides utilities for using Kotlin with Minecraft/Forge. "
```
So, you wanna use [Kotlin][] in your Forge mod? Well, there's good news: I've just released [Forgelin][], a fork of [Emberwalker's Forgelin][EWForgelin], a library that provides utilities for using Kotlin with Minecraft/Forge.
Forgelin provides a Kotlin language adapter that allows your main mod class to be an [`object`][KotlinObject]. In order to use the language adapter, you must set the `modLanguageAdapter` property in your `@Mod` annotation to `net.shadowfacts.forgelin.KotlinAdapter`.
<!-- excerpt-end -->
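To make that concrete, here's a minimal sketch of what such an entry point looks like. The modid and event handler are hypothetical placeholders; only the adapter class name (and the `required-after:forgelin;` dependency string mentioned in the update below) come from Forgelin itself:
```kotlin
import net.minecraftforge.fml.common.Mod
import net.minecraftforge.fml.common.event.FMLPreInitializationEvent

// Hypothetical mod entry point: a Kotlin object instead of a Java class.
@Mod(
    modid = "examplemod", // placeholder id
    modLanguageAdapter = "net.shadowfacts.forgelin.KotlinAdapter",
    dependencies = "required-after:forgelin;" // nicer error if Forgelin is missing
)
object ExampleMod {
    @Mod.EventHandler
    fun preInit(event: FMLPreInitializationEvent) {
        // ordinary Forge setup code goes here
    }
}
```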
Additionally, Forgelin repackages the Kotlin standard library, reflect library, and runtime so that you don't have to, and so that end users don't have to download the 3 megabytes of Kotlin libraries multiple times.
~~Additionally, Forgelin provides a number of [extensions][KotlinExtensions] (which are viewable [here][ExtensionsList]) for working with Minecraft/Forge.~~
~~While you can shade Forgelin, it is not recommended to do so. It will increase your jar size by approximately 3 megabytes (as Forgelin itself includes the entire Kotlin, standard lib, reflect lib, and runtime) and may cause issues with other mods that shade Kotlin or Forgelin. It is recommended that you have your users download Forgelin from [CurseForge][].~~
**Update Feb 17, 2017:**
1. As of Forgelin 1.1.0, the extensions have been moved from Forgelin to [ShadowMC][].
2. As of Forgelin 1.3.0, Forgelin includes an `@Mod` annotated object. This means:
1. **Forgelin can no longer be shaded.**
2. `required-after:forgelin;` can now be used in the `dependencies` field of your `@Mod` annotation for a nicer error message when Forgelin isn't installed.
A bare-bones example mod using Forgelin is available [here][example].
[Kotlin]: https://kotlinlang.org/
[Forgelin]: https://github.com/shadowfacts/Forgelin
[EWForgelin]: https://github.com/Emberwalker/Forgelin
[KotlinObject]: https://kotlinlang.org/docs/reference/object-declarations.html
[KotlinExtensions]: https://kotlinlang.org/docs/reference/extensions.html
[ExtensionsList]: https://github.com/shadowfacts/Forgelin/tree/master/src/main/kotlin/net/shadowfacts/forgelin/extensions
[ShadowMC]: https://github.com/shadowfacts/ShadowMC/tree/1.11.2/src/main/kotlin/net/shadowfacts/forgelin/extensions
[CurseForge]: https://minecraft.curseforge.com/projects/shadowfacts-forgelin
[example]: https://github.com/shadowfacts/ForgelinExample

View File

@ -0,0 +1,32 @@
```
title = "The Great Redesign"
tags = ["meta"]
date = "2016-08-07 15:39:48 -0400"
old_permalink = ["/meta/2016/08/07/the-great-redesign/", "/meta/2016/the-great-redesign/"]
short_desc = "Welcome to the fourth iteration of my website."
```
Welcome to the fourth iteration of my website. I'm still using Jekyll, however I've rewritten most of the styles from scratch. This theme is based on the [Hacker theme][HackerHexo] for [Hexo][], which is in turn based on the [Hacker WordPress theme][HackerWP], but it has some notable differences.
<!-- excerpt-end -->
### 1\. It's built for Jekyll.
Because Jekyll (and more specifically, GitHub Pages) uses Sass instead of [Styl][] like Hacker, all of the styles had to be rewritten from scratch in SCSS. Most of the original [Minima][] styles were scrapped, except for a couple of code styling details and the footer design.
### 2\. It has a dark theme
This is accomplished by storing the current theme (`dark` or `light`) in a cookie, reading it in the head, and writing a `<link>` element based on the value of the theme. All the styles are stored in `_sass/theme.scss`, and the `css/light.scss` and `css/dark.scss` files store the variable definitions for all the colors used in the theme. Jekyll then compiles the two main SCSS files into two CSS files that each contain [Normalize.css][Normalize], the theme (compiled from the variable definitions), and the [Darcula][RougeDarcula] syntax highlighting theme.
While this does increase the load time and isn't best practice, I think providing the option of a dark theme (especially when the default theme is incredibly light (the majority of the page is pure white (ooh, triple nested parentheses))) outweighs the cost. Besides, when testing locally, the entire script loading and execution only cost 5 milliseconds, completely unnoticeable.
The selector in the third column of the footer simply updates the cookie value based on the checkbox status and reloads the page via `window.location.reload()`, causing the CSS for the changed theme to be loaded.
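The whole mechanism fits in a few lines. A rough sketch, assuming simple `getCookie`/`setCookie` helpers and a `themeCheckbox` element (all hypothetical names, not the site's actual script):

```javascript
// in the <head>: pick the stylesheet before the page renders
var theme = getCookie("theme") || "light";
document.write('<link rel="stylesheet" href="/css/' + theme + '.css">');

// in the footer: flip the cookie and reload to swap stylesheets
themeCheckbox.addEventListener("change", function() {
  setCookie("theme", themeCheckbox.checked ? "dark" : "light");
  window.location.reload();
});
```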
[HackerHexo]: https://github.com/CodeDaraW/Hacker
[Hexo]: https://hexo.io/
[HackerWP]: https://wordpress.org/themes/hacker/
[Styl]: https://github.com/tj/styl
[Minima]: https://github.com/jekyll/minima
[Normalize]: https://necolas.github.io/normalize.css/
[RougeDarcula]: https://github.com/shadowfacts/RougeDarcula

View File

@ -0,0 +1,116 @@
```
title = "Type: A FOSS clone of typing.io"
tags = ["misc"]
date = "2016-10-08 17:29:42 -0400"
old_permalink = ["/misc/2016/10/08/type/", "/misc/2016/type/", "/2016/type-a-foss-clone-of-typing-io/"]
short_desc = "I made an awesome FOSS clone of typing.io that you can check out at type.shadowfacts.net."
slug = "type"
```
**TL;DR**: I made an awesome FOSS clone of [typing.io](https://typing.io) that you can check out at [type.shadowfacts.net](https://type.shadowfacts.net) and the source of which you can see [here](https://github.com/shadowfacts/type).
I've used [typing.io](https://typing.io) on and off for almost a year now, usually when I'm bored and have nothing else to do. Unfortunately, I recently completed the Java file, the C++ file, and the JavaScript file (that last one took too much time; jQuery has weird code formatting standards, IMO), meaning I've completed pretty much everything that interests me. Now if you want to upload your own code to type, you have to pay $9.99 _a month_, which, frankly, is ridiculous. $10 a month just to upload code to a website and have more than the 17 default files (one for each language), when I could build my own clone.
<!-- excerpt-end -->
This is my fourth attempt at building a clone of typing.io, and the first one that's actually been successful. (The first, second, and third all failed because I was trying to make a syntax highlighting engine work with too much custom code.)
Type uses [CodeMirror](https://codemirror.net/), a fantastic (and very well documented) code editor which handles [syntax highlighting](#syntax-highlighting), [themes](#themes), [cursor handling](#cursor-handling), and [input](#input).
## Input
Input was one of the first things I worked on. (I wanted to get the very basics working before I got caught up in minor details.) CodeMirror's normal input method doesn't work for me, because in Type, all the text is in the editor beforehand and the user doesn't actually type it out. The CodeMirror instance is set to `readOnly` mode, making entering or removing text impossible. This is all well and good, but how can you practice typing if you can't type? Well, you don't actually type. The DOM `keypress` and `keydown` events are used to handle character input and special key input (return, backspace, tab, and escape) respectively.
The `keypress` event handler simply moves the cursor one character and marks the typed character as completed. If the character the user typed isn't the character that's in the document they are typing, a CodeMirror [TextMarker](http://codemirror.net/doc/manual.html#markText) with the `invalid` class will be used to display a red error-highlight to the user. These marks are then stored in a 2-dimensional array which is used to check if the user has actually completed the file.
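A rough sketch of that handler (the `invalids` array and the surrounding wiring are illustrative, not the actual Type source):

```javascript
document.addEventListener("keypress", function(e) {
  var cursor = editor.getCursor();
  var expected = editor.getLine(cursor.line).charAt(cursor.ch);
  var typed = String.fromCharCode(e.charCode);
  if (typed !== expected) {
    // red error highlight, remembered for completion checking later
    var mark = editor.markText(cursor, { line: cursor.line, ch: cursor.ch + 1 },
                               { className: "invalid" });
    (invalids[cursor.line] = invalids[cursor.line] || []).push(mark);
  }
  editor.setCursor({ line: cursor.line, ch: cursor.ch + 1 });
});
```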
The `keydown` event is used for handling special key presses, namely return, backspace, delete, tab, and escape.
When handling a return press, the code first checks if the user has completed the current line. (This is a little bit more complicated than checking if the cursor position is at the end of the line, because Type allows you to skip typing whitespace at the beginning and end of lines, since every IDE/editor under the sun handles that for you.) Then, the editor moves the cursor to the beginning of the next line (see the previous parenthetical).
Backspace handling works much the same way, checking if the user is at the beginning of the line, and if so, moving to the end of the previous line, or otherwise moving back 1 character. Delete also has a bit of extra functionality specific to Type. Whenever you press delete and the previous character was marked as invalid, the invalid mark needs to A) be cleared from the CodeMirror document and B) be removed from the 2D array of invalid marks that's used for completion checking.
The tab key requires special handling because it's not entered as a normal character, so a special check has to be done to see if the next character is a tab character. Type doesn't handle using the tab key with space-indentation like most editors/IDEs do, because in most places where you'd encounter significant amounts of whitespace in the middle of a line, it's a tab character used to line up text across multiple lines.
Escape is handled fairly simply. When escape is pressed and the editor is focused, a global `focus` variable is toggled, causing all other input-handling to be disabled, a **`Paused`** label is added/removed in the top right of the top bar, and lastly the `paused` class is toggled on the page, which, when active, gives the editor 50% opacity, giving it a nice effect that clearly indicates the paused state.
## Cursor Handling
Cursor movement via the mouse is still possible even in CodeMirror's read-only mode, and it's something you obviously don't want in a typing-practice app. Preventing it is as simple as adding a listener for CodeMirror's `mousedown` event, calling `preventDefault` to stop the editor's default behavior, and then calling the `focus` method on the editor instance, which focuses it and un-pauses it if it was previously paused.
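In code, that's about as small as it sounds (a sketch; the pause handling is illustrative):

```javascript
editor.on("mousedown", function(cm, e) {
  e.preventDefault(); // swallow the click so the cursor can't be moved
  cm.focus();
  if (paused) togglePause(); // hypothetical un-pause logic
});
```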
## Syntax Highlighting
Syntax highlighting is handled completely using CodeMirror's [modes](http://codemirror.net/mode/index.html), so Type supports* everything that CodeMirror does. By default, Type will try to automatically detect a language mode to use based on the file's extension, falling back to plain-text if a mode can't be found. This is accomplished by searching the (ridiculously large and manually written) [languages map](https://github.com/shadowfacts/type/blob/master/js/languages.js) that stores A) the JS CodeMirror mode file to load, B) the MIME type to pass to CodeMirror, and C) the extensions for that file-type (based on GitHub [linguist](https://github.com/github/linguist) data). Yes, I spent far too long manually writing that file when I probably could have [automated](https://xkcd.com/1319/) it. The script for the mode is then loaded using `jQuery.getScript`, and the code, along with the MIME type and a couple of other things, are passed into `CodeMirror.fromTextArea` (a sketch of this lookup follows the footnotes).
\* Technically it does, however only a subset of those languages can actually be used: the ones that seemed common enough** to warrant being manually added to the languages map.
** I say "common" but [Brainfuck](https://github.com/shadowfacts/type/blob/master/js/languages.js#L2) and [FORTRAN](https://github.com/shadowfacts/type/blob/master/js/languages.js#L142) aren't really common, I just added them for shits and giggles.
## Themes
Themes are handled fairly similarly to syntax highlighting. There's a massive `<select>` dropdown which contains all the options for the [CodeMirror themes](https://github.com/codemirror/CodeMirror/tree/master/theme). When the dropdown is changed, the stylesheet for the selected theme is loaded and the `setTheme` function is called on the editor.
## Chunks
Chunks were the ultimate solution to a problem I ran into fairly early on when I was testing Type. Due to the way Type handles showing which parts of the file haven't been completed (having a single `TextMarker` going from the cursor to the end of the file and updating it when the cursor moves), performance suffers a lot for large files because of the massive amount of DOM updates and re-renders when typing quickly. The solution I came up with was splitting each file up into more manageable chunks (50 lines, at most) which can more quickly be re-rendered by the browser. Alas, this isn't a perfect solution because CodeMirror's lexer can sometimes break with chunks (see [Fixing Syntax Highlighting](#fixing-syntax-highlighting)), but it's the best solution I've come up with so far.
## Storage
One of the restrictions I imposed on myself for this project (mostly because I didn't want to pay for a server) was that Type's functionality had to be 100% client-side only. There are two primary things that result from this: 1) Type is account-less, and 2) everything (progress, current files, theme, etc.) has to be stored client-side.
I decided to use Mozilla's [localForage](https://github.com/localForage/localForage) simply because I remembered it when I had to implement storage stuff. (If you don't know, localForage is a JS wrapper around IndexedDB and/or WebSQL with a fallback to localStorage which makes client-side persistence much nicer.)
Basic overview of what Type stores:
<pre>
root
|
+-->theme
|
+-->owner/repo/branch
|
+-->path/to/file
|
+-->chunk
|
+-->chunks array
|
+-->0
|
+-->cursor
| |
| +-->line
| |
| +-->ch
|
+-->elapsedTime
|
+-->invalids array
|
+-->invalids array
|
+-->0
|
+-->line
|
+-->ch
</pre>
If you want to see what Type actually stores, feel free to take a look in the Indexed DB section of the Application tab of the Chrome web inspector (or the appropriate section of your favorite browser).
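In terms of code, reading and writing that structure with localForage looks roughly like this (the key format and field names are approximations of the tree above, not the actual Type source):

```javascript
var key = "shadowfacts/type/master"; // owner/repo/branch
localforage.getItem(key).then(function(saved) {
  var repo = saved || {};
  var file = repo["js/type.js"] || { chunk: 0, chunks: [] };
  file.chunks[currentChunk] = {
    cursor: editor.getCursor(), // { line, ch }
    elapsedTime: elapsedTime,
    invalids: invalidPositions  // the 2D array of { line, ch } positions
  };
  repo["js/type.js"] = file;
  return localforage.setItem(key, repo);
});
```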
## WPM Tracking
WPM tracking takes place primarily in the [`updateWPM`](https://github.com/shadowfacts/type/blob/master/js/type.js#L561) function, which is called every time the user presses return to move to the next line. `updateWPM` does a number of things (a rough sketch follows the list):
1. If the editor is focused, it updates the elapsed time.
2. It gets the total number of words in the chunk. This is done by splitting the document text with a regex that matches A) any whitespace character, B) a comma, or C) a period, and getting the length of the resulting array.
3. It gets the total number of minutes from the elapsed time (which is stored in milliseconds).
4. It updates the WPM indicator (# of words / # of minutes).
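Put together, it amounts to something like this (names illustrative; the word-splitting mirrors the description above):

```javascript
function updateWPM() {
  if (focused) {
    elapsedTime += Date.now() - lastTick;
    lastTick = Date.now();
  }
  var words = editor.getValue().split(/[\s,.]/).length;
  var minutes = elapsedTime / 1000 / 60;
  wpmIndicator.textContent = Math.round(words / minutes) + " WPM";
}
```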
# What's Next
Right now, Type is reasonably complete. It's in a perfectly usable state, but there are still more things I want to do.
## Fixing Syntax Highlighting
Because of the chunking system, in some cases syntax highlighting is utterly broken because a key token the lexer needs to understand the code isn't present, since it's in a previous chunk. One relatively common example of this is block-comments. If a block-comment begins in one chunk but terminates in a later chunk, the text that's inside the comment but in a different chunk than the starting mark has completely invalid highlighting, because the lexer has no idea it's a comment.
## Skipping Comments
This is a really, really nice feature that typing.io has: as you're typing, the cursor completely skips over comments, both on their own lines and at the ends of other lines. This should be possible; I just need to hook into CodeMirror's syntax highlighting code, find out if the next thing to be typed is marked as a comment, and if so, skip it.
## Polishing
If you've looked at the site, you can tell it's fairly unpolished. It's got almost no styling and is fairly unintuitive. It'll probably remain minimalistic, but it'd be nice to have unified design/theme across the entire site.
## Typo Heatmap
This is a feature in the premium version of typing.io that I'd like to add to Type. It shows a heat map of all the keys you make errors on. The only thing that's preventing me from working on this currently is it would require manually writing a massive data file containing all the locations of all the keys and which characters correspond to which keys, something I don't want to do after spending too much time manually writing the [language map](https://github.com/shadowfacts/type/blob/master/js/languages.js).

View File

@ -0,0 +1,23 @@
```
title = "The Pretty Good Minor Update"
tags = ["meta"]
date = "2017-02-17 14:30:42 -0400"
old_permalink = ["/meta/2017/02/17/the-pretty-good-minor-update/", "/meta/2017/the-pretty-good-minor-update/"]
short_desc = "It's been about six months since the last time I redesigned the site, and while I didn't want to redesign it yet again, I felt it could use a little update to make sure everything's still good."
slug = "the-pretty-good-minor-update"
```
It's been about six months since the last time I redesigned the site, and while I didn't want to redesign it yet again, I felt it could use a little update to make sure everything's still good.
<!-- excerpt-end -->
After reading this [blog post](http://jacquesmattheij.com/the-fastest-blog-in-the-world) about optimizing sites (specifically using a static site generator, like this one does) that was posted on HN, I got to thinking about optimizing my site. I tested my site on Google's [PageSpeed Insights](https://developers.google.com/speed/pagespeed/insights) and got a mediocre score (I don't recall the exact number, but it was in the 70s or 80s). I haven't gone anywhere near as all-out with the optimization as the blog post described, but I'll still go over the couple things I did do:
- Removing custom fonts. The only custom font I previously used was [Hack](https://github.com/chrissimpkins/Hack) for code blocks, so removing that shaved off several extra requests and quite a bit of time without changing much.
- Replacing [js-cookie](https://github.com/js-cookie/js-cookie) with a [couple of functions](https://github.com/shadowfacts/shadowfacts.github.io/blob/master/_includes/head.html#L17-L34) included in the inline script (a sketch of such helpers follows this list), saving ~2 kilobytes and an additional HTTP request.
- Enabling [CloudFlare](https://www.cloudflare.com), which provides a number of optimizations (caching and HTML/CSS minification).
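The cookie helpers only need to do the bare minimum. A sketch of what such functions can look like (not the exact code from the inline script):

```javascript
function getCookie(name) {
  var match = document.cookie.match(new RegExp("(?:^|; )" + name + "=([^;]*)"));
  return match ? decodeURIComponent(match[1]) : null;
}

function setCookie(name, value, days) {
  var expires = new Date(Date.now() + days * 24 * 60 * 60 * 1000);
  document.cookie = name + "=" + encodeURIComponent(value) +
    "; expires=" + expires.toUTCString() + "; path=/";
}
```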
Additionally, there's now a fallback `<noscript>` tag so that if the viewer has JavaScript disabled, the site will still look normal (JavaScript being disabled does mean the theme can't be changed, so the user will always see the light theme). And lastly, there's now a custom 404 page so if you end up at the wrong URL, you'll see something nicer than the default 404 page for GitHub Pages.

View File

@ -0,0 +1,43 @@
```
title = "Comments Powered by GitHub"
tags = ["meta"]
date = "2017-04-23 09:05:42 -0400"
old_permalink = ["/meta/2017/04/23/comments-powered-by-github/", "/meta/2017/comments-powered-by-git-hub/"]
short_desc = "I built a way of commenting on my static website using GitHub to store comments."
slug = "comments-powered-by-git-hub"
```
**NOTE:** This article has been superseded by the [ActivityPub comments system](/2019/reincarnation/#activity-pub).
After seeing [this article][orig] the other morning about replacing the Disqus comments on a blog powered by a static site generator (like this one) with comments backed by a GitHub issue and some front-end JavaScript to load and display them, I thought it would be fun to implement something similar. First I only built the code for displaying comments, similar to the aforementioned article, but I decided to take it one step further by allowing users to submit comments directly from my site.
<!-- excerpt-end -->
You might be wondering, *Why even use weird, front-end comments like this when you could just use a database on the backend and generate the page dynamically?* Well, there are a couple reasons:
Firstly, it's a lot simpler to code (I don't have to handle any of the backend stuff like storage/moderation tools/etc.)
Secondly, and more importantly, my site can remain entirely static. This second reason was the key factor for me. Being static allows my site to be hosted for free on [GitHub Pages](https://pages.github.com/) so I don't have to handle any of the server-side stuff myself. It also makes the site ridiculously fast. A draft of this post has all the content and styles loaded within ~150ms and has the comments loaded after ~300ms.
So, how did I implement it? Well, the first part is fairly simple and based on the [original article][orig]. It simply sends a request to the GitHub API [endpoint](https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue), parses the resulting JSON, generates some HTML, and injects it back into the page.
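That part boils down to a single request. A sketch, assuming jQuery is around (the issue number here is hypothetical, and the real code also renders avatars, dates, and Markdown):

```javascript
var url = "https://api.github.com/repos/shadowfacts/shadowfacts.github.io/issues/42/comments";
$.getJSON(url, function(comments) {
  var html = comments.map(function(c) {
    return "<div class='comment'>" +
           "<a href='" + c.user.html_url + "'>" + c.user.login + "</a>" +
           "<p>" + c.body + "</p>" + // real code should render/sanitize the Markdown
           "</div>";
  }).join("");
  document.getElementById("comments").innerHTML = html;
});
```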
The second part is a bit more complicated, as it handles authentication with the GitHub API and posting comments directly from my site. Since this is a fair bit more complicated with several possible paths to the desired behavior, I'll go through this (twice, the reasoning for which will become clear soon) as it would actually happen:
1. The user enters some comment into the textarea and clicks the submit button. At this point, since the user's never submitted a comment via the website before, we need to authorize with GitHub before we can submit.
2. When the user clicks the submit button, the form submits to a separate backend helper application that handles the OAuth authorization flow.
3. The server app then temporarily stores the submitted comment with a random ID and redirects the user to the GitHub authorization page where the user grants access to their account.
4. From there, GitHub redirects back to the helper app with the same random ID and an OAuth code.
5. The helper app then sends a request to GitHub with the OAuth code, client ID, and client secret (this is why the helper is necessary, to keep the secret secret) getting an authorization token in response.
6. The helper app uses the random ID to retrieve the comment being submitted and the URL being submitted from, redirecting the user back to the original URL with the comment and auth token in the URL hash.
7. The client loads the comment and auth token from the hash and then clears the hash.
8. The auth token is stored in a cookie for future use.
9. Finally, the client then sends a POST request to the GitHub API [endpoint](https://developer.github.com/v3/issues/comments/#create-a-comment) with the comment, issue ID, and the token to submit the comment.
This is the flow for when the client's never submitted a comment before, but as stated in step 8, the auth token is cached on the client, making things simpler the next time someone wants to submit a comment. When the comment submit button is pressed and there is an auth token cached, we simply cancel the form submission and send the POST request to GitHub, submitting the comment.
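That second, shorter path is a small amount of client-side code. A sketch (the issue number, `form`, `textarea`, and `getCookie` helper are all hypothetical names):

```javascript
form.addEventListener("submit", function(e) {
  var token = getCookie("github-token");
  if (!token) return; // no cached token: let the form submit to the OAuth helper
  e.preventDefault();
  $.ajax({
    url: "https://api.github.com/repos/shadowfacts/shadowfacts.github.io/issues/42/comments",
    type: "POST",
    headers: { "Authorization": "token " + token },
    data: JSON.stringify({ body: textarea.value }),
    success: function() { /* re-fetch and re-render the comments */ }
  });
});
```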
All of the code for this is open source. The front-end JS is available [here](https://github.com/shadowfacts/shadowfacts.github.io/blob/master/js/comments.js) and the backend GitHub API helper is [here](https://github.com/shadowfacts/gh-comment-poster).
And that's it! So, do you like this system? Hate it? Have suggestions for how it could be improved? Well now you can leave a comment.
[orig]: http://donw.io/post/github-comments/

View File

@ -0,0 +1,89 @@
```
title = "Reincarnation"
tags = ["meta"]
date = "2019-09-18 10:34:42 -0400"
short_desc = "Stand by for reincarnation."
old_permalink = "/meta/2019/reincarnation/"
use_old_permalink_for_comments = true
```
<figure>
<img src="/2019/reincarnation/galactic_entity.png" alt="Futurama Galactic Entity image" />
<figcaption>A wise man once said that nothing really dies, it just comes back in a new form. Then he died.</figcaption>
</figure>
Stand by for ***reincarnation***.
Welcome to Version Five of my website. Quite a bit has changed, so let's go over it.
<!-- excerpt-end -->
## New Theme
As you can see (at least, if you're reading this online), the theme for my website has been redesigned from the ground up. Unlike the [previous version](/meta/2016/the-great-redesign/), which was based on someone else's theme, this design is entirely my own. The design is, in no small part, inspired by [Brutalist Web Design](https://www.brutalist-web.design/)[^1]. It doesn't look super shiny because it shouldn't; it is designed, first and foremost, to be pleasant to read. There is a grand total of one (1) fancy animation used, and the overall style is pretty minimal, placing emphasis on the content above all else.
[^1]: Also excellent is the [Better Motherfucking Website](http://bettermotherfuckingwebsite.com/).
The main decorative choice I made (which is really halfway between brutalist and decorative) is the Markdown decorations. If you're reading this post on the web, you'll notice a couple of unusual things in the formatting of the body text: the text of links is surrounded by square brackets with the link destination shown in parentheses, bold and italic text is wrapped in double asterisks and underscores respectively, headings have pound signs in front of them, etc. The goal of these styles is to replicate (to a certain extent) what the original, Markdown-formatted source of the blog posts looks like. They're entirely implemented in CSS using pseudo-elements, which has the added benefit that they're not included in text highlighted/copied on the webpage. The one exception is the pound signs next to subheadings in posts, which are actual tags because they serve a functional purpose: they provide jump links to the different subsections of posts. The rest are pure decoration and serve no functional purpose, but I still believe they're brutalist because they reflect the raw content of the Markdown before it's been processed. I think the decorations look nice and add some visual distinction to the design without being ostentatious or detracting from the experience of reading posts.
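The pseudo-element trick is simple. A sketch of the idea (the selectors here are hypothetical, not the site's actual stylesheet):

```css
article a::before { content: "["; }
article a::after  { content: "](" attr(href) ")"; }
article strong::before, article strong::after { content: "**"; }
article em::before, article em::after { content: "_"; }
```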
As for reading posts, I spent probably more time than I should have reading [_Practical Typography_](https://practicaltypography.com/) and fiddling with the fonts, trying to make the typography as pleasant as possible for reading articles. While I appreciate the author's point about not using [system fonts](https://practicaltypography.com/system-fonts.html), for my own personal blog, I care more about speed and page weight than how unique the typography is. After much fiddling, I settled on Charter for the main body text and Avenir for the headings as well as the text in the header, footer, and comments sections. I chose them because they had the highest grade of any of the system fonts, and I think they look pretty good (both separately and together). I find a serif font for the body text to be much easier on the eyes compared to the sans serif font used by the previous design of this blog, and a sans serif font for what I call the "UI" elements of the page provides a nice contrast with the body text. Code (both blocks and inline) uses Apple's [SF Mono](https://developer.apple.com/fonts/) if it's installed (because it's my personal favorite) and falls back on the browser's default monospace font, because I find most system monospace fonts either ugly or too light.
Regarding color schemes, there are still light and dark variations. But, as you'll notice if you look at the theme selector at the bottom of the page, there's now a new automatic theme (which is also the default). It uses the [`prefers-color-scheme`](https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-color-scheme) media query to support dynamically adapting to the user's preferred color scheme across their operating system. For users on operating systems or browsers that don't support the media query, light mode is used by default as before. Also, the new color scheme used for code blocks is the Atom One color scheme for [Highlight.js](https://highlightjs.org/).
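For the curious, the media query usage looks roughly like this (sketched with CSS custom properties and made-up colors; the site's actual themes may be structured differently):

```css
:root { --background: #fff; --text: #222; }

@media (prefers-color-scheme: dark) {
  :root { --background: #1c1c1c; --text: #ddd; }
}

body {
  background-color: var(--background);
  color: var(--text);
}
```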
## New Features
There are a couple new features, in addition to the new automatic theme-switching. There are comments (discussed more [below](#activity-pub)). In addition to the [main RSS feed](/feed.xml), there are now feeds specifically for the individual categories (e.g., the [feed](/meta/feed.xml) for the meta category). There's also an estimated reading time counter shown in the post metadata (it's quite simple, it calculates the time by taking the word count divided by an average reading speed of 225 WPM[^2]).
[^2]: My sources for that number are pretty bad. It's based on the reading time things from various other blogging engines. If you have better sources, let me know.
The site remains almost entirely JavaScript-free. There are two places where client-side JS is used: the theme switcher and the comments section. There are ways to implement the theme switcher without client-side scripts, but I don't believe it's worth the trade-off. The automatic theme is the default, and it's what those with JS entirely disabled will get.
(There are also footnotes, which, so far at least, I'm getting a lot of mileage out of.)
## The Backend
The previous version of my website used [Jekyll](https://jekyllrb.com/) (and WordPress before that, and Jekyll again before that). In what may become a pattern, I've once more switched away from Jekyll. Version Five uses something completely custom. It has been a work-in-progress in one form or another for about a year now. It started out as a Node.js project that was going to be a general-purpose static site generator. Then, around the time I was learning Elixir (which I love, and will be the subject of another blog post), I attempted to rewrite it in that[^3]. Then we finally arrive at the current iteration of my website. In spite of my distaste for the ecosystem[^4], I returned to Node.js. This time, however, the project took a bit of a different direction than the previous two attempts at a rewrite. It has two main parts: the static site generator and the ActivityPub integration.
[^3]: Unfortunately, this attempt ran into some issues fairly quickly. Elixir itself is wonderful, but the package ecosystem for web-related things such as Sass, Markdown rendering, and syntax highlighting, is lackluster.
[^4]: The `package.json` for the project explicitly lists 30 dependencies, 13 of which are TypeScript type definitions. There are 311 packages in my `node_modules` folder. Enough said.
### Static Site Generator
The static site generator is by far the most important piece. Without it, there would be no website. I once again went with an SSG for a couple reasons, starting and ending with performance. When it comes down to it, nothing is generated at request time. Everything exists as static files on disk that are generated when the service starts up. The basic architecture isn't all that special: there are posts written in Markdown, gathered into various collections, rendered to HTML using various page layouts, and then gathered together in various indexes (the main index, category-specific ones, and RSS feeds).
The current iteration, however, was a bit different in its conception. Instead of attempting to be a general purpose static site generator that anyone could pick up and use, it is designed to be completely specific to my needs and the requirements for this blog. The different collections of posts (blog posts, the different tutorial series) are hardcoded in. The Markdown pipeline used to render posts to HTML is hardcoded, and contains some custom extensions for the Markdown decorations used. The list of which files are copied verbatim is hardcoded. You get the idea.
I've toyed with the idea of refactoring out all of the custom code I'm using to generate the blog (as well as the ActivityPub integration) into a separate library which other people could use for their own blogs. It's not entirely off the table, but if it does happen, it won't be soon. For now, I'm perfectly content with the functionality my pile of hardcoded, janky, custom code provides me. Besides, this project has been ongoing for more than a year, and I don't need it to drag on any further.
### ActivityPub
It may be mostly static (and could be used entirely statically), but there's one big difference: it runs a web server that's responsible for serving the static files and for handling the bit that actually needs to be dynamic: the ActivityPub integration. (There's another blog post coming at some point about resources I found helpful in getting AP actually working with other platforms.)
If you haven't heard of it before, [ActivityPub](https://activitypub.rocks) is the protocol that underpins the 'fediverse,' a network of federated software projects. Most of the current projects are aimed at being replacements for traditional social networks: [Mastodon](https://joinmastodon.org) and [Pleroma](https://pleroma.social) are microblogging implementations similar to Twitter, [Pixelfed](https://pixelfed.org) is a photo sharing platform similar to Instagram, and [PeerTube](https://joinpeertube.org/) is a video hosting service like YouTube. There are also blog engines that federate using ActivityPub, including [Plume](https://joinplu.me) and [WriteFreely](https://writefreely.org), which were the inspiration for this project.
Could I have used Plume or WriteFreely instead of rolling my own custom solution? Yeah, probably. But I have no doubt I would have run into substantial problems along the way. (Off the top of my head, probably ones related to having separate collections for tutorial series as well as the Markdown decorations.)
The current ActivityPub implementation is pretty bare-bones. It implements the minimum necessary to allow: 1) looking up posts, 2) following the blog from other AP-implementing services, and 3) commenting on posts. You may be able to do other things with my posts from other AP services (e.g., favoriting and reblogging), but the code on my end doesn't know or care. In fact, it only handles a whopping four types of incoming activities: Follow, Create (only for Note objects), Delete (also only for Notes), and Undo (only for Follows). This may be expanded in the future, but for now I'm content with the functionality it provides.
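The inbox dispatch for those four activities can be sketched like so (TypeScript, with illustrative helper names; not the actual implementation):

```typescript
async function handleActivity(activity: any): Promise<void> {
    switch (activity.type) {
        case "Follow":
            await addFollower(activity.actor);
            break;
        case "Create":
            if (activity.object && activity.object.type === "Note")
                await storeComment(activity.object);
            break;
        case "Delete":
            await deleteCommentIfKnown(activity.object);
            break;
        case "Undo":
            if (activity.object && activity.object.type === "Follow")
                await removeFollower(activity.actor);
            break;
        // everything else is ignored
    }
}
```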
The way I've set up comments to work here is pretty simple. The underlying ActivityPub objects are stored in the database, and whenever someone loads the page and expands the comment section, some client-side JavaScript is triggered. It sends a request to the back end to retrieve all the comments for the given post from the database, then turns them into a tree, renders them to HTML, and injects them into the page. This does mean that viewing comments requires JavaScript, but the other option would have been to have the pages be generated upon each request, making them no longer static pages. Given the trade-offs, I opted to keep the article pages static and retain the performance benefits that brings. I think the compromise is worth it; most people have JavaScript enabled and those who don't probably aren't looking at the comments.
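Threading the flat list of comments into a tree is the only mildly interesting part. A sketch (the field names are assumed):

```javascript
function buildTree(comments) {
  var byId = {};
  var roots = [];
  comments.forEach(function(c) {
    byId[c.id] = c;
    c.children = [];
  });
  comments.forEach(function(c) {
    if (c.inReplyTo && byId[c.inReplyTo]) {
      byId[c.inReplyTo].children.push(c);
    } else {
      roots.push(c);
    }
  });
  return roots;
}
```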
If you want to comment on any of my blog posts, just copy and paste the URL into the search field of your client and hit the reply button!
### Technical Details
If you're interested, here are some of the technical details about how the back end is implemented.
All of the backend stuff is written in Node.js and TypeScript. The SSG piece uses Markdown (with some extensions) for rendering posts, Sass (specifically SCSS) for the styles, and EJS for the templates. The ActivityPub integration uses Postgres with TypeORM to store remote actors and comments. And the web server itself is Express.js.
When the program starts, the static site generation is performed before the web server is started to ensure that broken or outdated files aren't served. First, some files are copied over verbatim (such as favicons and the client-side JS for loading comments), the CSS files for the individual themes are compiled from the Sass files, and the error pages are generated. Then, tutorials and blog posts are generated by scanning through the respective directories and rendering any Markdown files. Finally, the home page, category index pages, and RSS feeds are generated.
That's it for the generation, but before the web server starts, the ActivityPub module takes all of the posts, checks if there are any new ones, and, if so, adds them to the database and federates them out to all the remote actors following the blog. Then, all the web server routes get set up and the server finally starts.
By the way, the source code for the generator, ActivityPub integration, and the content itself is all visible [on my Gitea](https://git.shadowfacts.net/shadowfacts/shadowfacts.net). (Note: the source for the generator and AP integration (the `lib` directory) is open source, whereas the contents of my website (the `site` directory) are only _visible_ source.)
## Conclusion
A lot of technical, under-the-hood stuff has changed, and I'd like to think that I haven't wasted my time and that I'll actually use this new version of my blog to publish posts. But I don't know what will happen, and I can't make any promises. I have some drafts of posts that I'd like to finish and finally publish, so stay tuned (you can subscribe on [RSS](/feed.xml) or by following `@blog@shadowfacts.net` on your favorite ActivityPub platform). As for the Minecraft modding tutorial series, those have been discontinued. They remain available for posterity, but they haven't been updated (merely transplanted into the new blog), and I don't currently have any plans to write new ones.

View File

@ -0,0 +1,42 @@
```
title = "ActivityPub Resources"
tags = ["activitypub"]
date = "2019-09-22 17:50:42 -0400"
short_desc = "A compilation of resources I found useful in learning/implementing ActivityPub."
old_permalink = "/activitypub/2019/activity-pub-resources/"
use_old_permalink_for_comments = true
slug = "activity-pub-resources"
```
This isn't really going to be a blog post so much as a collection of tidbits and resources I found helpful in implementing the [ActivityPub integration](/meta/2019/reincarnation/#activity-pub) for the new version of my blog.
This post was last updated on Oct 10, 2019.
<!-- excerpt-end -->
### Specs
- The [ActivityStreams 2.0 spec](https://www.w3.org/TR/activitystreams-core/) is important, as it's what ActivityPub is built on top of.
- Similarly, the [AS 2.0 Vocabulary](https://www.w3.org/TR/activitystreams-vocabulary/) defines all the objects and activities that AP actually uses in practice (and many more that it doesn't).
- The [ActivityPub spec](https://www.w3.org/TR/activitypub/) itself is quite useful, despite its many omissions.
- There's also [LitePub](https://litepub.social/litepub/), which has some extensions to AP.
- By far the least useful spec is [JSON-LD](https://www.w3.org/TR/json-ld/), which defines how to use JSON to represent linked data and object graphs. AS2 and AP are both built on this, but if you're going for a simple implementation (or even a complex one), you can entirely ignore this and treat JSON-LD as plain old JSON objects.
[This](https://tinysubversions.com/notes/reading-activitypub/) is also a helpful resource about how to go about reading the AP specification.
### Actually Federating
- Gargron's blog posts on [implementing a basic AP server](https://blog.joinmastodon.org/2018/06/how-to-implement-a-basic-activitypub-server/) and [implementing HTTP signatures](https://blog.joinmastodon.org/2018/07/how-to-make-friends-and-verify-requests/) are good guides for how to actually get federating with other servers in the wild.
- [Lain's blog post](https://blog.soykaf.com/post/activity-pub-in-pleroma/) on some of the weird quirks of how ActivityPub actually gets used.
- [Kaniini's blog post](https://blog.dereferenced.org/federation-what-flows-where-and-why) about how data actually moves through the fediverse.
### Reference Material
- Darius Kazemi has a [simple reference implementation](https://github.com/dariusk/express-activitypub/) of an ActivityPub server written using Node.js.
- I used the [Pleroma source code](https://git.pleroma.social/pleroma/pleroma/) a great deal when working on my implementation, mainly just because I'm familiar with Elixir.
- I'd also like to think [my own implementation](https://git.shadowfacts.net/shadowfacts/shadowfacts.net/src/branch/master/lib/activitypub) is fairly approachable (it's about 700 lines of not-too-complicated TypeScript).
- Ted Unangst has a collection of [sample data](https://jawn.tedunangst.com/a/R526ZQ49MbYt5J4KpR) which is useful for comparing how different implementations represent things in AP.
### Other
- For actually testing federation, [ngrok](https://ngrok.com/) is very useful for testing your implementation against others. It creates a tunnel from your local machine to a public domain with HTTPS already set up. Because your code is still running locally, you have access to all your usual debugging tools and can iterate rapidly.
- Testing against other implementations running locally (be it on your machine or inside a VM/container) lets you access debug logs and see what the other server is actually receiving, which can be quite useful.
- Darius Kazemi also wrote [an application](https://tinysubversions.com/notes/activitypub-tool/) that lets you send ActivityPub objects directly to other servers, which is useful for testing your application against outside data without polluting other people's instances.
- Ted Unangst also has his own [compilation of AP-related links](https://flak.tedunangst.com/post/ActivityPub-as-it-has-been-understood).

View File

@ -0,0 +1,39 @@
```
title = "Learning Elixir"
tags = ["elixir"]
date = "2019-10-10 12:29:42 -0400"
short_desc = "How I learned Elixir and why I love it."
old_permalink = "/elixir/2019/learning-elixir/"
use_old_permalink_for_comments = true
```
About a year ago, I set out to learn the [Elixir](https://elixir-lang.org) programming language. At the time, it was mainly so I could contribute to [Pleroma](https://pleroma.social), but I've since fallen in love with the language.
<!-- excerpt-end -->
To actually learn Elixir, I did a few things. I started by reading through the official [Elixir Guide](https://elixir-lang.org/getting-started/introduction.html), and after that, following along with the [Elixir School](https://elixirschool.com/en/lessons/basics/basics/) lessons. These were useful in giving me a general idea of how the language works.
I strongly believe that the best way to learn a programming language (especially if you already know others) is to just start writing code. [Exercism](https://exercism.io/tracks/elixir) is a resource I've found to be quite useful in that process. It has a sequence of programming challenges that increase in difficulty, so it gives you a good feel for what it's like to use Elixir to actually solve problems and helps shift your brain into thinking about problems in a functional context.
By this point, it was almost December, so I decided I was going to try to do the [Advent of Code](https://adventofcode.com) problems only using Elixir. These challenges were more difficult than the Exercism ones, but they provided the same benefit of letting me get experience actually writing Elixir and solving problems with it in an isolated context, without a whole bunch of moving parts.
I knew what I ultimately wanted to do with Elixir was build web apps, so after that I went through the official [Phoenix Guide](https://hexdocs.pm/phoenix/overview.html), which explains the overall architecture of the Phoenix framework and shows you a bunch of common patterns and techniques for building web apps with it.
Lastly, and most importantly, I actually started building projects using Elixir. The first one I started was [frenzy](https://git.shadowfacts.net/shadowfacts/frenzy), an RSS aggregator I built using Phoenix and Ecto. Originally, the project was a couple hundred lines of shoddily written JS. I wrote it even before I started learning Elixir, intending it to be a stopgap. As I was learning Elixir, I knew this project was what it was building up to, so as I read things and did programming exercises, I noted things that I thought would become useful once I got around to rewriting frenzy in Elixir.
When learning a language, there's no substitute for actually using it, and this step was by far the most important for me. In addition to all of the algorithmic experience and general knowledge of how to write Elixir, actually doing this project gave me the _practical_ knowledge of what it's like to actually work with this language and these tools. If you're interested in learning Elixir (or any programming language, really), my biggest piece of advice is to keep in the back of your head something concrete that you want to build with it.
After having learned Elixir, and continuing to use other languages for other projects, there are a few key differences that distinguish Elixir from the other languages I know. They're not just the small differences you find between any two programming languages; they're what distinguishes languages that are 'just another language' from languages that I find truly enjoyable to use.
Firstly, it is a functional language, but not obtusely so. It's not steeped in terms and ideas that sound like you need an advanced math degree to understand. Of the functional programming languages that I've tried to learn, Elixir has been by far the easiest. It uses FP concepts, but in a way that makes it easy to learn coming from an imperative mindset. There's no worrying about monads and functors and combinators. The hardest change is learning to use recursion by default instead of instinctively reaching for imperative constructs like loops. You don't have to think about how specifically to structure your code so that FP concepts fit well with it. You can just write your code in a way that feels normal, and the functional aspects will become second nature.
Secondly, Elixir is _fantastic_ for REPL driven development. It comes with a built-in REPL (`iex`) which lets you quickly test code fragments, recompile your project, and view documentation.
Like [Erlang](https://www.erlang.org/), which it's built on, Elixir runs on the BEAM virtual machine, which provides super-robust code-reloading. During the normal course of development, the only time I ever need to restart the running program is when I change the config file. Other than that, I can make a change or alteration and just run `recompile` from the `iex` session and nearly instantly have all my latest changes running. Even when there are changes to data types or method signatures or compile-time constants, everything can just get swapped in seamlessly. Compared to the hot-swapping in other environments (nowhere near as good on the JVM, non-existent in many more), this is incredible.
The reason I find this makes such a big difference to the way I code is that it lets me speed up my internal development loop immensely. I can very rapidly flip back and forth between writing code and testing it; I never feel like I'm being held up by the tools. I can stay in the flow much longer because there are no [lengthy compile times](https://www.xkcd.com/303/) to let me get distracted or start procrastinating.
Compared to something like iOS app development, this is a godsend. Even in small projects where incremental compiles only take a few seconds, the iteration loop is much slower. My usual development cycle goes something like this: 1) make a change, 2) hit build and run, 3) switch to my browser to glance at social media, 4) 30 seconds later switch to the Simulator and hope it's finished launching. With Elixir projects, I'm generally just switching back and forth between my editor and the terminal and/or web browser to test whatever I'm working on. There are no intermediate steps. When I make a change, there's no waiting for an app to launch, or for a database connection to be established, or for a network request to be made, or for config files to be read, or for anything else. Generally, it takes me more time to switch windows and type `recompile` than it does for the recompilation to actually take place and the change to take effect.
Elixir is a language that I've come to find incredibly valuable. It's very powerful, and in the areas where it excels, its unique characteristics make it an extremely valuable tool. If you're thinking about dipping your toes into functional programming, or want to try something new, or even just spend a lot of time doing back end web development, I encourage you to try Elixir.

View File

@ -0,0 +1,242 @@
```
title = "Building a JavaScript-Free Slide-Over Menu"
tags = ["web"]
date = "2019-11-11 21:08:42 -0400"
short_desc = "Building a slide-over hamburger menu without using JavaScript."
old_permalink = "/web/2019/js-free-hamburger-menu/"
slug = "js-free-hamburger-menu"
use_old_permalink_for_comments = true
```
Slide-over menus on the web are a pretty common design pattern, especially on mobile. Unfortunately, they seem to generally be accompanied by massive, bloated web apps pulling in megabytes of JavaScript for the simplest of functionality. But fear not, even if you're building a JavaScript-free web app, or simply prefer to fail gracefully in the event the user has disabled JavaScript, it's still possible to use this technique by (ab)using HTML form and label elements.
<!-- excerpt-end -->
Now, I could just spew a bunch of code onto the page and give you a cursory explanation of what it's doing, but that would be boring, so instead I'm going to walk through the progression of how I built it and the reasons for the changes I made along the way. If that's all you want, you can take a look at the <a href="/2019/js-free-hamburger-menu/final.html" data-link="final.html">final version</a> and View Source to see all the code.
We'll start off with a tiny bit of markup in the body (I'm assuming you can set up an HTML page yourself):
```html
<div id="sidebar-content">
<p>Some sidebar content</p>
</div>
<main>
<p>
<!-- lorem ipsum text, just so we don't have an entirely blank page -->
</p>
</main>
```
We'll also have some basic CSS to start, so looking at our page isn't looking quite so painful.
```css
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
```
Then, we'll need the element this whole thing hinges on: a checkbox input. Because of the CSS trick we're using to implement the visibility toggling, the checkbox element needs to be at the same level in the DOM as our `#sidebar-container` element. We're going to use the adjacent sibling selector (`+`), which means that the checkbox input needs to come directly before the sidebar container, but you could also use the general sibling selector (`~`) which would let you put the checkbox anywhere in the DOM given it has the same parent element as the sidebar container.
```html
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-content">
<!-- ... -->
```
The other half of the HTML behavior that we're relying on to make this work without JavaScript is that clicking `<label>` tags that are associated with a checkbox toggles the checkbox, and that label tags can be anywhere in the document in relation to their associated element. We can also have several label elements controlling the same checkbox, which will let us provide a couple of different options to the user for how to close the slide-over menu.
We'll need a couple of labels to start with: one shown next to the content and one that will be inside the menu to dismiss it.
```html
<div id="sidebar-content">
<p>Some sidebar content</p>
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<!-- ... -->
</main>
```
Now, all we need to start toggling our sidebar is just a few CSS rules:
```css
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-content {
display: none;
}
#sidebar-visible:checked + #sidebar-content {
display: block;
}
```
The user never needs to see the checkbox itself, since they'll always interact with it through the label elements, so we can always hide it. For good measure, we'll have our labels use the pointer cursor when they're hovered over, to hint to the user that they can be clicked on. Then we'll hide the sidebar content element by default, since we want it to start out hidden.
The most important rule, and what this whole thing hinges on, is that last selector. We're looking for an element with the ID `sidebar-visible` that matches the `:checked` pseudo-selector (which only applies to checked checkboxes or radio inputs) that _has a sibling_ whose ID is `sidebar-content`. The key is that the element we're actually selecting here is the `#sidebar-content`, not the checkbox itself. We're essentially using the `:checked` pseudo-selector as a predicate, telling the browser that we only want to select the sidebar content element _when our checkbox is checked_.
If we take a look at <a href="/2019/js-free-hamburger-menu/toggling.html" data-link="toggling.html">our web page now</a>, we can see we've got the building blocks in place for our slide-over menu. The page starts off not showing our sidebar content, but we can click the Open Sidebar label to show it, and then click the Close label to hide it once more.
Next, we'll need a bit of CSS to get it looking more like an actual sidebar. To start off, we'll give it a fixed position with all of its edges pinned to the edges of the viewport. We'll also give it a nice high z-index, to make sure it's shown above all of the regular content on our page.
```css
#sidebar-content {
display: none;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
}
```
This will get our sidebar element positioned correctly, but it's not so pretty. To clean it up a bit, we'll move the sidebar content element inside a new container element. Giving both elements background colors will also provide a visual cue of where the sidebar is in relation to the main content of our page.
```html
<div id="sidebar-container">
<div id="sidebar-content">
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
<p>Some sidebar content</p>
</div>
</div>
```
```css
#sidebar-container {
display: none;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
background-color: rgba(0, 0, 0, 0.3);
}
#sidebar-visible:checked + #sidebar-container {
display: block;
}
#sidebar-content {
background-color: #eee;
}
```
Note that we've changed the rules that we previously applied to `#sidebar-content` to now target the `#sidebar-container` element, since that's now the root element of our sidebar. If we take a look at our page again, now we'll see that displaying the content works correctly and the backgrounds for the various parts of our page are good. But, the sidebar looks more like a topbar. Let's fix that by giving it an explicit size, instead of letting it size itself:
```css
#sidebar-content {
width: 25%;
height: 100vh;
}
```
If you haven't encountered it before, the `vh` unit in CSS represents a percentage of the viewport's height, so `100vh` is the height of the viewport and `50vh` would be half the height of the viewport (likewise, there's a `vw` unit representing the viewport's width).
<a href="/2019/js-free-hamburger-menu/background.html" data-link="background.html">Now</a> we're making good progress. Trying out our slide-over menu, one thing that would be nice is the ability to click anywhere _outside_ of the menu to dismiss it, as if we had clicked close instead. We can accomplish that by adding yet another label that's hooked up to our checkbox:
```html
<div id="sidebar-container">
<!-- ... -->
<label for="sidebar-visible" id="sidebar-dismiss"></label>
</div>
```
We'll need to position and size this label so that it covers the entire rest of the page. We could do this manually, specifying the position and sizes of each label, or we could be a bit clever and use Flexbox. First, we'll need to go back and change our sidebar container to be in flexbox mode when it's shown:
```css
#sidebar-visible:checked + #sidebar-container {
display: flex;
flex-direction: row;
}
```
We set the flex direction to row because our sidebar content and the label will lay out horizontally across the page, with our content on the left and the dismissal label on the right. We can also go back to our sidebar content styles and remove the height rule. Since we don't specify otherwise, the flex items will expand to fill the container along the axis perpendicular to the flex direction (in this case, that will be the vertical axis), and since the flex container fills the viewport height, so too will the flex items.
```css
#sidebar-content {
background-color: #eee;
width: 25%;
}
```
Making our dismissal label fill the remaining space is then as simple as setting its flex-grow priority to 1 (any number greater than the default of 0, which our content has, will do).
```css
#sidebar-dismiss {
flex-grow: 1;
}
```
On <a href="/2019/js-free-hamburger-menu/dismiss.html" data-link="dismiss.html">our updated page</a>, after opening the slide-over menu, we can click anywhere outside it (in the translucent, darkened area) to dismiss the menu.
The last thing that would be nice to add is a simple transition for when the menu opens or closes. Before we can start adding transitions, we'll need to make a change to our existing CSS. Currently, we're hiding and showing the menu using the display property, switching between none and flex. But that won't work with transitions. Since the browser has no way of knowing how we want to interpolate between the two values, none of the transitions we specify will have any effect because the container will still be shown/hidden instantaneously. Luckily, there's another solution to hiding and showing the container element: the `visibility` property, which _is_ [interpolatable](https://developer.mozilla.org/en-US/docs/Web/CSS/visibility#Interpolation) between `visible` and `hidden`. So, we'll change our container to always be in flexbox mode, but start out being hidden and then become visible when the checkbox is toggled on.
```css
#sidebar-container {
visibility: hidden;
display: flex;
flex-direction: row;
/* ... */
}
#sidebar-visible:checked + #sidebar-container {
visibility: visible;
}
```
Now we've got the exact same behavior as before, but we have the ability to add transitions. Let's start by making the partially opaque background fade in and out as the menu is shown and hidden. We can accomplish this by moving the background color rule to only apply when the checkbox is checked, and have the container start out with a fully transparent background color. We'll also instruct it to transition both the `visibility` and `background-color` properties.
```css
#sidebar-container {
    /* ... */
    background-color: transparent;
    transition:
        visibility 0.35s ease-in-out,
        background-color 0.35s ease-in-out;
}
#sidebar-visible:checked + #sidebar-container {
    visibility: visible;
    background-color: rgba(0, 0, 0, 0.3);
}
```
Now if we try showing and hiding the menu, we can see the semi-translucent gray background fades in and out properly, but the sidebar content itself still shows up immediately, without any transition. Let's provide a transition for it now. We'll have it slide on and off the side of the page. To do this, we'll initially set the sidebar's left position to `-100%`, which will put the right edge of the sidebar at the left edge of the screen, leaving the content off-screen. We also need to give the content element `position: relative` so that the `left` offset is applied relative to its normal position.
```css
#sidebar-content {
    /* ... */
    position: relative;
    left: -100%;
}
```
Then, when the checkbox is checked, we can reset the left property to 0, bringing it back on-screen:
```css
#sidebar-visible:checked + #sidebar-container > #sidebar-content {
    left: 0;
}
```
Lastly, we'll tell the sidebar content element to transition its left property with the same parameters as the background transition:
```css
#sidebar-content {
    /* ... */
    transition: left 0.35s ease-in-out;
}
```
Now <a href="/2019/js-free-hamburger-menu/transition.html" data-link="transition.html">our menu</a> has a nice transition so it's not quite so jarring when it's shown/hidden.
I've polished it up a little bit more for the <a href="/2019/js-free-hamburger-menu/final.html" data-link="final.html">final version</a>, but the core of the menu is done! And all without a single line of JavaScript.


@@ -0,0 +1,131 @@
```
title = "Mocking HTTP Requests for iOS App UI Tests"
tags = ["swift"]
date = "2019-12-22 19:12:42 -0400"
short_desc = "Integrating a tiny web server into your Xcode UI test target to mock HTTP requests."
old_permalink = "/ios/2019/mock-http-ios-ui-testing/"
slug = "mock-http-ios-ui-testing"
use_old_permalink_for_comments = true
```
I recently decided to start writing User Interface tests for [Tusker](https://git.shadowfacts.net/shadowfacts/Tusker), my iOS app for Mastodon and Pleroma. But I couldn't just write tests that interacted with an account on any real instance, as that would be far too unpredictable and mean my tests could have an impact on other people. The solution to this problem is, of course, mocking. The core idea is that instead of interacting with external things, your program interacts with mock versions of them, which appear to be their real counterparts, but don't actually perform any of the operations they claim to. This allows for very tight control over what data the application receives, making it much more amenable to testing.
Unfortunately, if you search around, some of the most common solutions on the internet recommend using environment variables (one of the only ways of sending data directly from the test to the application under test) to insert the mocked response into the app. That is, for every API response you need to mock, you would have an environment variable that contains the response data. This isn't a great solution, because it leaves whole code paths untested (everything after the point where the request URL is generated). It would also mean that there's no way of testing things like which requests are actually made by your app.
The solution to this problem is to actually run a local HTTP server that functions as the API server. This way, the app can communicate with the web server exactly as it would in the normal operating environment, but still have strictly controlled data. Of course, actually doing this isn't quite so straightforward.
<!-- excerpt-end -->
There are a couple of things to think about when looking for a solution: First, in order to meet the requirement of being able to test what API calls are made, the web server needs to be accessible to the test process. Second, we want to avoid modifications to the app if at all possible. There should be as few differences as possible in the app between the testing and production environments. The more differences, the more code that goes untested and the more potential edge-cases.
For the first requirement, there are a pair of handy open-source libraries that we can use to take care of the grunt work of responding to HTTP requests and serving responses. First there's [Embassy](https://github.com/envoy/Embassy), which acts as an asynchronous HTTP server. It handles actually listening for connections on a port and receiving and sending data to them. The other part of this stack is [Ambassador](https://github.com/envoy/Ambassador) which handles routing incoming requests and responding to them in a nicer fashion than just sending strings out.
(Note: both of these libraries are made by a company called Envoy, to which I have no relation.)
So, we need to get these libraries into our project, with the condition that we only want them to be present at test-time, not for release builds (as that would mean shipping unnecessary, extra code). If you're using CocoaPods, it can take care of this for you. But if you're not using a package manager, it's (unsurprisingly) more complicated.
First, the Xcode projects for the two libraries need to be included in the workspace for your app[^1]. Next, our app needs to be configured to actually compile against the two frameworks so we can use them in our tests. This is done in the app target of the Xcode project. Both frameworks should be added to the "Frameworks, Libraries, and Embedded Content" section of the General tab for the app target. Once added, the libraries should be set as "Do Not Embed". We don't want Xcode to embed the frameworks, because we'll handle that ourselves, conditioned on the current configuration.
[^1]: I tried to use Xcode 11's new Swift Package Manager support for this, but as far as I can tell, it doesn't provide direct access to the built frameworks produced by the libraries, so this technique isn't possible.
To handle that, add a new Run Script phase to the Build Phases section of the target. It'll need input and output files configured for each of our projects:
Input files:
- `${BUILT_PRODUCTS_DIR}/Embassy.framework`
- `${BUILT_PRODUCTS_DIR}/Ambassador.framework`
Output files:
- `${BUILT_PRODUCTS_DIR}/${FRAMEWORKS_FOLDER_PATH}/Embassy.framework`
- `${BUILT_PRODUCTS_DIR}/${FRAMEWORKS_FOLDER_PATH}/Ambassador.framework`
For reference, the `BUILT_PRODUCTS_DIR` environment variable refers to the location in Xcode's DerivedData folder where, as the name suggests, the compiled outputs of the project's targets live. `FRAMEWORKS_FOLDER_PATH` refers to the Frameworks folder inside of our app bundle (e.g. `Tusker.app/Frameworks`).
Configuring input and output files for the Run Script build phase instead of just hard-coding paths in the script itself has the advantage that Xcode will know to re-run our script if the input files change or the output files are missing.
The script our build phase will run is the following:
```bash
if [ "${CONFIGURATION}" == "Debug" ]; then
echo "Embedding ${SCRIPT_INPUT_FILE_0}"
cp -R $SCRIPT_INPUT_FILE_0 $SCRIPT_OUTPUT_FILE_0
codesign --force --verbose --sign $EXPANDED_CODE_SIGN_IDENTITY $SCRIPT_OUTPUT_FILE_0
echo "Embedding ${SCRIPT_INPUT_FILE_1}"
cp -R $SCRIPT_INPUT_FILE_1 $SCRIPT_OUTPUT_FILE_1
codesign --force --verbose --sign $EXPANDED_CODE_SIGN_IDENTITY $SCRIPT_OUTPUT_FILE_1
else
echo "Skipping embedding debug frameworks"
fi
```
If the product configuration is anything other than Debug, the script will simply log a message and do nothing else. If it is in Debug configuration, then we'll take a couple actions for each framework: First, we simply copy the built framework into the app's Frameworks folder. Then, we re-codesign the frameworks in the app so that they're signed with the same identity as our app is. In the script, the input/output file environment variables refer to exactly what they say, the input and output files configured for the build phase in Xcode. The `EXPANDED_CODE_SIGN_IDENTITY` environment variable gives the code signing identity[^2] that was used to sign the app, which is the same one we want to sign the frameworks with. We also need to give codesign the `--force` option, so that it will overwrite any existing signature on the framework.
[^2]: "Expanded" refers to the format of the identifier. If you look at the `codesign` manpage, it describes several formats that can be used with the `--sign` operation. The expanded format is the forty hexadecimal digit SHA-1 hash of the identity.
Now, if we build our app for debugging and take a look at the frameworks folder inside the app bundle, we can see both `Embassy.framework` and `Ambassador.framework` are present. Switching to the release build and again looking at the product in Finder, neither of those two frameworks are present.
One thing to be aware of is that this setup only excludes the frameworks from being _copied_ in release configurations. They'll still be available at compile-time, so if you're not paying attention, you could accidentally import one of them and start using it, only to encounter a crash in production due to a missing framework. As far as I could tell, there's no way within Xcode of specifying that a framework only be compiled against in certain configurations. If I've missed something and this is indeed possible, please let me know.
With that finally done, we can start setting up the web server so we can test our app. The simplest way to do this is to create a base class for all of our test cases, inheriting from `XCTestCase`, that will handle setting up the web server. We'll have a method called `setUpWebServer` which is called from `XCTestCase`'s `setUp` method. (In our actual test cases, we'll need to be sure to call `super.setUp()` if we override the `setUp` method.)
```swift
var eventLoop: EventLoop!
var router: Router!
var server: DefaultHTTPServer!
var eventLoopThreadCondition: NSCondition!
var eventLoopThread: Thread!

private func setUpWebServer() {
    eventLoop = try! SelectorEventLoop(selector: try! KqueueSelector())
    router = Router()
    server = DefaultHTTPServer(eventLoop: eventLoop, port: 8080, app: router.app)
    try! server.start()

    eventLoopThreadCondition = NSCondition()
    eventLoopThread = Thread(block: {
        self.eventLoop.runForever()
        // Signal that the event loop has stopped, so tearDown can finish.
        self.eventLoopThreadCondition.lock()
        self.eventLoopThreadCondition.signal()
        self.eventLoopThreadCondition.unlock()
    })
    eventLoopThread.start()
}
```
In that method, we'll create an event loop which handles performing asynchronous operations for the web server. We'll also need a `Router` object which will handle HTTP requests and delegate them to other handlers based on the request's path. Next, we'll need the web server itself, an instance of `DefaultHTTPServer`. Once we've got that, we can add our routes to the router and start the web server. Finally, we'll create a separate thread that runs in the background for actually processing web requests. We also override the `tearDown` method and stop the web server, waiting for it to gracefully shut down.
```swift
override func tearDown() {
    server.stopAndWait()
    eventLoopThreadCondition.lock()
    eventLoop.stop()
    while eventLoop.running {
        if !eventLoopThreadCondition.wait(until: Date(timeIntervalSinceNow: 10)) {
            fatalError("Join eventLoopThread timeout")
        }
    }
    eventLoopThreadCondition.unlock()
    super.tearDown()
}
```
Now that we've finally gotten everything set up, we can test out the web server and make sure everything's working. In a new test case class, we'll extend the base class we created instead of `XCTestCase`, and we'll override the `setUp` method to add a new route:
```swift
override func setUp() {
    super.setUp()
    router["/hello"] = JSONResponse(handler: { (_) in
        return ["Hello": "World"]
    })
}
```
To actually test it out, a simple test method that just sleeps for a long time will do the trick:
```swift
func testWebServer() {
    sleep(10000000)
}
```
Once we run the test and the simulator's up and running, we can visit `http://localhost:8080/hello` in a web browser and see the JSON response we defined. Now, actually using the mock web server from the app is a simple matter of adding an environment variable to override the default API host.
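As a rough sketch of what that might look like from the test side (the variable name `MOCK_API_HOST`, the test method, and the app-side lookup are assumptions for illustration, not Tusker's actual configuration):
```swift
import XCTest

func testTimeline() {
    // Point the app under test at the mock server; the app would check
    // this variable at startup and swap it in for its real API host.
    let app = XCUIApplication()
    app.launchEnvironment["MOCK_API_HOST"] = "http://localhost:8080"
    app.launch()
    // ... interact with the app as usual ...
}
```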
One caveat to note with this setup is that, because the web server is running in the same process as the test code (just in a different thread), when the debugger pauses in a test (_not_ in the app itself), any web requests we make to the mock server won't complete until the process is resumed.


@@ -0,0 +1,85 @@
```
title = "Faking the Mongo Eval Command"
tags = ["swift"]
date = "2020-01-28 19:33:42 -0400"
short_desc = "MongoDB 4.2 removed the eval command, which is a good security measure, but unfortunate for building database-viewing GUI."
slug = "faking-mongo-eval"
```
One of the changes in MongoDB 4.2 was the removal of the `eval` command. While a reasonable security measure, this is rather annoying if you're building [an app](https://git.shadowfacts.net/shadowfacts/MongoView) for interacting directly with a Mongo database. If you want to be able to run commands directly on the database, you now have to go through the `mongo` shell. This seems straightforward, but actually getting the data back into a format that's usable is a bit of a hassle.
<!-- excerpt-end -->
Actually running the command is, surprisingly, the easiest part of this whole endeavor. You can simply launch a [`Process`](https://developer.apple.com/documentation/foundation/process) which invokes the `mongo` shell with a few options as well as the command to evaluate:
```swift
let mongoProc = Process()
mongoProc.launchPath = "/usr/local/bin/mongo"
mongoProc.arguments = ["mongodb://localhost:27017/your_database", "--quiet", "--norc", "--eval", command]
mongoProc.launch()
```
The `--quiet` option prevents the shell from logging its own messages, making parsing the output a little easier. The `--norc` option prevents it from executing `.mongorc.js` on startup, so that the environment our command runs in is entirely standard. The `--eval` option does exactly what it says: it evaluates the following parameter in the shell.
This bit of code does make the assumption that the mongo shell is installed in or linked to `/usr/local/bin/mongo` (i.e., it's been installed through Homebrew). To do this properly, you would probably want to try and detect where Mongo is installed and use that path, as well as offer the user a way of customizing the path.
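A minimal sketch of that kind of detection, assuming a hard-coded list of candidate locations (the list itself is an assumption; a real implementation would also let the user supply a path):
```swift
import Foundation

// Probe a few common install locations for the mongo shell and use
// the first one that exists and is executable.
let candidatePaths = ["/usr/local/bin/mongo", "/usr/bin/mongo"]
let mongoPath = candidatePaths.first {
    FileManager.default.isExecutableFile(atPath: $0)
}
```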
One additional thing to note is that launching an arbitrary executable requires either the App Sandbox be disabled, or Full Disk Access be requested (at least on macOS Catalina), otherwise the process will fail to launch with a message saying "launch path not accessible".
Getting the output is a little bit more difficult, but still not too complicated.
```swift
let outputPipe = Pipe()
let mongoProc = Process()
// ...
mongoProc.standardOutput = outputPipe
mongoProc.launch()

let outputHandle = outputPipe.fileHandleForReading
var output = ""
var data: Data
repeat {
    // availableData blocks until data arrives, and returns empty
    // data once the pipe has closed.
    data = outputHandle.availableData
    if let chunk = String(data: data, encoding: .utf8) {
        output.append(chunk)
    }
} while data.count > 0
outputHandle.closeFile()
```
We can create a [`Pipe`](https://developer.apple.com/documentation/foundation/pipe) object representing a UNIX pipe. The `mongo` process then uses that pipe as its stdout. We can then read from the pipe's output file handle in order to get the contents of what the shell printed.
Many StackOverflow posts on the topic of getting the output from a process just call `waitUntilExit` on the process and then read the entirety of the data from the pipe's output file handle. While suitable for small output, this approach does not work for situations where the total output of the command is greater than the buffer size of the pipe (as may very well be the case when running queries against large databases). To solve this, we need to continuously read from the pipe until there's no remaining data (meaning the pipe has closed).
Now that we've got the output from Mongo, we need to get it into Swift. Unfortunately, parsing it is a bit annoying. The `mongo` shell outputs a non-standardized format that's like JSON, but with a bunch of JavaScript helpers (e.g. `ObjectId("5e00eb48a14888e105a74fda")`) embedded in it. The [MongoSwift](https://github.com/mongodb/mongo-swift-driver) library can't parse this format (nor can anything else, as far as I can tell). So, in order to turn the shell output into the [Extended JSON](https://docs.mongodb.com/manual/reference/mongodb-extended-json/) format that MongoSwift can parse, we'll need to modify the command that we invoke the shell with.
We'll add some helper code at the beginning of the command we send that defines a function on both the `Object` and `Array` prototypes. This function will take whatever it's invoked on, pass it through `JSON.stringify` to convert it to Extended JSON, and then print it to the console.
The same function defined on the `Array` prototype will perform the same operations, just for each element in the array, instead of on the array object as a whole. This isn't strictly necessary, but for my purposes I don't want to deal with top-level arrays, and this will make handling them a bit simpler, as top-level array elements will be newline-delimited.
```javascript
Object.prototype.printExtJSON = function() { print(JSON.stringify(this)); };
Array.prototype.printExtJSON = function() { this.map(JSON.stringify).forEach(it => print(it)); };
```
For the Array helper, we can't just call `.forEach(print)` since `forEach` passes in multiple arguments (the value, the current index, and the whole array) all of which would get printed out if passed directly to `print`.
We can include these helpers at the beginning of our command and call it on the expression we've been passed in (where `prelude` is a string containing the above JavaScript code):
```swift
let fullCommand = "\(prelude)\(command).printExtJSON()"
```
This approach does have a drawback: only the result of the last expression in the user-inputted `command` will be stringified and printed. The results of any statements before will be lost, unless the command specifically calls our `printExtJSON` helper. Again, for my purposes, this is a reasonable trade off.
Now, back to Swift. We've got the Extended JSON output from the Mongo shell as one giant string and we just need to have MongoSwift parse it into something usable. Because of the way we're printing arrays (separate `print()` calls for each element) and the fact that we've put everything through `JSON.stringify`, it is guaranteed that there will be only one document per line of output. So, to parse each document separately, we can simply split the output we got at newlines and parse each individually:
```swift
let decoder = BSONDecoder()
let result = output.components(separatedBy: "\n").compactMap { (json) in
    try? decoder.decode(BSON.self, from: json)
}
```
And there we have it, the data is finally in a form we can understand from the Swift side of things. If you want to see the whole source code for this, it's [part of MongoView](https://git.shadowfacts.net/shadowfacts/MongoView/src/commit/9488c108b693607e827ef77e5bc16f2cdd491f7c/MongoView/MongoEvaluator.swift). As far as I have come up with, that's about the best way of replicating the `eval` command of previous versions of Mongo. If you have any suggestions for how to improve this or make it more robust, let me know!


@@ -0,0 +1,325 @@
```
title = "Simple Swift Promises"
tags = ["swift"]
date = "2020-02-18 22:10:42 -0400"
short_desc = "Building a rudimentary implementation of asynchronous promises in Swift."
slug = "simple-swift-promises"
```
Recently, I've been working on cleaning up the networking code in Tusker, my iOS client for Mastodon/Pleroma, and I briefly played around with using the new [Combine](https://developer.apple.com/documentation/combine) framework as well as the built-in `URLSession.DataTaskPublisher` helper. Combine, much like SwiftUI, uses Swift's incredibly powerful type system to great advantage because it's a Swift-only framework. It's quite efficient, but because there are so many generic types and implementations of different protocols, the API (in my experience) isn't the most pleasant to work with. I was thinking about other asynchronous programming schemes, and the one that came to mind as being the nicest to use was JavaScript's Promises. It has a fairly simple API, so I started wondering how much work it would be to build something similar in Swift. Turns out: not that much.
<!-- excerpt-end -->
Be warned, this code isn't great. It's the result of a few hours of fiddling around trying to build something, not the best possible solution.
To start off with, there's a `Promise` class that's generic over its result type. It stores 1) a list of closures that will be invoked when it is resolved and 2) the resolved result (or `nil`, if the promise hasn't yet been resolved). There's a helper function that resolves the promise by storing the result and invokes any already-added completion handlers with the result. There's another function that's called to add a handler to the promise, storing it if the promise hasn't been resolved and invoking it immediately if it has.
```swift
public class Promise<Result> {
    private var handlers: [(Result) -> Void] = []
    private var result: Result?

    func resolve(_ result: Result) {
        self.result = result
        self.handlers.forEach { $0(result) }
    }

    func addHandler(_ handler: @escaping (Result) -> Void) {
        if let result = result {
            handler(result)
        } else {
            handlers.append(handler)
        }
    }
}
```
To keep things clean, everything in the public API is implemented in a public extension on `Promise`. To start with, the most primitive way of constructing a promise. The initializer takes a closure (`resultProvider`) which itself receives as an argument a closure that takes a `Result`. In the initializer, the result provider is immediately invoked passing the `self.resolve` helper function from earlier. This will kick off whatever potentially long-running/asynchronous task is being wrapped in a promise. Once the task has completed, it will call the closure passed in as the `resolve` parameter with whatever value it ultimately got.
```swift
public extension Promise {
    convenience init(resultProvider: @escaping (_ resolve: @escaping (Result) -> Void) -> Void) {
        self.init()
        resultProvider(self.resolve)
    }
}
```
Using it might be something like this:
```swift
let promise = Promise<String> { (resolve) in
    performLongOperation() { (result) in
        resolve(result)
    }
}
```
With this in place, the first helper function can be implemented. It will take a single value and produce a promise that is resolved with that value:
```swift
public extension Promise {
    static func resolve<Result>(_ value: Result) -> Promise<Result> {
        let promise = Promise<Result>()
        promise.resolve(value)
        return promise
    }
}
```
Using it is as simple as `Promise.resolve("blah")`. (The only reason this is a static method instead of just another convenience initializer on Promise is to match the JavaScript API that it's modeled after.)
Next up, there needs to be a way of adding a completion block to a promise. There are a couple different possibilities for using this and each will be implemented slightly differently.
The first and simplest is adding a completion block that receives the result of the promise and doesn't return anything. Another `then` implementation takes a closure that receives the value of the promise and produces a new value, resulting in a promise that produces the new value. Finally, there's one that takes a closure which produces a promise of a new value, resulting in a promise that returns the new value.
```swift
public extension Promise {
    @discardableResult
    func then(_ fn: @escaping (Result) -> Void) -> Promise<Result> {
        addHandler(fn)
        return self
    }

    func then<Next>(_ mapper: @escaping (Result) -> Next) -> Promise<Next> {
        let next = Promise<Next>()
        addHandler { (parentResult) in
            let newResult = mapper(parentResult)
            next.resolve(newResult)
        }
        return next
    }

    func then<Next>(_ mapper: @escaping (Result) -> Promise<Next>) -> Promise<Next> {
        let next = Promise<Next>()
        addHandler { (parentResult) in
            let newPromise = mapper(parentResult)
            newPromise.addHandler(next.resolve)
        }
        return next
    }
}
```
In the simplest case, the promise can simply add the handler to itself and return itself for further use. This is marked with `@discardableResult` because the API user should be able to add a completion handler without causing an unnecessary compile-time warning.
When given a closure that produces a value, `then` should return a new promise that's for the type of the result of the closure. To achieve this, the `then` function is generic for the `Next` type which is both the return type of the closure and the result type of the promise returned by `then`. A new promise is constructed, and a completion handler is added to `self` to resolve the next promise once self has resolved with the value produced by passing its own result through the mapper function.
Finally, when given a closure that produces a promise, a new promise is also constructed and a completion handler added to the current promise. This time, when the parent result is passed into the mapper function, it receives back a promise. A completion handler is added to that promise which resolves the next promise with the value it produces, linking the promise returned by `then` onto the promise produced by the closure. This version of `then` in particular is very powerful, because it allows promises to be composed together and sets of nested callbacks to be collapsed.
And with that, a barebones promises API is born.
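As a quick illustration of how these pieces compose (`fetchUser` and `fetchPosts` are hypothetical helpers returning `Promise<User>` and `Promise<[Post]>` respectively, not part of the API above):
```swift
fetchUser(id: 1)
    .then { (user) -> Promise<[Post]> in
        // A mapper returning a promise chains the two operations together.
        fetchPosts(for: user)
    }
    .then { (posts) -> Void in
        // A side-effect-only handler consumes the final value.
        print("loaded \(posts.count) posts")
    }
```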
## Handling Errors
This promise implementation can fairly easily be extended to support handling errors in much the same manner as normal results (a lot of the code will look very familiar).
Promise could be made generic over some failure type as well, but using the plain Swift `Error` type makes things a bit simpler.
```swift
public class Promise<Result> {
    private var catchers: [(Error) -> Void] = []
    private var error: Error?

    func reject(_ error: Error) {
        self.error = error
        self.catchers.forEach { $0(error) }
    }

    func addCatcher(_ catcher: @escaping (Error) -> Void) {
        if let error = error {
            catcher(error)
        } else {
            catchers.append(catcher)
        }
    }
}
```
Similarly to the normal promise resolution stuff, the Promise class stores a list of functions which handle any error as well as the error itself, if one's already been produced. There's also a `reject` internal helper function which is called to reject the promise, storing the error and passing it to any already-registered catch functions. Also paralleling the `addHandler` method, there's an `addCatcher` helper which takes a closure that consumes an error, either invoking it immediately if the promise has already been rejected or appending it to the internal array of catcher functions.
The main convenience initializer is also amended to receive a closure that itself takes two closure parameters: functions that respectively resolve and reject the promise. The closure is invoked immediately passing `self.resolve` and `self.reject` as the resolver and rejecter functions.
```swift
public extension Promise {
    convenience init(resultProvider: @escaping (_ resolve: @escaping (Result) -> Void, _ reject: @escaping (Error) -> Void) -> Void) {
        self.init()
        resultProvider(self.resolve, self.reject)
    }
}
```
With that in place, a static `reject` helper can also be created, which works almost exactly the same as the static `resolve` helper. It takes an error and produces a promise that's rejected with that error by immediately invoking the `reject` function with that error in the result provider closure.
```swift
public extension Promise {
    static func reject<Result>(_ error: Error) -> Promise<Result> {
        let promise = Promise<Result>()
        promise.reject(error)
        return promise
    }
}
```
Additionally, the two `then` functions that produce new promises are changed to reject the next promise when they themselves reject. The one that accepts a closure returning a promise is also tweaked so that, when the new promise is received from the closure, the next promise is made to fail if that promise fails.
```swift
public extension Promise {
    func then<Next>(_ mapper: @escaping (Result) -> Next) -> Promise<Next> {
        let next = Promise<Next>()
        addHandler { (parentResult) in
            let newResult = mapper(parentResult)
            next.resolve(newResult)
        }
        addCatcher(next.reject)
        return next
    }

    func then<Next>(_ mapper: @escaping (Result) -> Promise<Next>) -> Promise<Next> {
        let next = Promise<Next>()
        addHandler { (parentResult) in
            let newPromise = mapper(parentResult)
            newPromise.addHandler(next.resolve)
            newPromise.addCatcher(next.reject)
        }
        addCatcher(next.reject)
        return next
    }
}
```
Next, for actually handling errors there are public `catch` functions on `Promise` in the same fashion as `then`:
```swift
public extension Promise {
    @discardableResult
    func `catch`(_ catcher: @escaping (Error) -> Void) -> Promise<Result> {
        addCatcher(catcher)
        return self
    }

    func `catch`(_ catcher: @escaping (Error) -> Result) -> Promise<Result> {
        let next = Promise<Result>()
        addHandler(next.resolve)
        addCatcher { (error) in
            let newResult = catcher(error)
            next.resolve(newResult)
        }
        return next
    }

    func `catch`(_ catcher: @escaping (Error) -> Promise<Result>) -> Promise<Result> {
        let next = Promise<Result>()
        addHandler(next.resolve)
        addCatcher { (error) in
            let newPromise = catcher(error)
            newPromise.addHandler(next.resolve)
            newPromise.addCatcher(next.reject)
        }
        return next
    }
}
```
The interesting implementations of the `catch` function both first add a handler to themselves which simply resolves the next promise with the same result. They also add catchers which invoke the `catcher` closure with the error produced by the parent. If the closure returns a result immediately, the next promise is resolved with it. If it returns a promise for a new result, a handler is added to that new promise which resolves the next promise when it succeeds and rejects it when it fails. That is, if a catcher function produces a promise that resolves, the parent's error is recovered from, and if it produces a promise that rejects, the parent's error is replaced with the new error.
One difference between these functions and the `then` function is that the result type of the parent promise must be the same as the new promise's result type. This is because JavaScript promises have the semantic that `then` handlers added after a `catch` are invoked regardless of whether the promise resolved or rejected (unless, of course, the catch block produced a rejected promise). The catcher closure must therefore produce a value of the same type as the parent promise; otherwise, there would be cases in which subsequent `then`s could not be invoked with an actual result value. That makes possible the following example, in which a potential error is replaced with a default value, meaning `print` will always be invoked:
```swift
longRunningPossiblyRejectingPromise()
    .catch { (error) -> String in
        // log error
        return "default value"
    }.then { (str) -> Void in
        print(str)
    }
```
Now, the simple promise implementation is capable of handling errors as well.
## Finishing Touches
First, because of the way promises are implemented, the queue a then/catch closure is executed on depends solely on the queue on which the previous promise resolved/rejected. To make switching queues easier, a simple helper function can be written that simply passes the result through, just resolving on a different queue.
```swift
public extension Promise {
    func handle(on queue: DispatchQueue) -> Promise<Result> {
        return self.then { (result) in
            return Promise { (resolve, reject) in
                queue.async {
                    resolve(result)
                }
            }
        }
    }
}
```
Next, the `Promise.all` helper function can be implemented using a `DispatchGroup` to take an array of promises with the same result type and create a new promise that resolves to an array of values of the same type as the result type:
```swift
public extension Promise {
    static func all<Result>(_ promises: [Promise<Result>], queue: DispatchQueue = .main) -> Promise<[Result]> {
        let group = DispatchGroup()
        var results = [Result?](repeating: nil, count: promises.count)
        var firstError: Error?
        for (index, promise) in promises.enumerated() {
            group.enter()
            promise.then { (res) -> Void in
                queue.async {
                    results[index] = res
                    group.leave()
                }
            }.catch { (err) -> Void in
                // Record the first error on the same queue as the result
                // writes above, to avoid racing with them.
                queue.async {
                    if firstError == nil {
                        firstError = err
                    }
                    group.leave()
                }
            }
        }
        return Promise<[Result]> { (resolve, reject) in
            group.notify(queue: queue) {
                if let firstError = firstError {
                    reject(firstError)
                } else {
                    resolve(results.compactMap { $0 })
                }
            }
        }
    }
}
```
This method follows the same semantics as the JavaScript equivalent. If any of the individual promises rejects, the `all` promise will be rejected with the first error that occurred. It also maintains the order of the results.
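A hedged usage sketch (`fetchPage` is a hypothetical helper returning `Promise<Data>`):
```swift
let pagePromises = (1...3).map { fetchPage($0) }
Promise<Data>.all(pagePromises).then { (pages) -> Void in
    // pages come back in the same order as the input promises
    print("loaded \(pages.count) pages")
}.catch { (error) -> Void in
    print("failed with the first error: \(error)")
}
```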
## Conclusion
Promises can be pretty useful, but they're not without their own pitfalls. Primarily, if you want to use the result of an intermediate promise in one further along the chain, you have to do something like passing it along with every intermediate result in a tuple, which is less than ideal. But, in some specific cases, they can be quite useful.
Consider making a new post in a social media app. First, any selected attachments are uploaded to the server. Then, only after all of those have completed successfully, can the post be made. After the post has completed, the resulting post received back from the API is stored. After that, UI changes can be made on the main thread to indicate that the post has succeeded. And, for all of those steps, there's some common error handling code to show a message to the user. As in the following (simplified) example, this fits fairly well into the model of promises we've constructed.
```swift
let attachmentPromises = attachments.map { (attachment) -> Promise<Attachment> in
    ApiClient.shared.uploadAttachment(attachment)
}
Promise<[Attachment]>.all(attachmentPromises).then { (attachments) -> Promise<Post> in
    ApiClient.shared.createPost(text: self.postText, attachments: attachments)
}.then { (post) -> Post in
    ApiObjectCache.shared.store(post)
    self.currentDraft?.remove()
    return post
}.handle(on: DispatchQueue.main).then { (post) in
    self.dismiss(animated: true)
}.catch { (error) -> Void in
    let alert = createAlertController(title: "Couldn't Post", message: error.localizedDescription)
    self.present(alert, animated: true)
}
```
As for my own purposes, I don't know whether I'll end up using this or not. It's neat, but it feels like it's verging on an unnecessary abstraction. Either way, it was a fun experiment.
If you want to check out the full code, the project is in [a repo](https://git.shadowfacts.net/shadowfacts/SimpleSwiftPromises) on my Gitea (trying to do anything asynchronous in a Swift Playground is painful). I've also made public a [branch](https://git.shadowfacts.net/shadowfacts/Tusker/src/branch/simple-swift-promises) of Tusker which is using these promises in some places.


@@ -0,0 +1,40 @@
```
title = "Writing a JavaScript Syntax Highlighter in Swift"
tags = ["swift"]
date = "2020-04-09 11:48:42 -0400"
short_desc = "Things I learned while building a tiny syntax highlighter."
slug = "syntax-highlighting-javascript"
```
For [a project](https://git.shadowfacts.net/shadowfacts/MongoView) I'm currently working on, I need to display some JavaScript code[^1], and because I'm a perfectionist, I want it to be nice and pretty and display it with syntax highlighting. Originally, I was planning to use John Sundell's [Splash](https://github.com/JohnSundell/Splash) Swift syntax highlighting library (both a "(Swift syntax) highlighting library" and a "Swift (syntax highlighting) library"). It can already render to my desired output format, an `NSAttributedString`, and it has an interface for defining new grammars, which I thought would make it relatively easy to extend to support JavaScript. After getting started, it quickly became apparent that it wouldn't be quite so easy. In addition to writing all the code to parse JavaScript, I'd have to go through the Splash codebase and understand a decent amount about how it works. This grew uninteresting pretty quickly, so I decided I would try just writing everything myself. My highlighting needs were fairly simple, how hard could it be?
[^1]: Actually, some [not JavaScript code](/2020/faking-mongo-eval/) that looks for all intents and purposes like JavaScript code, so highlighting it is the same.
<!-- excerpt-end -->
The actual parse loop is fairly straightforward: it starts at the beginning of the string and tries to parse statements until it reaches the end. Parsing a statement means looking at the next character and, depending on what it looks like, trying to parse something of that type. If it starts with a single or double quote, it tries to parse a string literal; if it starts with a digit, it tries to parse a number literal; if it starts with an alphabetical character, it tries to parse an identifier; and so on. Most of the things that can be parsed aren't all that complicated. The most difficult are template, object, and array literals, because they can all contain further expressions, and you need to be careful when recursing to ensure that, while parsing the inner expression, you don't consume part of the outer one.
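To make that concrete, here's a minimal sketch of the dispatch loop, using the `peek`/`consume` helpers described below and hypothetical `parse…` functions (an illustration of the approach, not the actual code):
```swift
// Dispatch on the next unicode scalar to decide what kind of
// token to try parsing next.
while let char = peek() {
    if char == "\"" || char == "'" {
        parseStringLiteral()
    } else if CharacterSet.decimalDigits.contains(char) {
        parseNumberLiteral()
    } else if CharacterSet.letters.contains(char) {
        parseIdentifier()
    } else {
        consume() // punctuation, operators, whitespace, etc.
    }
}
```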
One simplifying factor is that there are a number of things my highlighter intentionally doesn't handle, including keywords and block statements. The main reason is I expect those to come up rarely, if ever, in the context I'm using this in. I also purposely didn't touch a bunch of other things that an actual JavaScript parser/interpreter would have to be concerned with in order to actually execute code. At the top of that list is things like automatic semicolon insertion (JavaScript's weird way of making semicolons optional), and operator precedence, since they have no effect on the highlighted output.
One of the more annoying parts, completely unrelated to JavaScript, is dealing with strings in Swift. Sure, Swift's handling of strings is totally safe and correct, but it's an absolute pain in the ass to use. _Want to get the fifth character in a string? Just use `string[string.index(string.startIndex, offsetBy: 5)]`, it's super simple!_ So, the highlighter keeps track of `String.Index` internally and has several helper methods for moving around within the string. Furthermore, the CharacterSet class is weird and doesn't work the way you'd expect. Because it's bridged from Objective-C, its `contains(_:)` method doesn't take a Swift `Character`; it takes a `Unicode.Scalar`. Because of this, the entire highlighter doesn't care about characters as Swift views them, only about Unicode scalars, using the string's `String.UnicodeScalarView`.
Also, this may be the first time I've ever used while/let in Swift. The peek function returns the next character in the string, or `nil`, if there are none remaining, so, with while/let, consuming all characters in a set is as simple as:
```swift
while let char = peek(),
      CharacterSet.whitespacesAndNewlines.contains(char) {
    consume()
}
```
I spent a couple of days profiling it, trying to improve the performance to a point where it's usable for live-highlighting a decently large file. Right now, a full rehighlight of a 1200 line JSON object takes around 10 ms, which, while not spectacularly fast, is fast enough that there's no appreciable latency while typing. One of the single biggest changes I made was to ensure that I'm only ever using the string's Unicode scalar view. Just going from `string[currentIndex] == "\\"` to `string.unicodeScalars[currentIndex] == "\\"` in the JS-string handling code resulted in an 8 ms improvement. Another performance-driven change I made, though not to the syntax highlighter itself, was to try to only rehighlight when absolutely necessary. For the most common operations, typing or deleting a single character, I find the token that is being modified, and, if the added/removed character wouldn't cause a structural change to the rest of the text (e.g., inserting a character inside of a string), I can alter the length of the modified token and shift the locations of all subsequent tokens. This takes about 70 &mu;s for deleting a single character and 130 &mu;s for inserting one. Inserting, I think (but haven't verified), takes so much longer because I also have to add an attribute to the attributed string for the newly inserted character, which kicks off a bunch of work inside the text view.
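A rough sketch of that fast path, assuming tokens store `NSRange`s into the text (the `Token` type and the surrounding bookkeeping are hypothetical):
```swift
import Foundation

struct Token {
    var range: NSRange
}

// Resize the edited token and slide every subsequent token over,
// avoiding a full rehighlight for structurally-safe edits.
func applyEdit(to tokens: inout [Token], editedIndex: Int, lengthDelta: Int) {
    tokens[editedIndex].range.length += lengthDelta
    for i in (editedIndex + 1)..<tokens.count {
        tokens[i].range.location += lengthDelta
    }
}
```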
## Conclusion
If you'd asked me a year ago, heck, even a couple months ago, if I'd ever think about undertaking a project like this myself, I'd have said absolutely not and proceeded to go find a third party library that could do the job adequately. But recently, I've been watching [Jonathan Blow](https://youtu.be/MnctEW1oL-E) talk about building parsers and [Andreas Kling](https://youtu.be/byNwCHc_IIM) actually build a JavaScript interpreter starting from scratch, and there's one thing that they both mentioned on multiple occasions that really stuck with me: it's just code. Sure, its input is source code, but the operations it performs to produce syntax highlighted output aren't anything insanely complicated or out of the reach of any reasonably experienced programmer.
I'm not trying to claim that what I've written is anywhere near as complicated as a full-blown parser or interpreter that could be used to execute code. But nor is it trivial.
But it is one that, not too long ago, I wouldn't have willingly undertaken. Parsers, particularly parsers for programming language source code, have this perception that only the best of the best can build them because they're so incredibly complicated. And that's not true at all. Sure, they're complex programs, because the problem they're solving is non-trivial. But the way you go about solving it isn't insanely difficult, doesn't require any specialized knowledge, and doesn't use any uncommon techniques. The most important thing is breaking down one big problem ("how do you parse source code?") into smaller and smaller chunks that can be solved individually and then combined together.


@@ -0,0 +1,112 @@
```
title = "The Sorry State of Thunderbolt 3 Docks"
tags = ["computers"]
date = "2020-04-13 17:19:42 -0400"
short_desc = "On a quest to find a Thunderbolt dock that meets my needs."
slug = "thunderbolt-3"
```
My primary computer is a 2019 16" MacBook Pro. It has four ports. All of which are USB-C/Thunderbolt 3. Enough words by enough people have been expended complaining about how the lack of common ports makes their lives more difficult, so instead, I'm going to complain about how the solutions for connecting non-USB-C peripherals are awful. This is something I've ranted about multiple times on the fediverse, since it's something you'd think would be a solved problem by now. But clearly it isn't, so here we go again.
<!-- excerpt-end -->
I've got a pair of monitors connected to my laptop, one of which uses Mini DisplayPort and the other of which uses HDMI. Currently, I've got a USB-C &#x2192; MiniDP dongle as well as a USB-C &#x2192; HDMI (and several other things) dongle. Unfortunately, they're both rather flaky. Every other time (roughly), when my laptop wakes from sleep, the MiniDP dongle won't work and I'll have to disconnect/reconnect it to get video output to that monitor. Completely unrelated, the HDMI dongle will randomly (often multiple times a day) start outputting incorrectly. The image will wrap around by about 25% of the width of the screen, resulting in the left edge of the picture being displayed about a quarter of the way over the screen, and the rightmost quarter of the picture displaying on the leftmost quarter of the screen. Similarly, the fix for this is to disconnect and reconnect the HDMI cable. In the past three days, the two adapters have flaked out a combined 11 times. This has become particularly annoying as, given the current state of things, I'm working from home through at least mid-June, meaning I'm using my computer far more than I otherwise would.
## A Survey of Bad Options
"Thunderbolt™ 3 The USB-C That Does It All" boldly proclaims an [Intel press release](https://thunderbolttechnology.net/blog/thunderbolt-3-usb-c-does-it-all) from June 2015. So, let's see exactly how many choice there are that can Do It All. The ports I'm looking for are: two video outputs (preferably one DisplayPort and one HDMI), two USB type-A, and ethernet. Power delivery would be nice, but isn't a requirement. Not too much to ask, right?
### Option 1 - $250 <!-- https://www.caldigit.com/ts3-plus/ -->
- USB-C, Thunderbolt 3, host connection, 87W USB-PD
- USB-C, 3.1 Gen 1, data only
- USB-C, 3.1 Gen 2, data only
- USB-C, Thunderbolt 3, downstream daisy-chaining
- 5x USB-A, 3.1 Gen 1
- Gigabit Ethernet
- DisplayPort
- SD Card
- 3.5mm audio in/out
- S/PDIF
This would require a DisplayPort &#x2192; Mini DisplayPort adapter (this is the least of my concerns, since it could at least operate passively, MiniDP being just a different physical connector for the same protocol) as well as a USB-C &#x2192; HDMI adapter, the very thing I'm trying to get away from. It also only provides 87W of power delivery, 9 watts less than the charger that comes in the box with the MacBook Pro. While plugging in the laptop's own charger isn't a big deal, I don't want to pay several hundred dollars for a device most of whose capabilities I would not be using.
### Option 2 - $200 <!-- https://www.caldigit.com/usb-c-pro-dock/ -->
- USB-C, Thunderbolt 3 host connection, 85W USB-PD
- 2x DisplayPort
- 3x USB-A, 3.2 Gen 1
- 1x USB-C, 3.2 Gen 2
- SD Card
- Gigabit Ethernet
- 3.5mm audio in/out
Apparently, the lack of a USB-C port, two USB-A ports, S/PDIF, and two whole watts of power delivery saves you $50, since this is from the same manufacturer as option 1. This option would also require chaining dongles (either USB-C &#x2192; HDMI or DisplayPort &#x2192; HDMI) and may not be able to fully power the laptop under full load.
### Option 3 - $150 <!-- https://www.caldigit.com/mini-dock/ -->
- USB-C, Thunderbolt 3 host connection
- Gigabit Ethernet
- Either of:
- 2x DisplayPort and 1x USB-A 3.0
- 2x HDMI, 1x USB-A 3.0, and 1x USB-A 2.0
Yes, the HDMI model has an extra USB type A 2.0 port for reasons I cannot find elucidated anywhere on the manufacturer's website (I would assume bandwidth limitations, but the manufacturer claims both types of video connections support up to 4096x2160 @ 60Hz, so who knows). Either model would require me to adapt either DisplayPort &#x2192; HDMI or HDMI &#x2192; DisplayPort, and chaining video adapters is something I wish to avoid. In any case, a single USB-A port is not enough to connect all of my peripherals, meaning I would need yet another dongle. This also has no external power input or power delivery, so I would still be using three of my four ports.
### Option 4 - $250 <!-- https://store.hp.com/us/en/pdp/hp-thunderbolt-dock-120w-g2 -->
- USB-C, Thunderbolt 3 host connection
- USB-C port of indeterminate capabilities
- USB-C that supports DisplayPort
- 3x USB-A
- 2x DisplayPort
- Gigabit Ethernet
- 3.5mm audio
- VGA
And so we arrive at the first option that can do 100W power delivery. Of course, this would still require a DisplayPort &#x2192; HDMI adapter. This one has particularly mixed reviews as to whether it works with Macs or not (they all have mixed reviews about reliability, but this one especially so). Some claim it doesn't support multiple monitors since Macs don't support DisplayPort Multi-Stream Transport.
### Option 5 - $300 <!-- https://www.lenovo.com/us/en/accessories-and-monitors/home-office/Thunderbolt-Dock-Gen-2-US/p/40AN0135US -->
- USB-C, Thunderbolt 3 host connection, 90W USB-PD
- USB-C, Thunderbolt 3
- 2x pairs of DisplayPort/HDMI, only one of which is usable at a time
- 5x USB-A, 3.1 Gen 2
- Gigabit Ethernet
- 3.5mm audio
Using multiple of the video outputs requires Multi-Stream Transport support, meaning the sole USB-C port would need to be connected to a USB-C &#x2192; HDMI/DisplayPort dongle to get a second video output (this somehow bypasses the MST requirement?). Also, apparently both the ethernet and audio are not supported under macOS.
### Option 6 - $240 <!-- https://www.cablematters.com/pc-887-130-certified-aluminum-thunderbolt-3-docking-station-with-dual-4k-60hz-video-and-60w-power-delivery.aspx -->
- USB-C, Thunderbolt 3 host connection, 60W USB-PD
- USB-C, Thunderbolt 3
- 5x USB-A 3.0
- HDMI
- Gigabit Ethernet
- SD Card
Once again, a USB-C &#x2192; DisplayPort dongle would be needed, and the builtin power delivery isn't anywhere near sufficient.
### Option 7 - $150 <!-- https://www.elgato.com/en/dock/thunderbolt-3-mini -->
- USB-C, Thunderbolt 3 host connection
- DisplayPort
- HDMI
- USB-A 3.1
- Gigabit Ethernet
The first option that has both a DisplayPort and an HDMI connection. Of course, it doesn't have enough USB-A ports, or any power delivery. Despite its lack of ports, it would seem like one of the better options. But I can't find any concrete information on the internet about whether or not it supports outputting to two displays simultaneously without mirroring on macOS (that is, whether MST support is required or not).
### Option 8 - $400 <!-- https://www.razer.com/gaming-laptops/razer-core-x -->
- USB-C, Thunderbolt 3 host connection, 100W USB-PD
- However many video outputs I want
The final option is an eGPU enclosure. This one does support power delivery, enough to sustain my laptop under full load. I have an RX580 from my old Hackintosh which has native support in macOS, is comparable to the dedicated GPU in my laptop, and has plenty of video outputs (DisplayPort, HDMI, and DVI-D). Of course, it doesn't have any additional ports (there are models which also provide USB-A and Ethernet, but the graphics card alone will consume all 40Gbps of bandwidth Thunderbolt 3 has, so adding anything more will bottleneck it and cause stability to suffer). While somewhat tempting (the idea of upgrading to a beefier graphics card is interesting), $400 is an absurdly high cost for what amounts to: a box, a cheap fan, a cheap power supply, and a Thunderbolt controller chip.
There are even more options I haven't bothered to list, since they have pretty much the exact same port selection and set of trade-offs as one of these. Even the least bad ones are still very expensive for what they offer. There are even more constraints, because macOS support (even putting aside MST) is by no means a given. The ones I feel most confident about are from the only brand that's sold through Apple's own store (and also some of the most expensive).
## A Historical Perspective
None of these options entirely meets my desired set of capabilities, so clearly something that does is impossible, right? There's absolutely no way a device could exist that connects to a computer, provides power to it, allows it to output multiple video signals, hooks it up to several peripherals, and connects it to a hard-wired network, right? Such an astounding assortment of ports, such a triumph of technology, and miracle of modern engineering this fantastical gizmo would be, that it couldn't possibly exist. Right?
Thunderbolt 3 was first implemented in consumer products in 2015. A full 3 years earlier, in 2012, Lenovo had released the ["ThinkPad Mini Dock Plus Series 3"](https://support.lenovo.com/us/en/solutions/migr-74447), which, in addition to an unwieldy name, has a stunning variety of ports, the likes of which the present day can only dream of. It has six USB ports; five video outputs (up to three of which can be used simultaneously), including DisplayPort, DVI-D, and VGA; audio in/out; eSATA; ethernet; and a 135W charger. Compare that to even the best, most expensive options I listed above. Quite a stark contrast. Granted, the ThinkPad dock uses a proprietary connector and doesn't have to care about the vastly higher bandwidth standards that modern docks do. But, accepting the different times they were released, the now 8 year old ThinkPad dock is more versatile and more useful in every way[^1]. For my exact use case of a couple of displays, a few USB peripherals, a wired network connection, and power, the ThinkPad dock would have been more than sufficient.
[^1]: It may not have an SD card reader, as is common today, but the [ThinkPad W510](https://support.lenovo.com/us/en/solutions/pd003320) it was designed to complement had one built in, along with a myriad of other ports.
## Conclusion
So, has the magnificent pairing of Thunderbolt 3 and USB Type-C delivered on its promise to be the one port that does it all? In my estimation: no, absolutely not. There's no one dock that meets my needs, and of the options that don't fulfill my requirements, there isn't a single one that is obviously the least bad. What happened? How did we go from these behemoths of connectivity to not only laptops, but docks, with vastly fewer ports? Even Lenovo's own current generation ThinkPad docks, which are based on USB-C and Thunderbolt, offer far fewer connections than 8 years ago. Maybe USB 4 (which has subsumed Thunderbolt) will fix things. Or maybe it will just introduce a whole new set of confusing constraints and restrictions to keep track of.
I don't know what I'm going to end up doing. There are so many different choices with such a huge variety of trade-offs that keeping them all in my head at once and trying to make a decision is rather difficult. Who knows, maybe I'll just stick with my broken USB-C dongles.


@@ -0,0 +1,32 @@
```
title = "Switching to Vim"
tags = ["editors"]
date = "2020-05-21 18:22:42 -0400"
short_desc = "How I went about joining the cult of Vim."
slug = "switching-to-vim"
```
At the beginning of last summer, for reasons that I can no longer recall, I set a goal for myself to learn Vim over that summer. Now that that summer is over and the next one is almost here, I wanted to reflect on the process and whether I achieved my goal. By no means have I mastered Vim or become a Vim expert, but I feel reasonably proficient. I use Vim itself on the command line and in GUI form, as well as Vim bindings/plugins in all my IDEs. It has gotten so strongly ingrained into my muscle memory that I now find myself hitting ESC to exit insert mode in text boxes in my web browser and typing Vim commands into word processors.
<!-- excerpt-end -->
In order to force myself to try and become more proficient and get the keybindings worked into my muscle memory, I made the decision fairly early on (shortly after I felt comfortable with the basics of text navigation and editing) to switch all of my IDEs to use Vim keybindings/plugins and use it full time.[^1]
[^1]: Since I've gone all in on Vim, I also switched to rebinding the Caps Lock key to Escape on all my computers and keyboards. But I don't just use it in Vim; I use the Caps Lock key as Esc in macOS for everyday things, and even games. A side effect is that I now find it infuriating to use computers where Caps Lock has not been remapped to Escape, because I press it instinctively and end up changing case and getting confused all too frequently.
I briefly considered using Vim independently of any IDE and relying entirely on plugins and command line tools to provide the rest of the functionality of the IDE, but I didn't end up doing this for a couple reasons. First and foremost, a great deal of the functionality the IDEs provide is not easily replicable. Secondly, I didn't want to have to learn even more things simultaneously. I wanted to stay focused on learning Vim, and adding a whole new set of tooling on top of that would distract from my main objective.
While I use Vim independently on some things, for most of the projects I work on both personally and for my job, I use an IDE. The ones I use most frequently are RubyMine for work and Xcode for personal iOS projects (and occasionally others in the JetBrains family, like IDEA and WebStorm).
The JetBrains IDEs sport a truly wonderful first-party plugin called [IdeaVim](https://github.com/JetBrains/ideavim) which emulates a great deal of Vim. In my experience, the only feature of Vim I've found that IdeaVim does not support is user-defined text objects (though all of the ones I use in Vim itself are already supported by IdeaVim; granted, I'm not doing anything crazily complicated).
For Xcode, there is a third-party plugin called [XVim](https://github.com/XVimProject/XVim2/) which performs a similar function to IdeaVim. Unfortunately, XVim is nowhere near as full-featured or comprehensive as IdeaVim. The feature I use most frequently that it doesn't support is the `.` command repeating a change involving a text object (e.g. `ciwFoo<ESC>`): attempting it either crashes Xcode or performs some sequence of actions that bears no relation to my actual command. In environments where it is supported, I've found it very useful for pulling out bits of text that I recently deleted. Another of XVim's shortcomings compared to IdeaVim is that, in order to use it, you must have a copy of Xcode that has been re-codesigned with your own self-signed certificate; when Xcode is signed by Apple, it does not load third-party plugins. This means I end up having multiple copies of Xcode on my disk, since I prefer to keep the Apple-signed ones and an old version around just in case (I currently have 4 copies of Xcode, for a total of 73.76 GB on disk). I've considered switching to AppCode, JetBrains' IDE for iOS/macOS apps, but on the brief occasions I've tried it, it's been rather lacking compared to Xcode, and so not worth making the switch.[^2]
[^2]: I mostly remember to not press `.` in normal mode. Xcode only crashes a couple times per week.
For the projects I work on which I don't use an IDE for (primarily Elixir projects), I use [MacVim](https://github.com/macvim-dev/macvim) with an assortment of plugins. I use [NERDTree](https://github.com/scrooloose/nerdtree) as a file manager, [fzf](https://github.com/junegunn/fzf) for quickly switching files, [coc.nvim](https://github.com/neoclide/coc.nvim) for integration with language servers (including Elixir and TypeScript), and a few other small plugins.
At this point, I could go on about how switching to Vim has vastly increased my productivity and turned me into the mythic 10x developer. But that isn't true. I can only imagine switching to Vim has made little, if any, difference to my efficiency. Sure, when I'm typing or changing large sections of text, I feel faster. But the fact is, the vast majority of the time, I'm not doing that. The bottleneck is not my hands or my keyboard; it's my brain. And Vim can't magically change that.

View File

@ -0,0 +1,18 @@
```
title = "Algorithmic Bias"
tags = ["misc", "social media"]
date = "2020-06-05 09:55:42 -0400"
slug = "algorithmic-bias"
```
I am subscribed to Marques Brownlee on YouTube. I watch almost every one of his videos. YouTube is smart. It knows this, it recommends me almost all of his videos. But not this one. No matter how many times I refresh the page. No matter how far down the page I scroll. Despite the fact that the video has gotten 2.3 million views in 16 hours, performing better than a number of his recent videos. Despite the fact that it's recommending me videos that are from people I am not subscribed to, videos that are years old, videos that I have watched before, videos that are about politics, videos that are about the ongoing Black Lives Matter protests in the wake of George Floyd's murder.
This is what algorithmic bias looks like. **Algorithms are not neutral.**[^1]
<figure>
<img src="/2020/algorithmic-bias/youtube_thumb.png" alt="YouTube thumbnail of an MKBHD video">
<figcaption>A screenshot of the thumbnail for a YouTube video from MKBHD titled "<a href="https://www.youtube.com/watch?v=o-_WXXVye3Y" data-no-link-decoration>Reflecting on the Color of My Skin</a>".</figcaption>
</figure>
[^1]: "Algorithm" is a word here used not in the purely computer science sense, but to mean a element of software which operates in a black box, often with a machine learning component, with little or no human supervision, input, or control.

View File

@ -0,0 +1,170 @@
```
title = "Replicating Safari's Link Preview Animation"
tags = ["swift"]
date = "2020-07-03 16:28:42 -0400"
slug = "uipreviewparameters-textlinerects"
```
**Update:** See [the follow up](/2022/textkit-2/) for info about adapting this to TextKit 2.
In iOS 13, Apple replaced the Peek and Pop force touch system with new context menus and previews[^1]. These new previews have a fancy animation for when they appear, in which they expand out of the content and onto the screen. Back when I first replaced the previews in Tusker with the new context menus (over a year ago, shortly after WWDC19), I wanted to replicate the behavior in Safari for links and mentions in post bodies. At the time, there was pretty much zero official documentation about the new context menu APIs, so I decided to wait for either Apple to publish docs or for someone else to figure it out first. Now that WWDC20 has come and gone, and I've been working on it a little bit at a time for over a year, I finally have a fully working implementation.
[^1]: I still miss popping into view controllers. RIP.
<!-- excerpt-end -->
Here's what the Safari behavior looks like with animations slowed down, both with a single-line link and one that spans multiple lines:
<div>
<video controls style="width: 50%; float: left;" title="Screen recording of a single-line link being previewed in Safari on iOS">
<source src="/2020/uipreviewparameters-textlinerects/safari.mp4" type="video/mp4">
</video>
<video controls style="width: 50%; float: right;" title="Screen recording of a multi-line link being previewed in Safari on iOS">
<source src="/2020/uipreviewparameters-textlinerects/safari-multiline.mp4" type="video/mp4">
</video>
</div>
They both look pretty much like you'd expect. In the single-line case, the text of the link appears inside a sort of bubble which animates into the link preview. For the multi-line link, it's pretty similar, but there are two bubbles, each of which contains the part of the link that's on its respective line. If the text fragments overlapped horizontally at all, the two bubbles would be merged together. By default, if you don't provide a custom preview, UIKit will simply display the entire view during the animation. This doesn't look great, particularly for long pieces of text, of which the link may only be a small portion. Only highlighting the link text looks much better. So, let's see what it takes to reimplement the same behavior as Safari.
First, a little bit about how custom previews work: From the `contextMenuInteraction(_:previewForHighlightingMenuWithConfiguration:)` method, you provide a custom `UITargetedPreview` describing the configuration of the preview itself. It takes the view to be displayed as the preview, a preview parameters object, and a preview target object. The preview parameters describe how the preview should be displayed and the preview target defines what view the preview view should be within and where inside that view it should be anchored. So, in the `previewForHighlightingMenuWithConfiguration` method, we need to construct all of these and then assemble them into a targeted preview.
The most obvious starting place is the [`UIPreviewParameters(textLineRects:)`](https://developer.apple.com/documentation/uikit/uipreviewparameters/3295952-init) initializer, since it directly deals with the behavior we're trying to replicate. It takes an array of `CGRect`s (wrapped in `NSValue`s) representing the rectangles occupied by the text (one rect per line the text is on) in the coordinate space of the preview view.
Because this is a `UITextView` subclass, the TextKit stack is already set up and we can directly access it. `NSLayoutManager` has a handy method called `enumerateEnclosingRects(forGlyphRange:withinSelectedGlyphRange:in:)` which takes the range of some text and gives us access to the rectangles that the text occupies.
But to use that method, we need to get the link range from somewhere. Since we have the context menu interaction in the `previewForHighlightingMenuWithConfiguration` method, we could ask it for its location within ourself and then find the link and range at that point, but that would be duplicating work done almost immediately before in the `contextMenuInteraction(_:configurationForMenuAtLocation:)` method. Instead, we'll store the link range on a private property of the text view subclass from the `configurationForMenuAtLocation` method, and then retrieve it in `previewForHighlightingMenuWithConfiguration`. (The code below assumes that there already exists a method called `getLinkRangeAtPoint` which takes a point inside the text view and returns the range that the link spans, if there is one.)
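For completeness, here's a minimal sketch of what such a `getLinkRangeAtPoint` helper might look like, assuming links are stored as `.link` attributes on the attributed text (a real implementation would also need to account for `textContainerInset` and similar details):
```swift
// A hypothetical implementation of getLinkRangeAtPoint: map the touch
// point to a character index, then look up the .link attribute's range.
private func getLinkRangeAtPoint(_ point: CGPoint) -> NSRange? {
    let index = layoutManager.characterIndex(for: point, in: textContainer, fractionOfDistanceBetweenInsertionPoints: nil)
    guard index < textStorage.length else { return nil }
    var range = NSRange(location: NSNotFound, length: 0)
    guard textStorage.attribute(.link, at: index, effectiveRange: &range) != nil else { return nil }
    return range
}
```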
```swift
private var currentPreviewedLinkRange: NSRange?
func contextMenuInteraction(_ interaction: UIContextMenuInteraction, configurationForMenuAtLocation location: CGPoint) -> UIContextMenuConfiguration? {
    guard let range = self.getLinkRangeAtPoint(location) else {
        return nil
    }
    self.currentPreviewedLinkRange = range
    return // ...
}
```
Then, in the `contextMenuInteraction(_:previewForHighlightingMenuWithConfiguration:)` method, we can grab the stored range and get to creating the preview. Returning `nil` from this method will simply use the default preview configuration, showing the entire text view in the preview animation, which is a reasonable fallback if for some reason we don't have the link range.
```swift
func contextMenuInteraction(_ interaction: UIContextMenuInteraction, previewForHighlightingMenuWithConfiguration configuration: UIContextMenuConfiguration) -> UITargetedPreview? {
    guard let linkRange = currentPreviewedLinkRange else {
        return nil
    }
    currentPreviewedLinkRange = nil
}
```
With the link range, we can use the `enumerateEnclosingRects` method on the text view's layout manager and then use those rectangles to construct the preview parameters object.
```swift
let notFoundRange = NSRange(location: NSNotFound, length: 0)
var textLineRects = [CGRect]()
self.layoutManager.enumerateEnclosingRects(forGlyphRange: linkRange,
                                           withinSelectedGlyphRange: notFoundRange,
                                           in: self.textContainer) { (rect, stop) in
    textLineRects.append(rect)
}
let parameters = UIPreviewParameters(textLineRects: textLineRects.map { NSValue(cgRect: $0) })
```
Now that we've finally got the text line rects and the preview parameters, we can move on to the next piece of the puzzle: the view that's going to be shown in the preview animation. You might think that we could use `self` as the preview view, but that wouldn't work. While the animation is running, the preview view is removed from the regular view hierarchy, meaning the rest of the text would disappear while the animation is running (what's more, since later we'll use `self` as the target for the preview, the preview itself wouldn't even appear). We could try to duplicate ourself, and copy over all the layout-impacting attributes, but that's just asking for slight layout differences.[^2] Instead, to ensure we get a view that appears exactly the same as our text view, we can use a [snapshot view](https://developer.apple.com/documentation/uikit/uiview/1622531-snapshotview).
[^2]: Indeed, when I attempted exactly this, there was some attribute I couldn't find (even through diffing the internal descriptions of each text view) that was altering the layout of the preview copy.
```swift
guard let snapshot = self.snapshotView(afterScreenUpdates: false) else {
    return nil
}
```
Next, we need to create a preview target. Reading the documentation, you might notice the `UITargetedPreview(view:parameters:)` initializer and wonder why this is even necessary. Well, if you try to use that initializer with a snapshot view, your app will crash because the snapshot view hasn't been added to the view hierarchy, and therefore, because there's no target, UIKit doesn't know where to put it. The `UIPreviewTarget` describes exactly that. It needs to know the container view that the preview will be placed in (simply `self`, since we're in the text view) as well as where inside the target container the center of the preview should be anchored. We want to anchor the center point of the preview view such that the text of the link appears to be in the exact same place. With the text line rects, we can determine the overall bounds of the link's text fragment. From there, since the preview will have the same bounding rectangle as the link text, we can just use the center of the rect enclosing the text.
```swift
var minX = CGFloat.greatestFiniteMagnitude, maxX = -CGFloat.greatestFiniteMagnitude,
    minY = CGFloat.greatestFiniteMagnitude, maxY = -CGFloat.greatestFiniteMagnitude
for rect in textLineRects {
    minX = min(rect.minX, minX)
    maxX = max(rect.maxX, maxX)
    minY = min(rect.minY, minY)
    maxY = max(rect.maxY, maxY)
}
let textLineRectsCenter = CGPoint(x: (minX + maxX) / 2, y: (minY + maxY) / 2)
let target = UIPreviewTarget(container: self, center: textLineRectsCenter)
```
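(As an aside, the same center can be computed more compactly by folding the rects together with `CGRect.union`:)
```swift
// Equivalent to the min/max loop above: CGRect.null is the identity
// element for union, so folding yields the smallest enclosing rect.
let enclosing = textLineRects.reduce(CGRect.null) { $0.union($1) }
let center = CGPoint(x: enclosing.midX, y: enclosing.midY)
```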
Then, we can finally construct and return the targeted preview:
```swift
return UITargetedPreview(view: snapshot, parameters: parameters, target: target)
```
If we run this, the preview animation will be limited to the text of the link, and it looks pretty good:
<div>
<video controls style="width: 50%; float: left;" title="Screen recording of a link being previewed appearing as expected.">
<source src="/2020/uipreviewparameters-textlinerects/unmasked.mp4" type="video/mp4">
</video>
<video controls style="width: 50%; float: right;" title="Screen recording of a link being previewed next to other text, with the other text visible inside the preview animation.">
<source src="/2020/uipreviewparameters-textlinerects/unmasked-broken.mp4" type="video/mp4">
</video>
</div>
Unfortunately, there's still a pretty big problem: if the link is near enough other text, and particularly if it spans multiple lines, the text that's visually near the link will be partly visible in the preview. This happens because UIKit takes the text line rects passed into `UIPreviewParameters`, does some stuff to expand them, round the corners, and merge them, creating the bubble shape, and then masks the preview view to the resulting path. Unfortunately, it doesn't mask the text beforehand; it masks everything in one pass. So, what we need to do ourselves, before giving UIKit the preview view, is mask the view directly around the text, preventing anything else from seeping in.
To do this, we have to do something similar to what UIKit is doing. We need to generate a path which contains the entirety of all of the text line rects, and nothing more. (Note: this is not the convex hull problem; the convex hull would cut straight across the stair-steps between lines, whereas we need a possibly non-convex path that hugs the rectangles exactly.) Implementing this isn't particularly interesting, and is left as an exercise to the reader[^3]. Assuming there's a custom initializer `UIBezierPath(wrappingAroundRects:)` which produces a path from an array of `CGRect`s, the obvious thing to do is mask the view to that path using a layer mask with a `CAShapeLayer`:
[^3]: Or, if you really wanted, you could look at my fairly primitive [solution](https://git.shadowfacts.net/shadowfacts/Tusker/src/commit/f86d3a0ed15ac23a77c47d9f56deb91e2eba661c/Tusker/Extensions/UIBezierPath+Helpers.swift). In a nutshell: it constructs a path by starting at the top left corner of the top-most rect, walks down the left side of the rects, stair-stepping as necessary when the left bound changes, across the bottom, back up the right side, and then finally across the top.
```swift
let path = UIBezierPath(wrappingAroundRects: textLineRects)
let maskLayer = CAShapeLayer()
maskLayer.path = path.cgPath
snapshot.layer.mask = maskLayer
```
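If you'd rather not click through, here's a minimal sketch of such an initializer. It assumes the rects are ordered top to bottom, vertically adjacent, and horizontally overlapping (which is what `enumerateEnclosingRects` produces for a single run of wrapped text), and it skips the corner rounding and rect merging a production version would want:
```swift
// A simplified, assumption-laden sketch of UIBezierPath(wrappingAroundRects:).
// Walks down the left-hand edges, stair-stepping at each line, then back
// up the right-hand edges, producing a polygon enclosing every rect.
extension UIBezierPath {
    convenience init(wrappingAroundRects rects: [CGRect]) {
        self.init()
        guard let first = rects.first else { return }
        move(to: CGPoint(x: first.minX, y: first.minY))
        for rect in rects {
            addLine(to: CGPoint(x: rect.minX, y: rect.minY))
            addLine(to: CGPoint(x: rect.minX, y: rect.maxY))
        }
        for rect in rects.reversed() {
            addLine(to: CGPoint(x: rect.maxX, y: rect.maxY))
            addLine(to: CGPoint(x: rect.maxX, y: rect.minY))
        }
        close()
    }
}
```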
Running this, however, doesn't quite work. Everything looks exactly the same as before, with the nearby text appearing inside the preview during the animation. You might check the documentation and think to try the `visiblePath` property on `UIPreviewParameters`. Unfortunately, that entirely overrides the mask generated by the `textLineRects` initializer, which is the exact opposite of the current problem.
It seems that, when using the `UIPreviewParameters(textLineRects:)` initializer, UIKit will silently discard any existing layer mask on the view provided as the preview view (FB7832297). This is also true for [`UIView` masks](https://developer.apple.com/documentation/uikit/uiview/1622557-mask). This caused a great deal of hair-pulling for me, until I disabled the preview parameters stuff and the mask suddenly started working. The simple workaround for this is to just apply the mask to the snapshot view, embed the snapshot inside an additional container view of the same size, and then use that container as the view for the `UITargetedPreview`:
```swift
let snapshotContainer = UIView(frame: snapshot.bounds)
snapshotContainer.addSubview(snapshot)
return UITargetedPreview(view: snapshotContainer, parameters: parameters, target: target)
```
And with that, only the link text is visible in the preview animation and it expands nicely into the full preview:
<div>
<video controls style="width: 50%; margin: 0 auto; display: block;" title="Screen recording of a link being previewed and dismissed with the link text animating back to its starting position upon dismissal.">
<source src="/2020/uipreviewparameters-textlinerects/masked.mp4" type="video/mp4">
</video>
</div>
But, there's still one small detail, as keen-eyed readers may have noticed. In Safari, when dismissing the full preview, it animates back into the preview view and springs back to the original position. With our implementation, however, it doesn't. The preview view controller does animate back into the preview view, but instead of returning to the original position, it disappears off into the middle of the screen. This is because there's still one `UIContextMenuInteractionDelegate` method we need to implement: `contextMenuInteraction(_:previewForDismissingMenuWithConfiguration:)`. Similar to the `previewForHighlighting` method, this method takes the interaction and the context menu configuration, and creates a `UITargetedPreview` that should be used during the dismissal animation. Since we want the preview to return to the same location while dismissing as it came from while expanding, we can cache the targeted preview we already constructed for the highlight method and return it from the dismissal method.
```swift
private weak var activeTargetedPreview: UITargetedPreview?
func contextMenuInteraction(_ interaction: UIContextMenuInteraction, previewForHighlightingMenuWithConfiguration configuration: UIContextMenuConfiguration) -> UITargetedPreview? {
    // ...
    let preview = UITargetedPreview(view: snapshotContainer, parameters: parameters, target: target)
    self.activeTargetedPreview = preview
    return preview
}
func contextMenuInteraction(_ interaction: UIContextMenuInteraction, previewForDismissingMenuWithConfiguration configuration: UIContextMenuConfiguration) -> UITargetedPreview? {
    return self.activeTargetedPreview
}
```
Now, when dismissing the preview, it animates back into the link text where it originally came from:
<div>
<video controls style="width: 50%; margin: 0 auto; display: block;" title="Screen recording of a link being previewed and dismissed with the link text animating back to its starting position upon dismissal.">
<source src="/2020/uipreviewparameters-textlinerects/dismiss.mp4" type="video/mp4">
</video>
</div>

View File

@ -0,0 +1,473 @@
```
title = "Implement a Gemini Protocol Client Using Network.framework"
tags = ["swift", "gemini"]
date = "2020-07-22 21:57:42 -0400"
slug = "gemini-network-framework"
```
[Gemini](https://gemini.circumlunar.space/) is a small protocol built on top of TCP and TLS that's designed to serve as a transport mechanism primarily for text documents while lacking a great deal of the complexity of HTTP. [Network.framework](https://developer.apple.com/documentation/network) was introduced to Apple's platforms in 2018 as a modern framework for dealing with network connections and building custom network protocols. So, let's use it to build a Gemini client implementation.
<!-- excerpt-end -->
## The Protocol
First, an overview of the Gemini protocol. This is going to be fairly brief, as there are some more details that I'm not going to go into, since this post is meant to focus on using Network.framework to build a TCP-based protocol client, rather than the protocol itself[^1]. If you're interested, you can read more about the details of the protocol in its [specification](https://gemini.circumlunar.space/docs/specification.html).
[^1]: That said, the rest of the Gemini protocol, as well as the text format, and the community that's sprung up around it is super interesting, and you should definitely check it out. An easy way to start is by using a Gemini-to-web proxy. Checkout the [homepage](https://proxy.vulpes.one/gemini/gemini.circumlunar.space) and explore from there.
At the highest level, Gemini is fairly similar to HTTP: every connection is made to request a single resource at a specific URL. After the connection is opened and the TLS handshake completed, the client sends the request. The request is the CRLF-terminated absolute URL of the resource being requested. The URL string is encoded as UTF-8 and has a maximum length of 1024 bytes. The URL scheme doesn't have to be specified; it defaults to `gemini://` when using the Gemini protocol for transport. The port is also optional, and defaults to `1965`[^2].
[^2]: Because the first [crewed mission](https://en.wikipedia.org/wiki/Gemini_3) of the Gemini Program launched on March 23, 1965.
```plaintext
gemini://example.com:1965/some/resource?foo<CR><LF>
```
Likewise, the response starts with a CRLF-terminated, UTF-8 encoded string. It begins with a two-digit status code, where the most significant digit defines the overall response type and the least significant digit provides more specificity. The status code is followed by a space character, then a string up to 1024 bytes in length, and finally the carriage return and line feed characters. The meaning of the meta string in the response is defined by the various status codes (for example, `20` is the status code for success and defines the meta string to be the MIME type of the response body).
```plaintext
20 text/gemini<CR><LF>
```
Finally, if the response was successful (i.e. the server returned a status code in the `2x` range), there may be a response body, which is arbitrary binary data.
## The Implementation
With Network.framework, everything starts with an `NWProtocol`. The framework provides a bunch of concrete subclasses for dealing with protocols like TCP, UDP, and TLS. New in 2019 is the `NWProtocolFramer` class, which provides an interface for defining your own protocols on top of the builtin stack. Using it starts with a class that conforms to the `NWProtocolFramerImplementation` protocol:
```swift
import Network
class GeminiProtocol: NWProtocolFramerImplementation {
    static let label = "Gemini"
    required init(framer: NWProtocolFramer.Instance) {}
}
```
The protocol has a bunch of requirements that need to be satisfied. Starting off with the simple ones, it needs a static read-only String variable called label, which will be used in log messages to identify which framer implementation is being used. It also needs an initializer which takes an `NWProtocolFramer.Instance`. Nothing needs to be done in this initializer—the framer instance doesn't even need to be stored, since all of the other methods that have to be implemented directly receive it.
There's also a static `definition` property which stores the `NWProtocolFramer.Definition` that's configured to use this class as the framer's implementation. This needs to be a singleton, not constructed for every request, because it will later be used as a key to get some implementation-specific data out of other framework classes.
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    static let definition = NWProtocolFramer.Definition(implementation: GeminiProtocol.self)
    // ...
}
```
Next, there are a few other simple methods to implement:
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    func start(framer: NWProtocolFramer.Instance) -> NWProtocolFramer.StartResult {
        return .ready
    }
    func wakeup(framer: NWProtocolFramer.Instance) {
    }
    func stop(framer: NWProtocolFramer.Instance) -> Bool {
        return true
    }
    func cleanup(framer: NWProtocolFramer.Instance) {
    }
}
```
Since the Gemini protocol doesn't use long-running/persistent connections, there's no work that needs to be done to start, wakeup, stop, or cleanup an individual connection. And, since each connection only handles a single request, there isn't even any handshake that needs to be performed to start a Gemini connection. We can just send the request and we're off to the races. Similarly, stopping a Gemini connection doesn't mean anything, the connection is just closed.
Actually sending a request is nice and simple. The `NWProtocolFramerImplementation` protocol has a `handleOutput` method (output, in this case, meaning output _from_ the client, i.e., the request). This method receives an instance of the protocol's message type, which in this case is `NWProtocolFramer.Message`. Since `NWProtocolFramer` is designed to be used to implement application-level protocols, its message type functions as a key-value store that can contain arbitrary application protocol information.
For the Gemini protocol, a simple struct encapsulates all the data we need to make a request. All it does is ensure upon initialization that the URL is no longer than 1024 bytes (a limit defined by the protocol spec) and define a small helper property that creates a `Data` object containing the URL string encoded as UTF-8 with the carriage return and line feed characters appended.
```swift
struct GeminiRequest {
    let url: URL
    init(url: URL) throws {
        guard url.absoluteString.utf8.count <= 1024 else { throw Error.urlTooLong }
        self.url = url
    }
    var data: Data {
        var data = url.absoluteString.data(using: .utf8)!
        data.append(contentsOf: [13, 10]) // <CR><LF>
        return data
    }
    enum Error: Swift.Error {
        case urlTooLong
    }
}
```
Also, a simple extension on `NWProtocolFramer.Message` provides access to the stored `GeminiRequest`, instead of dealing with string keys directly. There's also a convenience initializer to create a message instance from a request that's set up to use the protocol definition from earlier.
```swift
private let requestKey = "gemini_request"
extension NWProtocolFramer.Message {
    convenience init(geminiRequest request: GeminiRequest) {
        self.init(definition: GeminiProtocol.definition)
        self[requestKey] = request
    }
    var geminiRequest: GeminiRequest? {
        self[requestKey] as? GeminiRequest
    }
}
```
With those both in place, the protocol implementation can simply grab the request out of the message and send its data through to the framer instance:
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    func handleOutput(framer: NWProtocolFramer.Instance, message: NWProtocolFramer.Message, messageLength: Int, isComplete: Bool) {
        guard let request = message.geminiRequest else {
            fatalError("GeminiProtocol can't send message that doesn't have an associated GeminiRequest")
        }
        framer.writeOutput(data: request.data)
    }
}
```
Parsing input (i.e., the response from the server) is somewhat more complicated. Parsing the status code and the meta string will both follow a similar pattern. The `parseInput` method of `NWProtocolFramer.Instance` is used to get some input from the connection, given a valid range of lengths for the input. This method also takes a closure, which receives an optional `UnsafeMutableRawBufferPointer` containing the input data that was received as well as a boolean flag indicating if the connection has closed. It returns an integer representing the number of bytes that it consumed (meaning data that was fully parsed and should not be provided on subsequent `parseInput` calls). This closure is responsible for parsing the data, storing the result in a local variable, and returning how much, if any, of the data was consumed.
First off is the status code (and the following space character). In the protocol implementation, there's an optional `Int` property used as temporary storage for the status code. If the `tempStatusCode` property is `nil`, the `parseInput` method is called on the framer. The length is always going to be 3 bytes (1 for each character of the status code, and 1 for the space). Inside the `parseInput` closure, if the buffer is not present or it's not of the expected length, the closure returns zero to indicate that no bytes were consumed. Otherwise, the contents of the buffer are converted to a String and then parsed into an integer[^3] and stored in the temporary property (this is okay because the closure passed to `parseInput` is non-escaping, meaning it will be called before `parseInput` returns). Finally, the closure returns `3` to indicate that three bytes were consumed and should not be provided again as input.
[^3]: If you were really building an implementation of the Gemini protocol, you would probably want to wrap the raw integer status code in something else to avoid dealing with magic numbers throughout your codebase. An enum backed by integer values, perhaps.
Outside the `if`, there's a `guard` that checks that there is a status code present, either from immediately prior or potentially from a previous invocation of the method. If not, it returns `3` from the `handleInput` method, telling the framework that it expects there to be at least 3 bytes available before it's called again. The reason the status code is stored in a class property, and why the code ensures that it's `nil` before trying to parse, is so that if some subsequent parse step fails and the method has to be invoked again in the future, it doesn't try to re-parse the status code, because the actual data for it has already been consumed.
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    private var tempStatusCode: Int?
    func handleInput(framer: NWProtocolFramer.Instance) -> Int {
        if tempStatusCode == nil {
            _ = framer.parseInput(minimumIncompleteLength: 3, maximumLength: 3) { (buffer, isComplete) -> Int in
                guard let buffer = buffer, buffer.count == 3 else { return 0 }
                let secondIndex = buffer.index(after: buffer.startIndex)
                if let str = String(bytes: buffer[...secondIndex], encoding: .utf8),
                   let value = Int(str, radix: 10) {
                    self.tempStatusCode = value
                }
                return 3
            }
        }
        guard let statusCode = tempStatusCode else {
            return 3
        }
    }
}
```
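(As an aside, the wrapper type suggested in the footnote above might look something like the following; this type is hypothetical, but the category mapping follows the first-digit scheme from the Gemini spec:)
```swift
// A hypothetical status code wrapper: the first digit of a Gemini
// status code determines its overall category.
struct GeminiStatusCode {
    let rawValue: Int
    enum Category: Int {
        case input = 1
        case success = 2
        case redirect = 3
        case temporaryFailure = 4
        case permanentFailure = 5
        case clientCertificateRequired = 6
    }
    var category: Category? { Category(rawValue: rawValue / 10) }
    var isSuccess: Bool { category == .success }
}
```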
Next up: the meta string. Following the same pattern as with the status code, there's a temporary property to store the result of parsing the meta string and a call to `parseInput`. This time, the minimum length is 2 bytes (since the Gemini spec doesn't specify a minimum length for the meta string, it could be omitted entirely, which would leave just two bytes for the carriage return and line feed) and the maximum length is 1026 bytes (up to 1024 bytes for the meta string, and again, the trailing CRLF).
This time, the closure once again validates that there is enough data to at least attempt to parse it, but then it loops through the data looking for the CRLF sequence which defines the end of the meta string[^4]. Afterwards, if the marker sequence was not found, the closure returns zero because no data was consumed. Otherwise, it constructs a string from the bytes up to the index of the carriage return, stores it in the temporary property, and returns the number of bytes consumed (`index` here represents the end index of the string, so without the additional `+ 2` the trailing CRLF would be considered part of the body). After the call to `parseInput`, it similarly checks that the meta was parsed successfully and returns if not.
[^4]: You can't scan through the data backwards, because the response body immediately follows the CRLF after the meta string, so you could end up finding a CRLF sequence inside the body and incorrectly basing the length of the meta string off that.
One key difference between parsing the meta string and parsing the status code is that if the status code couldn't be parsed, the exact number of bytes that must be available before it can be attempted again is always the same: 3. That's not true when parsing the meta text: the number of bytes necessary for a retry depends on the number of bytes that were unsuccessfully parsed. For that reason, there's also an optional `Int` variable which stores the length of the buffer that the closure attempted to parse. When the closure executes, the variable is set to the length of the buffer. If, inside the closure, the code fails to find the carriage return and line feed characters anywhere, one of two things happens. If the buffer is shorter than 1026 bytes, the closure returns zero to indicate that nothing was consumed; then, since there's no string, `handleInput` will return 1 plus the attempted meta length, indicating to the framework that it should wait until there is at least 1 additional byte of data available before calling `handleInput` again. If no CRLF was found and the buffer count is greater than or equal to 1026, the closure simply aborts with a `fatalError`, because the protocol specifies that the meta string cannot be longer than 1024 bytes (it would be better to set some sort of 'invalid' flag on the response object and then pass that along to be handled by higher-level code, but for the purposes of this blog post, that's not interesting code). In the final case, if parsing the meta failed and the `attemptedMetaLength` variable is `nil`, that means there wasn't enough data available, so we simply return 2.
**Update July 7, 2021:** The eagle-eyed among you may notice that there's a flaw in the following implementation involving what happens when meta parsing has to be retried. I discovered this myself and discussed it in [this follow-up post](/2021/gemini-client-debugging/).
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    private var tempMeta: String?
    func handleInput(framer: NWProtocolFramer.Instance) -> Int {
        // ...
        var attemptedMetaLength: Int?
        if tempMeta == nil {
            _ = framer.parseInput(minimumIncompleteLength: 2, maximumLength: 1026) { (buffer, isComplete) -> Int in
                guard let buffer = buffer, buffer.count >= 2 else { return 0 }
                attemptedMetaLength = buffer.count
                let lastPossibleCRIndex = buffer.index(before: buffer.index(before: buffer.endIndex))
                var index = buffer.startIndex
                var found = false
                while index <= lastPossibleCRIndex {
                    if buffer[index] == 13 /* CR */ && buffer[buffer.index(after: index)] == 10 /* LF */ {
                        found = true
                        break
                    }
                    index = buffer.index(after: index)
                }
                if !found {
                    if buffer.count < 1026 {
                        return 0
                    } else {
                        fatalError("Expected to find <CR><LF> in buffer. Meta string may not be longer than 1024 bytes.")
                    }
                }
                tempMeta = String(bytes: buffer[..<index], encoding: .utf8)
                return buffer.startIndex.distance(to: index) + 2
            }
        }
        guard let meta = tempMeta else {
            if let attempted = attemptedMetaLength {
                return attempted + 1
            } else {
                return 2
            }
        }
    }
}
```
With the entire header parsed, an object can be constructed to represent the response metadata and an `NWProtocolFramer.Message` created to contain it.
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    func handleInput(framer: NWProtocolFramer.Instance) -> Int {
        // ...
        let header = GeminiResponseHeader(status: statusCode, meta: meta)
        let message = NWProtocolFramer.Message(geminiResponseHeader: header)
    }
}
```
`GeminiResponseHeader` is a simple struct to contain the status code and the meta string in a type-safe manner:
```swift
struct GeminiResponseHeader {
    let status: Int
    let meta: String
}
```
As with the request object, there's a small extension on `NWProtocolFramer.Message` so that all the string keys are contained to a single place.
```swift
private let responseHeaderKey = "gemini_response_header"
extension NWProtocolFramer.Message {
    convenience init(geminiResponseHeader header: GeminiResponseHeader) {
        self.init(definition: GeminiProtocol.definition)
        self[responseHeaderKey] = header
    }
    var geminiResponseHeader: GeminiResponseHeader? {
        self[responseHeaderKey] as? GeminiResponseHeader
    }
}
```
To actually pass the message off to the client of the protocol implementation, the `deliverInputNoCopy` method is used. Since the `handleInput` method has already parsed all of the data it needs to, and the response body is defined by the protocol to just be the rest of the response data, the `deliverInputNoCopy` method is a useful way of passing the data straight through to the protocol client, avoiding an extra memory copy. If the protocol had to transform the body of the response somehow, it could be read as above and then delivered to the protocol client with the `deliverInput(data:message:isComplete:)` method.
If the request was successful (i.e., the status code was in the 2x range), we try to receive as many bytes as possible, because the protocol doesn't specify a way of determining the length of a response. All other response codes are defined to never have response bodies, so we don't need to deliver any data. Using `.max` is a little bit weird, since we don't actually _need_ to receive that many bytes. But it seems to work perfectly fine in practice: once all the input is received and the other side closes the connection, the input is delivered without error.
Annoyingly, the return value of the Swift function is entirely undocumented (even in the generated headers, where the parameters are). Fortunately, the C equivalent (`nw_framer_deliver_input_no_copy`) is more thoroughly documented and provides an answer: the function returns a boolean indicating whether the input was delivered immediately or whether the framework will wait for more bytes before delivering it. We don't care at all about this, so we just discard the return value.
Finally, we return 0 from `handleInput`. Ordinarily, this would mean that there must be zero or more bytes available before the framework calls us again. But, because we've delivered all the available input, that will never happen.
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    func handleInput(framer: NWProtocolFramer.Instance) -> Int {
        // ...
        _ = framer.deliverInputNoCopy(length: (20..<30).contains(statusCode) ? .max : 0, message: message, isComplete: true)
        return 0
    }
}
```
Actually using the Gemini protocol implementation will require creating an `NWConnection` object, which takes an endpoint and connection parameters. The parameters define which protocols to use and the various options for them. The `NWParameters` class already defines a number of static `NWParameters` variables for commonly used protocols, so adding our own for Gemini fits right in.
```swift
extension NWParameters {
    static var gemini: NWParameters {
        let tcpOptions = NWProtocolTCP.Options()
        let parameters = NWParameters(tls: geminiTLSOptions, tcp: tcpOptions)
        let geminiOptions = NWProtocolFramer.Options(definition: GeminiProtocol.definition)
        parameters.defaultProtocolStack.applicationProtocols.insert(geminiOptions, at: 0)
        return parameters
    }
    private static var geminiTLSOptions: NWProtocolTLS.Options {
        let options = NWProtocolTLS.Options()
        sec_protocol_options_set_min_tls_protocol_version(options.securityProtocolOptions, .TLSv12)
        return options
    }
}
```
Here the only thing we customize about the TLS options is setting the minimum required version to TLS 1.2, as required by the Gemini spec. However, the Gemini spec further recommends that clients implement a trust-on-first-use scheme to allow people to host content on the Gemini network using self-signed certificates; implementing that is out of the scope of this post. If you're interested, a good starting point is the `sec_protocol_options_set_verify_block` function, which lets you provide a closure that the framework uses to verify server certificates during the TLS handshake process.
Now, to make an API for all this that's actually pleasant to use, I pretty closely followed the `URLSessionDataTask` approach from Foundation, since it models something fairly similar to Gemini.
`GeminiDataTask` is a class which will store the request being sent and a completion handler, as well as an internal state and the underlying `NWConnection`. The initializer stores a few things and then sets up the network connection. It uses the URL's port, if it has one, otherwise the default of 1965. The host is simply the host of the requested URL. These are used to construct an `NWEndpoint` object and, combined with the Gemini `NWParameters` set up previously, create the connection. The convenience initializer also provides a slightly nicer API, so the user doesn't have to directly deal with the `GeminiRequest` object (which, from their perspective, is useless since there's nothing to customize about it beyond the plain old URL).
```swift
class GeminiDataTask {
    typealias Completion = (Result<GeminiResponse, Error>) -> Void
    let request: GeminiRequest
    private let completion: Completion
    private(set) var state: State
    private let connection: NWConnection
    init(request: GeminiRequest, completion: @escaping Completion) {
        self.request = request
        self.completion = completion
        self.state = .unstarted
        let port = request.url.port != nil ? UInt16(request.url.port!) : 1965
        let endpoint = NWEndpoint.hostPort(host: NWEndpoint.Host(request.url.host!), port: NWEndpoint.Port(rawValue: port)!)
        self.connection = NWConnection(to: endpoint, using: .gemini)
    }
    convenience init(url: URL, completion: @escaping Completion) throws {
        self.init(request: try GeminiRequest(url: url), completion: completion)
    }
}
```
The `State` enum is quite simple, just a few cases. It isn't used for much, just keeping track of the internal state so that the task doesn't try to perform any invalid operations on the connection.
```swift
extension GeminiDataTask {
    enum State {
        case unstarted, started, completed
    }
}
```
There's also a small helper struct to combine the response body and metadata into a single object:
```swift
struct GeminiResponse {
    let header: GeminiResponseHeader
    let body: Data?
    var status: Int { header.status }
    var meta: String { header.meta }
}
```
There are also some small methods to start and stop the request. I also copied the behavior from `URLSessionTask` where the task is automatically cancelled when all references to it are released.
```swift
class GeminiDataTask {
    // ...
    deinit {
        self.cancel()
    }
    func resume() {
        guard self.state == .unstarted else { return }
        self.connection.start(queue: GeminiDataTask.queue)
        self.state = .started
    }
    func cancel() {
        guard state != .completed else { return }
        self.connection.cancel()
        self.state = .completed
    }
}
```
When the connection starts, it needs to know which `DispatchQueue` to call its handler blocks on. For simplicity, here there's just a single queue used for all Gemini tasks.
```swift
class GeminiDataTask {
    static let queue = DispatchQueue(label: "GeminiDataTask", qos: .default)
    // ...
}
```
Also in the initializer, the `stateUpdateHandler` property of the connection is set to a closure which receives the connection's new state. If the connection has become ready, it sends the request. If the connection has errored for some reason, it ensures that it's closed and reports the error to the task's completion handler.
```swift
class GeminiDataTask {
    // ...
    init(request: GeminiRequest, completion: @escaping Completion) {
        // ...
        self.connection.stateUpdateHandler = { (newState) in
            switch newState {
            case .ready:
                self.sendRequest()
            case let .failed(error):
                self.state = .completed
                self.connection.cancel()
                self.completion(.failure(error))
            default:
                break
            }
        }
    }
}
```
To actually send the request, an `NWProtocolFramer.Message` is constructed for the request using the convenience initializer added earlier. Then, a custom connection context is instantiated, using the message as its metadata. The message isn't sent directly, so the connection context is how `NWProtocolFramer` will later get access to it. There's no data sent, because Gemini requests can't have any body and the only data required is already encoded by the `GeminiRequest` object. Since the spec states that every connection corresponds to exactly one request, the request is completed immediately. The only thing the send completion handler needs to do is check if an error occurred while sending the request, and if so, cancel the connection and report the error.
```swift
class GeminiDataTask {
    // ...
    private func sendRequest() {
        let message = NWProtocolFramer.Message(geminiRequest: self.request)
        let context = NWConnection.ContentContext(identifier: "GeminiRequest", metadata: [message])
        self.connection.send(content: nil, contentContext: context, isComplete: true, completion: .contentProcessed({ (error) in
            if let error = error {
                self.state = .completed
                self.connection.cancel()
                self.completion(.failure(error))
            }
        }))
        self.receive()
    }
}
```
Once the request has been sent, the `receive` method is called on the task to set up the receive handler for the connection. The receive closure takes the data that was received, another content context, whether the request is completed, and any error that may have occurred. In all cases, it closes the connection and sets the task's internal state to completed. If there was an error, it's reported via the task's completion handler. As when sending the request, the `NWConnection` has no direct knowledge of the `NWProtocolFramer` and its messages, so those have to be pulled out via the context. If the message and header were found, then the header is bundled up with the rest of the data that was received into a response object which is given to the completion handler.
```swift
class GeminiDataTask {
    // ...
    private func receive() {
        self.connection.receiveMessage { (data, context, isComplete, error) in
            if let error = error {
                self.completion(.failure(error))
            } else if let message = context?.protocolMetadata(definition: GeminiProtocol.definition) as? NWProtocolFramer.Message,
                      let header = message.geminiResponseHeader {
                let response = GeminiResponse(header: header, body: data)
                self.completion(.success(response))
            }
            self.connection.cancel()
            self.state = .completed
        }
    }
}
```
To recap, here's how it all fits together: First, the user constructs a `GeminiDataTask` representing the request. Next, to kick off the request, the user calls the `resume` method on it. This starts the underlying `NWConnection`, which establishes a TCP connection and performs the TLS handshake. Once the network connection is ready, its `stateUpdateHandler` closure is notified, causing the `sendRequest` method to be called on the task. That method then creates the actual message object, gives it to the connection to send, and then sets up a handler to be called when a response is received. Using the request message and the `GeminiProtocol` implementation, Network.framework gets the raw bytes to send over the network. The framework then waits in the background to receive a response from the server. Once data is received from the server and has been decrypted, it is handed to the `GeminiProtocol`, which parses the metadata and then sends the rest of the data on to the protocol client. Upon receipt of the full metadata and message, the receive closure is called. The closure then passes the result of the request—either an error or the Gemini response—to the completion handler and closes the connection.
At the end of all this, the API we've got is a nice simple abstraction over a network protocol that should be fairly familiar to most Apple-platform developers:
```swift
let task = try! GeminiDataTask(url: URL(string: "gemini://gemini.circumlunar.space/")!) { (result) in
    switch result {
    case .success(let response):
        print("Status: \(response.status)")
        print("Meta: '\(response.meta)'")
        if let data = response.body, let str = String(data: data, encoding: .utf8) {
            print(str)
        }
    case .failure(let error):
        print("Error: \(error)")
    }
}
task.resume()
```
Network.framework is a super useful tool for writing custom networking code and building abstractions on top of relatively low-level protocols. The example I gave here isn't hypothetical; I'm using Network.framework and almost this exact code to build a [Gemini browser](https://git.shadowfacts.net/shadowfacts/Gemini) app for Mac and iOS.
This post has barely scratched the surface; there's even more interesting stuff the framework is capable of, such as building peer-to-peer protocols. The documentation, in particular the [Tic-Tac-Toe sample project](https://developer.apple.com/documentation/network/building_a_custom_peer-to-peer_protocol), is a great resource for seeing more of what's possible.

View File

@ -0,0 +1,102 @@
```
title = "SwiftUI Auto-Expanding Text Views"
tags = ["swift"]
date = "2020-08-29 11:20:42 -0400"
short_desc = "Building a non-scrolling UITextView for use in SwiftUI layouts."
slug = "swiftui-expanding-text-view"
```
I'm currently in the process of rewriting the Compose screen in Tusker to use SwiftUI. This has mostly been a smooth process, but there have been a few hiccups, the first of which was the main text box. The updates to SwiftUI introduced in iOS 14 included [`TextEditor`](https://developer.apple.com/documentation/swiftui/texteditor), the SwiftUI equivalent of `UITextView` to allow multi-line text editing. Unfortunately, there's no (straightforward) way of disabling scrolling, making it unsuitable for some designs where the text view is embedded in a separate scroll view. Additionally, the fact that it's not available at all on iOS 13 means layouts that require non-scrolling multi-line text editors must wrap `UITextView` themselves in order to be achievable with SwiftUI.
<!-- excerpt-end -->
You'd think this would be pretty simple: just use `UIViewRepresentable` with `UITextView` and disable scrolling. But if you try that approach, you'll find a few issues that make things a bit more complex. While setting the `isScrollEnabled` property on the text view to `false` does indeed disable scrolling and make the text view expand itself as more lines are typed, the text view does not entirely respect SwiftUI's layout system. Typing text that is larger than the available space causes the text view to expand outwards from the centerpoint, screwing up the layout, instead of wrapping the text onto the next line.
<div>
<video controls style="width: 50%; margin: 0 auto; display: block;" title="Screen recording of a non-scrolling UITextView inside SwiftUI being edited. When the text view grows, it expands from the center point and ends up offscreen.">
<source src="/2020/swiftui-expanding-text-view/scroll-disabled.mp4" type="video/mp4">
</video>
</div>
Enabling scrolling on the text view partially solves this, making the text wrap whenever the user types something longer than fits on a single line. Of course, this also reintroduces the problem that the text view now scrolls instead of expanding to fit the contents. The simplest solution I've come up with for this problem is to have the SwiftUI side of the layout automatically expand the text view's frame whenever the contents changes. So, even though the `UITextView` is allowed to scroll, it never will because the layout will ensure that the actual size of the view is always taller than the text's height. Additionally, with bouncing disabled, there's no indication from the user's perspective that this is anything other than a regular non-scrolling text view.
Actually implementing this is pretty simple. There's a `UIViewRepresentable` which wraps the `UITextView` and plumbs a `Binding<String>` up to the text view. It also stores a closure that receives the `UITextView` which is invoked whenever the text changes, using the `UITextViewDelegate` method. This will allow the actual SwiftUI view to listen for text view changes and update the frame of the wrapped view.
```swift
import SwiftUI
struct WrappedTextView: UIViewRepresentable {
    typealias UIViewType = UITextView
    @Binding var text: String
    let textDidChange: (UITextView) -> Void
    func makeUIView(context: Context) -> UITextView {
        let view = UITextView()
        view.isEditable = true
        view.delegate = context.coordinator
        return view
    }
    func updateUIView(_ uiView: UITextView, context: Context) {
        uiView.text = self.text
        DispatchQueue.main.async {
            self.textDidChange(uiView)
        }
    }
    func makeCoordinator() -> Coordinator {
        return Coordinator(text: $text, textDidChange: textDidChange)
    }
    class Coordinator: NSObject, UITextViewDelegate {
        @Binding var text: String
        let textDidChange: (UITextView) -> Void
        init(text: Binding<String>, textDidChange: @escaping (UITextView) -> Void) {
            self._text = text
            self.textDidChange = textDidChange
        }
        func textViewDidChange(_ textView: UITextView) {
            self.text = textView.text
            self.textDidChange(textView)
        }
    }
}
```
One key line to note is that, in the `updateUIView` method, after the text is updated, the `textDidChange` closure is called. This is necessary because the `UITextView.text` setter does not call the delegate method automatically. So, if the text was changed programmatically, the delegate method wouldn't be called and, in turn, the did-change callback wouldn't execute, preventing the height from updating. `DispatchQueue.main.async` is used to defer updating the view height until the next runloop iteration for two reasons:
1. So that we're not modifying view state during view updates, as that's undefined behavior in SwiftUI.
2. Because the UITextView doesn't recalculate its content size immediately when the text is set.
Waiting until the next runloop iteration solves both of those issues: the SwiftUI view updates will have finished and the text view will have recalculated its size.
The wrapping SwiftUI view is pretty simple. It passes the string binding through to the wrapped view and it also stores its minimum height, as well as an internal `@State` variable for the current height of the text view. The text view height is an optional, because before the text view appears, there is no height.
```swift
struct ExpandingTextView: View {
    @Binding var text: String
    let minHeight: CGFloat = 150
    @State private var height: CGFloat?
    var body: some View {
        WrappedTextView(text: $text, textDidChange: self.textDidChange)
            .frame(height: height ?? minHeight)
    }
    private func textDidChange(_ textView: UITextView) {
        self.height = max(textView.contentSize.height, minHeight)
    }
}
```
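As a quick usage example (hypothetical, just to show the shape of the API), the view can then be dropped into a larger scrolling layout like any other SwiftUI view:
```swift
// A hypothetical compose screen using ExpandingTextView; since the text
// view itself never scrolls, it can live inside an enclosing ScrollView.
struct ComposeView: View {
    @State private var text = ""
    var body: some View {
        ScrollView {
            VStack(alignment: .leading) {
                Text("New Post").font(.headline)
                ExpandingTextView(text: $text)
            }
            .padding()
        }
    }
}
```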
Now, everything works correctly. The text view wraps text and expands to fit user input as expected, as well as updating its height when the content is altered in code.
<div>
<video controls style="width: 50%; margin: 0 auto; display: block;" title="Screen recording of a custom text view inside SwiftUI. When the text changes, the scroll view does not overflow and the height expands to fit the content.">
<source src="/2020/swiftui-expanding-text-view/custom-wrapper.mp4" type="video/mp4">
</video>
</div>

View File

@ -0,0 +1,113 @@
```
title = "More SwiftUI Text View Features"
tags = ["swift"]
date = "2020-09-23 17:35:42 -0400"
short_desc = "Adding additional features to the auto-expanding SwiftUI text view."
slug = "more-swiftui-text-views"
```
In my [last post](/2020/swiftui-expanding-text-view/), I went over the implementation of a custom SwiftUI view that wraps `UITextView` in order to make it non-scrolling and auto-expanding. That worked quite well, but in continuing the reimplementation of Tusker's compose screen in SwiftUI, I ran into a couple more things I had to re-implement myself.
<!-- excerpt-end -->
First up: a placeholder. Unfortunately, UIKit itself still does not provide a built-in way of adding placeholder text to `UITextView`, leaving it to app developers to implement themselves. When using UIKit, a simple solution is to add a `UILabel` on top of the text view and configure its position and font to match the text view's.
Replicating this is pretty simple, just move the wrapped `UITextView` inside a `ZStack` and add a `Text` for the placeholder. There are a couple slight complications, however. The first is that the text view's content isn't rendered starting at the very top left corner of the text view. Instead, it's slightly inset. With the fonts matching, I found by trial-and-error that having the placeholder inset 4 points from the left and 8 points from the top aligned it to just about the correct position. There's a slight discrepancy which seems to be a fraction of a point, but I decided to leave it at the nice, integral offsets. All but the most eagle-eyed users would never notice such a difference, especially as the placeholder disappears immediately when the user starts typing.
One additional caveat is that, unlike when you put a `UILabel` in front of a `UITextView`, when using a `ZStack` to position a `Text` on top of wrapped text view, the `Text` will intercept touches on the view behind it. So, if the text were empty and the user tapped on the placeholder label, the text view would not activate. A simple solution for this is to put the placeholder text behind the text view, which has the same visual appearance but results in the text view receiving all the touches within it.
```swift
struct ExpandingTextView: View {
    // ...
    var body: some View {
        ZStack {
            if text.isEmpty {
                Text("Type something...")
                    .font(.system(size: 20))
                    .foregroundColor(.secondary)
                    .offset(x: 4, y: 8)
            }
            WrappedTextView(text: $text, textDidChange: self.textDidChange)
                .frame(height: height ?? minHeight)
        }
    }
    // ...
}
```
This, of course, introduces yet another caveat: setting a background color on the wrapped `UITextView` is no longer possible, since it would obscure the placeholder. So, if you want to add a background color, it needs to be added to the `ZStack` behind both the text view and the placeholder, so that everything appears in the correct order.
```swift
struct ExpandingTextView: View {
    // ...
    var body: some View {
        ZStack {
            Color(UIColor.secondarySystemBackground)
            // ...
        }
    }
    // ...
}
```
Finally, I needed to be able to programmatically focus the text view so that the user can start typing immediately. Actually focusing the text view is done by calling the `becomeFirstResponder` method that is present on all `UIResponder` subclasses. This needs to be done from the `UIViewRepresentable` implementation, since it's the only thing that has direct access to the underlying view. So, we need a mechanism to signal to the wrapper when it should instruct the text view to become the first responder. For this, the wrapped text view can take a binding to a boolean to indicate when it should activate.
Whenever any of the inputs to the wrapped text view change (as well as immediately after it's first created), the `updateUIView` method is called to synchronize the wrapped `UIView`'s state with the SwiftUI wrapping struct's state. In this method, if the flag is `true`, we can call the `becomeFirstResponder` method on the text view to activate it. The flag also needs to be set back to `false` so that if it's changed later, it triggers another SwiftUI view update.
This all needs to happen inside the `DispatchQueue.main.async` block for two reasons. First, updating the view state during the view update is undefined behavior according to SwiftUI and should be avoided. The second reason is that, while calling `becomeFirstResponder` during `updateUIView` works fine on iOS 14, when running on iOS 13, it causes a crash deep inside UIKit, seemingly because the system is trying to present the keyboard before the view hierarchy is entirely on-screen.
```swift
struct WrappedTextView: UIViewRepresentable {
// ...
@Binding var becomeFirstResponder: Bool
func makeUIView(context: Context) -> UITextView { /* ... */ }
func updateUIView(_ uiView: UITextView, context: Context) {
uiView.text = self.text
DispatchQueue.main.async {
self.textDidChange(uiView)
if self.becomeFirstResponder {
uiView.becomeFirstResponder()
self.becomeFirstResponder = false
}
}
}
}
```
Actually making the text view focus when it appears is now a simple matter of giving the wrapped text view a binding to a boolean that we set to `true` during an `onAppear` callback.
```swift
struct ExpandingTextView: View {
// ...
@State private var becomeFirstResponder = false
@State private var hasFirstAppeared = false
var body: some View {
ZStack {
            if text.isEmpty {
Text("Type something...")
.font(.system(size: 20))
.foregroundColor(.secondary)
.offset(x: 4, y: 8)
}
WrappedTextView(text: $text, textDidChange: self.textDidChange, becomeFirstResponder: $becomeFirstResponder)
.frame(height: height ?? minHeight)
}
.onAppear {
if !hasFirstAppeared {
hasFirstAppeared = true
becomeFirstResponder = true
}
}
}
// ...
}
```
In this example, there's also a `hasFirstAppeared` state variable, so that the text view is only activated automatically the first time the view is shown. If the user dismisses the keyboard, leaves the app, and then returns, the keyboard should stay dismissed. This behavior could also be easily extracted out of the `ExpandingTextView` and into a container view, by having it pass a boolean binding through to the wrapper, as sketched below.
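Something like this might work (a sketch only, assuming `ExpandingTextView` is changed to take the `becomeFirstResponder` binding as a parameter rather than owning it as `@State`; `AutoFocusingTextView` is a hypothetical name):

```swift
import SwiftUI

// Sketch: a container that owns the first-appearance logic, passing the
// becomeFirstResponder flag down through ExpandingTextView to the wrapper.
struct AutoFocusingTextView: View {
    @Binding var text: String
    @State private var becomeFirstResponder = false
    @State private var hasFirstAppeared = false

    var body: some View {
        ExpandingTextView(text: $text, becomeFirstResponder: $becomeFirstResponder)
            .onAppear {
                if !hasFirstAppeared {
                    hasFirstAppeared = true
                    becomeFirstResponder = true
                }
            }
    }
}
```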
```
title = "A UI Framework Dilemma"
tags = ["swift", "gemini"]
date = "2020-10-05 16:17:42 -0400"
short_desc = "Trying to pick a UI framework for an iOS Gemini browser."
slug = "ui-framework-dilemma"
```
For the past couple of weeks I've been building Rocketeer, an iOS browser for the [Gemini](https://gemini.circumlunar.space) network.[^1] The gemtext format is very minimal, so I thought it would be fairly easy to build something to render Gemini documents. The format is line-oriented and only allows a few different line types. There are regular paragraphs, link lines, 3 levels of headings, unordered list items, preformatted blocks, and block quotes. All of these are pretty simple, visually speaking, and the layout is also straightforward. So, I expected to be able to build a renderer quite easily. Unfortunately, there turned out to be lots of little details that were not so obvious at first and introduced a bunch of complications.
[^1]: I [previously wrote](/2020/gemini-network-framework) about building the networking stack using Network.framework.
<!-- excerpt-end -->
Initially, all of the UI code for Rocketeer was written using SwiftUI (and so it remains in the current (at the time of publication) [public beta](https://testflight.apple.com/join/LAs1URxM)). Originally I chose SwiftUI because it allowed me to build a functional document renderer incredibly quickly, so I could at least see something on screen. But, this worked out better, architecturally speaking, than I expected. Gemtext is a line-oriented format, so each line of text in the source document pretty much maps to a single visual unit for display purposes. For the same reason, layout is quite simple. Just as the source document is just a list of lines arranged vertically, the rendered document is a list of display units arranged vertically, one after the other. With SwiftUI, the actual layout code is as simple as a single `VStack` (or `LazyVStack` on iOS 14, which gets you a simple layout with lazily initialized views without having to screw around with the table or collection view APIs that weren't designed for such a thing) containing a `ForEach` that iterates over each of the display blocks. Put that inside a `ScrollView`, and bam: you're rendering a whole Gemini document.
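In code, that overall structure might look something like this (a sketch; `Block` and `BlockView` are hypothetical stand-ins for the parsed gemtext line types and their renderers):

```swift
import SwiftUI

// Sketch: the whole-document layout described above, on iOS 14.
struct DocumentView: View {
    let blocks: [Block] // assumes the hypothetical Block conforms to Identifiable

    var body: some View {
        ScrollView {
            LazyVStack(alignment: .leading, spacing: 8) {
                ForEach(blocks) { block in
                    BlockView(block: block) // one view per source line
                }
            }
        }
    }
}
```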
This was all well and good, until I realized there were a few features I wanted to add that SwiftUI wasn't capable of. At first it was just custom context menu link previews (similar to what Safari does).
While SwiftUI does provide the `.contextMenu` modifier for adding context menu _actions_ to a view, it doesn't have a native API for creating custom context menu previews (FB8772786). This could in theory be accomplished with a custom `UIViewRepresentable` that wraps a SwiftUI view in a `UIView` with a `UIContextMenuInteraction`, thereby granting access to the regular UIKit context menu APIs, but that's a great deal of work for a feature that's small enough it probably wouldn't be missed.
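The skeleton of that workaround might look something like the following (very much a sketch: it omits hosting the actual SwiftUI content and wiring up menu actions, and `makePreview` is a hypothetical parameter):

```swift
import SwiftUI
import UIKit

// Sketch: expose UIKit's custom context menu previews to SwiftUI.
struct ContextMenuPreviewWrapper: UIViewRepresentable {
    let makePreview: () -> UIViewController?

    func makeUIView(context: Context) -> UIView {
        let view = UIView()
        view.addInteraction(UIContextMenuInteraction(delegate: context.coordinator))
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {}

    func makeCoordinator() -> Coordinator {
        Coordinator(makePreview: makePreview)
    }

    class Coordinator: NSObject, UIContextMenuInteractionDelegate {
        let makePreview: () -> UIViewController?

        init(makePreview: @escaping () -> UIViewController?) {
            self.makePreview = makePreview
        }

        func contextMenuInteraction(_ interaction: UIContextMenuInteraction, configurationForMenuAtLocation location: CGPoint) -> UIContextMenuConfiguration? {
            // previewProvider supplies the custom preview view controller.
            UIContextMenuConfiguration(identifier: nil, previewProvider: makePreview, actionProvider: nil)
        }
    }
}
```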
But, that wasn't the end. I realized text selection would be a very useful feature. Replies to gemlog posts are commonly written by block-quoting parts of the original post and then writing the response below it (à la email bottom-posting). Imagine being able to read a Gemini post on an iPad, copying parts of it into a text editor to compose a response, and then uploading it to your server with an FTP app. That sounds like a pretty comfy workflow. One which requires the ability to select and copy text out of the browser. Which SwiftUI's `Text` view can't do (FB8773449).
I put aside text selection as something to revisit later. And then I got to thinking about interesting features that the Gemini format itself would facilitate. The first one that came to mind was generating a table of contents. As a side effect of a document format that doesn't allow custom layout, documents on Geminispace use markup much more structurally/semantically. Headings are used as, well, headings. And having documents be well-ordered, in addition to having three distinct levels of headings, means there's a structure implied by the heading lines. By scanning through a document and looking at the heading lines, you could quite easily generate a table of contents for the entire document. Now, here's where SwiftUI comes into this: If you've got a table of contents, you probably want to be able to skip to a specific point in it (what use would the table of contents be in a book without page numbers?). iOS 14 introduced [`ScrollViewReader`](https://developer.apple.com/documentation/swiftui/scrollviewreader), which allows the scroll view's position to be manipulated programmatically by jumping to specific views (despite the name, it does not do any reading of the `ScrollView`). Of course, this is only available on iOS 14, so any users on iOS 13 wouldn't be able to use it. And given that iOS 14 was released less than a month ago, and how simple this feature seems, I didn't want to make it dependent on a new OS version.
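For what it's worth, the iOS 14 version of the jump would only be a few lines (a sketch; `headings` is a hypothetical list of heading strings used as view IDs):

```swift
import SwiftUI

// Sketch: jumping to a heading with ScrollViewReader on iOS 14.
struct DocumentWithTOC: View {
    let headings: [String]
    @State private var selectedHeading: String?

    var body: some View {
        ScrollViewReader { proxy in
            ScrollView {
                LazyVStack(alignment: .leading) {
                    ForEach(headings, id: \.self) { heading in
                        Text(heading)
                            .id(heading) // the anchor ScrollViewReader jumps to
                    }
                }
            }
            .onChange(of: selectedHeading) { heading in
                if let heading = heading {
                    proxy.scrollTo(heading, anchor: .top)
                }
            }
        }
    }
}
```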
Also on the subject of scroll views, the browser should be able to persist the scroll view's offset. If the user leaves the app and returns, they should be looking at the same point on the page as when they left. Likewise if they navigate backwards/forwards or switch tabs. This isn't possible at all using SwiftUI's `ScrollView`. I briefly tried setting up a `UIScrollView` myself, and then adding the `UIHostingController`'s view as a child of it, but this completely removed the benefit of `LazyVStack`, causing abysmal performance when viewing some pages.
Even if wrapping `UIScrollView` myself had worked, what would be the point? Along with all the other things, I'd have written almost the entire Gemini document viewer using UIKit with only the teensiest bit of SwiftUI glue code. Why not then just go one step further and only use UIKit?
And this is entirely putting aside the fact that Rocketeer was originally intended to be a Mac app, with the iOS app as an afterthought when I realized it was easily possible since I was using SwiftUI. Using UIKit for so many integral parts would have meant huge portions of the codebase had to be rewritten for the macOS version.
So, while any one of these features wouldn't be enough to get me to abandon SwiftUI, altogether they're enough to get me to start eyeing other options. Because to not do so would leave a lot of useful features on the table. The two likely replacements I came up with were: A) converting the Gemini document into an `NSAttributedString` and stuffing it into a `UITextView` or B) rendering the Gemini document to HTML and displaying it with a `WKWebView`. The following table shows which features I want for Rocketeer and with which options they're possible.
| | SwiftUI | UITextView | WKWebView |
| ----------------------------- | ------------ | ---------- | --------- |
| Render all Gemini line types | Yes | Yes | Yes |
| Text selection | No | Yes | Yes |
| Text selection between blocks | N/A | No | Yes |
| Context menu actions | Yes | Yes | Yes |
| Context menu previews | Hacky | Yes | Yes |
| VoiceOver & Voice Control | No (iOS bug) | ? | Yes |
| Persist scroll position | No | Yes | Yes |
| Scroll to anchor | iOS 14 | Yes | Yes |
| Horizontally scrolling blocks | Yes | No | Yes |
| SF Symbols | Yes | Yes | Hacky |
| System fonts | Yes | Yes | Hacky |
| Block quote leading border | Yes | No | Yes |
Clearly, SwiftUI poses the most problems, and WebKit has the most possibilities. But plain old UIKit with a `UITextView` is in an annoying middle ground. A fair number of additional features are possible when compared to SwiftUI. But in exchange for that, it also _loses_ some features that are possible with SwiftUI. And of course, there are still a few things that neither SwiftUI nor `UITextView` support.
First up: VoiceOver and Voice Control. While reading the contents of a text view with VoiceOver is obviously possible, there are still a few questions. The ideal narration behavior for Rocketeer would be to have VoiceOver read each visual segment one at a time. One by one, going through each paragraph and link and list item[^2]. As for Voice Control, the user needs to be able to interact with links within the text view individually. And in addition to the bare numbers all buttons are assigned, users should be able to speak the label of links to simulate tapping on them. I would hope UIKit provides suitable accessibility APIs for this, but I haven't investigated it. I can't imagine it's as simple as using a single `Button` per link in SwiftUI. With `WKWebView`, these are not only possible but are handled automatically and completely for free, thanks to all the work the WebKit team has put into it.
[^2]: Regardless of UI technology, narrating preformatted text with a screen reader is an interesting problem for the Gemini format. I can't imagine listening to something naively read a block of code aloud would be pleasant. Let alone ASCII art, which is relatively common in Geminispace in lieu of inline images.
Then there's the issue of styling block quotes. The appearance I prefer is having the text be a lighter color and italicized, as well as having it slightly inset from the leading edge and have a solid border along the leading edge as well. As is becoming a pattern, with SwiftUI, this is fairly straightforward. You can use an `HStack` with some spacing containing a `Color` that has a frame of a fixed width and then the `Text`. The text will force the stack to expand vertically, and the color view will expand to fill the entire available height. This is also possible with CSS, using only the `border` and `padding` properties. `UITextView` of course makes things more complicated. While there may be an `NSAttributedString` attribute to indent an entire paragraph, there is no good way of applying a border to just a small part of a text view's contents. A solution could be devised by adding `UIView`s with background colors as subviews of the text view. But then you have to make sure the border views are positioned correctly, and that they're kept in sync with the text view as the device is rotated or the window resized. I can also imagine a truly cursed solution that works by performing word wrapping at the desired width, and then inserting a newline character, a special Unicode character that renders as a solid block, and some spaces at each point where the text would wrap. Even with the block characters correctly positioned horizontally, there would likely be small gaps in between them vertically due to the font's line spacing. Furthermore, you would have to keep this in sync with viewport size changes, and at any rate, this is just too cursed of a solution for me.
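The SwiftUI version of that block quote styling, as a sketch:

```swift
import SwiftUI

// Sketch: a block quote with a solid leading border and lighter, italic text.
struct BlockQuoteView: View {
    let text: String

    var body: some View {
        HStack(spacing: 8) {
            Color.secondary
                .frame(width: 3) // fixed width; expands to fill the stack's height
            Text(text)
                .italic()
                .foregroundColor(.secondary)
        }
    }
}
```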
On to the subject of preformatted text, for which the challenge is that line wrapping needs to be disabled. Otherwise, certain preformatted text, like code, would be much more difficult to read. And even worse, ASCII art would be entirely illegible (and potentially take up a huge amount of vertical space unnecessarily, depending on how wide it is). With line wrapping disabled, the preformatted text needs to scroll horizontally so that it is all visible. But the entire document viewport shouldn't scroll, because it's likely that the majority of the text is just going to be regular paragraphs, and moving the entire viewport horizontally would leave those off the screen. So, only the actual preformatted sections should be able to scroll horizontally; everything else should be fixed to the width of the screen. With SwiftUI, this is pretty straightforward: there's just a `Text` view inside a horizontal `ScrollView` and that takes care of everything (see the sketch below). Using WebKit for this is also very straightforward, since you can use CSS to set the `overflow-x` property on `<pre>` elements to make them scroll. Using `UITextView` is where this gets complicated. This isn't possible just with an attributed string and a plain old text view. You could work around this by adding another `UITextView`, configured to disable line wrapping and allow scrolling on the X-axis, as a subview of the outer text view. But then you once again would have to deal with manually positioning the inner text views inside of the scroll view content of the outer text view and keeping that position in sync as the outer view changes size. You also have to somehow add spacing to the contents of the outer text view so that there's an appropriately sized gap in its contents where the inner text view sits. This approach would also introduce problems for text selection.
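Here's roughly what that SwiftUI version looks like (a sketch):

```swift
import SwiftUI

// Sketch: a horizontally scrolling preformatted block that never wraps.
struct PreformattedBlockView: View {
    let text: String

    var body: some View {
        ScrollView(.horizontal, showsIndicators: false) {
            Text(text)
                .font(.system(.body, design: .monospaced))
                .fixedSize(horizontal: true, vertical: false) // no wrapping
        }
    }
}
```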
While `UITextView` does support at least some amount of text selection, which is an improvement over SwiftUI's complete lack thereof, it doesn't support selecting text between multiple separate text views. Most of the time, this isn't a big deal. But what if you want to copy a large chunk of text spanning multiple paragraphs and, say, a preformatted block? That wouldn't be possible. If you were inserting preformatted blocks using the technique described in the previous paragraph, what would happen when you tried to make a selection that crosses the boundary between a piece of preformatted text and regular body text? The selection certainly wouldn't continue between them smoothly, as the user would expect. If you had to insert extra text into the outer text view's contents in order to make space for the inner views, starting a selection in the outer view and dragging across the inner view would just end up selecting the placeholder characters you inserted, which are not actually part of the source document. And if the user started a selection in one of the inner text views, dragging across the boundary into the outer text view would result in the selection just stopping abruptly when it reached the end of the preformatted text. Inserting `NSTextAttachment`s into the text would also make the matter of selection more complicated. I use SF Symbols images as icons to show additional information about links (specifically, whether they're pointing to the same domain, a different part of Geminispace, or another protocol altogether). `NSTextAttachment` can contain arbitrary `UIImage`s, so this is possible, but it makes the image a part of the text, meaning the user could end up making a selection that contains an attachment and copying it out of the app. What would happen then, you wonder? I don't know, but I can't imagine it would be something helpful. Bullet points have a similar problem, since the `U+2022` character is inserted directly into the attributed string when rendering list item lines. `WKWebView` doesn't have this problem, once again thanks to the efforts of the WebKit team. Text selection across multiple HTML elements? No problem. Skip over decorative images? Sure thing. Bullet points? You bet.
Having gotten this far, you might think that using a `WKWebView` with the gemtext converted into HTML is the perfect solution. But of course, there are a couple regressions when going from plain old UIKit to WebKit, since nothing could ever be simple.
The first is the issue of SF Symbols. Although each SF Symbol does have a character code allocated from a reserved section of Unicode, none of the system fonts accessible from the web view will render the symbol, so you'll just end up with a box. The images (or SVGs) for individual SF Symbols can be extracted from system fonts, and the content of a WKWebView does theoretically have a way of accessing resources bundled with the app, so in theory they could be displayed. But who knows if that would get past App Review.
There's a similar problem with fonts. I hadn't mentioned it, but the font I used for both the SwiftUI and `UITextView` versions of this has been Apple's New York, which is the system-provided serifed font. This is no problem for SwiftUI and UIKit, since their font classes both have methods for getting the system font of a certain design. But, as far as I can tell, these system fonts are not accessible from web content. Even using the internal name, `.NewYork-Regular` doesn't work; it just falls back on the browser's default font. A similar approach may be taken to the SF Symbols issue, since Apple does make their system fonts available for download on their developer website[^3]. The font could be bundled with the app and then loaded from the web content, though again, who knows how this would go over with App Review.
[^3]: Say goodbye to the days of extracting SF Mono from inside Terminal.app just to use it inside other text editors.
So, after all that, what am I going to do for Rocketeer? Well, from a customer perspective, the `WKWebView` solution is clearly the best, since it both allows far more features and makes a number of others behave much more in line with the way you'd expect. But I'm kinda annoyed about it. This isn't just a document viewer for some random format that I'm building. This is a browser for Gemini, a protocol and a format which are _very_ intentionally designed to avoid the pitfalls and complexities of the web. But the most feature-complete way to build this is, because all the other available UI frameworks aren't up to the (relatively simple) task, to pull in an entire web rendering engine. The very technology Gemini is trying to get away from. Isn't that ironic.
```
title = "Automatically Scroll the Text View Caret into View"
tags = ["swift"]
date = "2020-11-11 10:38:42 -0400"
short_desc = "Caret hide and seek."
slug = "swiftui-text-view-caret"
```
That's right, it's time for this month's installment of the [never](/2020/swiftui-expanding-text-view/) [ending](/2020/more-swiftui-text-views/) SwiftUI text view saga! The text view previously implemented is of course auto-expanding and has scrolling disabled. While this mostly works, it has a rather unfortunate UX problem. Let's say the user is typing into the text view, and they reach the end of the screen. As they continue to type, the text will wrap onto the next line and the caret will go with it. But, because they're already at the bottom of the screen (or immediately above the bottom of the keyboard), the caret, along with the text that they're currently typing, will no longer be visible.
<!-- excerpt-end -->
Ordinarily, `UITextView` would handle this for us. Whenever the cursor moves, it would simply scroll itself so that the cursor is visible. But because the `UITextView` expands to show its entire contents, the cursor is never occluded by the text view's bounds. So, we need to handle it ourselves.
Using the `textViewDidChangeSelection` delegate method, we can tell whenever the cursor is moved.
```swift
struct WrappedTextView: UIViewRepresentable {
// ...
class Coordinator: NSObject, UITextViewDelegate {
// ...
func textViewDidChangeSelection(_ textView: UITextView) {
ensureCursorVisible(textView: textView)
}
}
}
```
To actually make the cursor visible, we need two key things: the scroll view that we're actually going to scroll, and the position of the cursor on screen.
Getting the scroll view instance is fairly straightforward. Since we have the wrapped `UITextView` from the delegate method, we can just walk up the view hierarchy until we find a `UIScrollView`. This does technically rely on an internal implementation detail of SwiftUI's `ScrollView` (unless you're also using `UIViewRepresentable` to wrap the scroll view yourself), namely that it's backed by an actual UIKit scroll view, but this is one that I'm willing to live with, given that it doesn't seem likely to change any time soon. We can also fail gracefully if the scroll view isn't found, to try and prevent any future catastrophic breakage.
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func findParentScrollView(of view: UIView) -> UIScrollView? {
var current = view
while let superview = current.superview {
if let scrollView = superview as? UIScrollView {
return scrollView
} else {
current = superview
}
}
return nil
}
}
```
Now, getting the scroll view is as simple as calling this method with the text view:
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func ensureCursorVisible(textView: UITextView) {
guard let scrollView = findParentScrollView(of: textView) else {
return
}
}
}
```
Next, we need to get the cursor's position. We can use the `selectedTextRange` property of `UITextView` and its `caretRect(for:)` method. Note that this is the <code>selected<em>Text</em>Range</code> property, not the `selectedRange` property. The text range version gives us a `UITextRange` which uses `UITextPosition`s for its start and end locations, in contrast with `selectedRange` which is just a regular `NSRange` of integers. `UITextPosition` is an opaque class, subclasses of which are used internally by objects conforming to the `UITextInput` protocol, and is what's accepted by the `caretRect(for:)` method.
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func ensureCursorVisible(textView: UITextView) {
guard let scrollView = findParentScrollView(of: textView),
let range = textView.selectedTextRange else {
return
}
        let cursorRect = textView.caretRect(for: range.start)
}
}
```
The `caretRect(for:)` method returns the rect representing the cursor, but in the coordinate space of the text view. In order to actually scroll it into view, we need to convert it into the scroll view's coordinate space. After that, we can use the `UIScrollView.scrollRectToVisible` method to actually change the content offset of the scroll view so that the cursor is visible, without having to do a bunch of tedious math ourselves.
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func ensureCursorVisible(textView: UITextView) {
// ...
var rectToMakeVisible = textView.convert(cursorRect, to: scrollView)
scrollView.scrollRectToVisible(rectToMakeVisible, animated: true)
}
}
```
This works pretty well: when the user is typing and the cursor wraps onto a new line, the scroll view scrolls so that the cursor is back on-screen. However, there are a couple things that could be improved.
First off, the area of the text view that's being made visible is limited to exactly the size and position of the cursor. While this does mean the cursor becomes visible, it's at the very bottom of the screen, and only ever so slightly above the keyboard, which doesn't look great. It would be better if, after scrolling, there was a little bit of space between the edge of the screen and the line of text being edited. We can accomplish this by adjusting the rectangle we want to make visible.
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func ensureCursorVisible(textView: UITextView) {
// ...
var rectToMakeVisible = textView.convert(cursorRect, to: scrollView)
rectToMakeVisible.origin.y -= cursorRect.height
rectToMakeVisible.size.height *= 3
scrollView.scrollRectToVisible(rectToMakeVisible, animated: true)
}
}
```
By moving the Y coordinate of the rect up by the cursor's height and tripling the rect's height, we change the rect so it's centered on the cursor and there's vertical padding on either side equal to the height of the cursor. Scrolling this rect into view will make sure that the cursor isn't all the way at the top or bottom of the screen, but instead has some room to breathe.
The other issue is that if the user is typing very quickly and the cursor is changing lines rapidly, the scroll view's offset will stop animating smoothly and start jittering around. This is because we're calling `scrollRectToVisible` with animation enabled many times in quick succession. Multiple animations end up running simultaneously and compete over which gets to control the content offset. This is slightly more complicated to fix.
We need some way of controlling when the animation is running, so that whenever we're about to start a new animation, we can cancel the old one so that it doesn't interfere. When passing `animated: true` while scrolling the scroll view, there's no easy way of doing this, since we don't know how the scroll view is actually performing the animation internally. We need to control the animation entirely ourselves. We can do this using a [`UIViewPropertyAnimator`](https://developer.apple.com/documentation/uikit/uiviewpropertyanimator), which allows cancelling an in-progress animation.
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private func ensureCursorVisible(textView: UITextView) {
// ...
let animator = UIViewPropertyAnimator(duration: 0.1, curve: .linear) {
scrollView.scrollRectToVisible(rectToMakeVisible, animated: false)
}
animator.startAnimation()
}
}
```
By passing `false` for the `animated:` parameter of `scrollRectToVisible`, we instruct it to update the scroll view's `contentOffset` immediately, which will be captured by the property animator, and then animated smoothly. I used a duration of a tenth of a second and a linear curve, because it felt quite natural and didn't cause a problem when typing quickly. (Using a non-linear curve could cause a problem, because as animations are rapidly started and stopped, the velocity of the animation would not stay constant. Given such a short duration, this may not be a problem, but, also given the short duration, the effects of a non-linear curve aren't really visible.)
We can then store the animator on the coordinator class so that when we next try to start an animation, we can cancel any existing, non-completed one:
```swift
class Coordinator: NSObject, UITextViewDelegate {
// ...
private var cursorScrollPositionAnimator: UIViewPropertyAnimator?
private func ensureCursorVisible(textView: UITextView) {
// ...
if let existing = self.cursorScrollPositionAnimator {
existing.stopAnimation(false)
}
let animator = UIViewPropertyAnimator(duration: 0.1, curve: .linear) {
scrollView.scrollRectToVisible(rectToMakeVisible, animated: false)
}
animator.startAnimation()
self.cursorScrollPositionAnimator = animator
}
}
```
The `stopAnimation` method takes a parameter called `withoutFinishing` for which we pass `false` because we want the in-progress animation to stop immediately where it is, without jumping to the end (this also skips calling any completion blocks of the animator, but this doesn't matter as we're not using any).
With that, it finally works correctly. As the user is typing, the scroll view is scrolled smoothly to keep the cursor in view at all times, and there are no issues with the user typing too quickly for the animations to keep up.
```
title = "Parsing ID3 Metadata in Elixir"
tags = ["elixir"]
date = "2020-12-07 21:26:42 -0400"
short_desc = "Extracting metadata stored in ID3 tags from MP3 files with Elixir."
slug = "parsing-id3-tags"
```
On and off for the past year and a half or so, I've been working on a small side project to catalog and organize my music library, which is predominantly composed of MP3 files[^1]. There are existing pieces of software out there that will do this (such as Beets and Airsonic), but, as many a programmer will attest to, sometimes it's just more fun to build your own. The choice of language was easy. For a couple years now, Elixir has been my favorite for any back-end web dev. I also had an inkling that its powerful pattern matching facilities could work on arbitrary binary data—perfect for parsing file formats.
I knew that MP3 files had some embedded metadata, only for the reason that looking at most tracks in Finder shows album artwork and information about the track. Cursory googling led me to the [ID3 spec](https://id3.org/).
[^1]: Actual, DRM-free files because music streaming services by and large don't pay artists fairly[^2]. MP3s specifically because they Just Work everywhere, and I cannot for the life of me hear the difference between a 320kbps MP3 and an \<insert audiophile format of choice> file.
[^2]: Spotify pays artists 0.38¢ per play and Apple Music pays 0.783¢ per play ([source](https://help.songtrust.com/knowledge/what-is-the-pay-rate-for-spotify-streams)). For an album of 12 songs that costs $10 (assuming wherever you buy it from takes a 30% cut), you would have to listen all the way through it between 75 and 150 times for the artist to receive as much money as if you had just purchased the album outright. That's hardly fair and is not sustainable for all but the largest of musicians.
<!-- excerpt-end -->
Initially, I found a package on Hex for parsing ID3 tags from Elixir. It wasn't implemented directly in Elixir, though; instead, it used a NIF: a Natively Implemented Function. NIFs are pieces of C code which are used to implement functions that are accessible to Erlang/Elixir, which is useful if very high performance is needed. But the NIF wouldn't compile on my machine, so rather than trying to fix it, I decided to try and parse the ID3 data myself. Fortunately, Elixir is a very nice language for parsing binary file formats.
## Binary Pattern Matching
What makes Elixir so nice for parsing file formats? Pattern matching. Specifically bitstring pattern matching, but let's start with ordinary pattern matching. (If you already know this, [skip ahead](#parsing-tags).)
Pattern matching in code is, generally speaking, describing what you expect the shape of a piece of data to be and pulling specific parts of it out. Let's say you have a tuple of two elements, and you want to make sure that the first element is a string containing a certain value and you want to bind the second value to a variable. You could do this in two statements, or you could pattern match on it.
```elixir
{"foo", my_variable} = {"foo", 1234}
IO.inspect(my_variable) # => 1234
```
This becomes even more powerful once you learn that you can use pattern matching in function parameters, so you can provide two completely different implementations based on some aspect of a parameter:
```elixir
def is_foo("foo") do
true
end
def is_foo(_value) do
false
end
is_foo("foo") # => true
is_foo(42) # => false
```
Next: pattern matching bitstrings. A bitstring in Elixir/Erlang is just a big ol' sequence of bits. Additionally, a binary is a special case of a bitstring, where the number of bits is evenly divisible by 8, making it a sequence of bytes. Bitstrings and binaries are written in between double angle brackets, with individual values separated by commas. Unless you specify otherwise, each value is the size of a single byte.
```elixir
<<first, rest::binary>> = <<1, 2, 3>>
first # => 1
rest # => <<2, 3>>
```
Here, we're binding the first byte of the binary to the variable `first`, and any remaining bytes to `rest`. When binding variables (or specifying fixed values, as we'll see shortly) in bitstring pattern matching, two colons and the type of the value follow the name. The size determines how much of the bitstring each value will match. This information is critical, since without it, there may be many ways for a certain pattern to match a bitstring leaving the programmer's intention ambiguous. By default, each element in a bitstring match has a size of a single byte. Anything else must be explicitly specified. Here `rest`, is specified to be a binary, meaning it will be a sequence of bytes.
One key thing to note is that the last element of a match is special. Unlike the preceding elements, its type can be given as just `bitstring` or `binary`, without specifying the size. This lets you get all the remaining data out of the bitstring without knowing how long it is, similar to getting the tail of a list.
You can also match specific sizes, including across multiple bytes and specific numbers of bits:
```elixir
<<first::size(4), rest::bitstring>> = <<0xFF>>
first # => 15
rest # => <<15::size(4)>>
```
This in particular is very useful for parsing binary formats, since it lets you easily unpack bytes which contain multiple bit flags without having to do any bitwise math yourself.
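For example, unpacking a hypothetical flags byte looks like this (the flag names here are made up for illustration):

```elixir
<<compressed::size(1), encrypted::size(1), _reserved::size(6)>> = <<0b10000000>>
compressed # => 1
encrypted # => 0
```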
## Parsing Tags
Ok, now with that under our belts can we finally parse some ID3 tags? Actually, not quite yet. First off, I'm only looking at ID3v2 tags in this, since none of the music I've been testing this against uses v1. Second, there are two slightly different versions of the v2 spec that matter: Version [2.4.0](https://id3.org/id3v2.4.0-structure) and version [2.3.0](https://id3.org/id3v2.3.0). At first glance, they're similar, but there are a number of differences lurking beneath the surface that tripped me up as I was building this. Alright, now let's really get started.
Through the magic of pattern matching, we can define a function that takes a single binary as an argument. There will be two implementations: one that's used if the data begins with an ID3 tag and one for if it doesn't. The fallback implementation accepts anything as its parameter and returns an empty map, indicating that there was no ID3 data. The other implementation will match against the contents of the binary, expecting it to match the defined format of an ID3v2 tag.
```elixir
def parse_tag(<<
"ID3",
major_version::integer,
_revision::integer,
_unsynchronized::size(1),
extended_header::size(1),
_experimental::size(1),
_footer::size(1),
0::size(4),
tag_size_synchsafe::binary-size(4),
rest::binary
>>) do
end
def parse_tag(_) do
%{}
end
```
ID3v2 specifies that a file with an ID3v2 tag should begin with the ASCII byte sequence representing "ID3". We can match this directly in the pattern, because strings in Elixir/Erlang are just binaries. The magic string is followed by an integer representing the major version (4 for ID3 version 2.4 and 3 for 2.3) and another integer representing the least significant component of the ID3 spec version used by the file (we don't care about it because no minor revisions of the spec have been released). It's followed by a series of four 1-bit flags and then four unused bitflags, which should always be zero. After that is a 32-bit number that contains the total size of the ID3 tag. The variable name for this is `tag_size_synchsafe` because it's encoded with ID3v2's special synchsafe scheme, which we'll get to shortly. The remainder of the file is bound as a binary to the `rest` variable.
To understand why ID3 encodes many numbers as "synchsafe", it's important to remember that ID3 was designed to carry metadata specifically for MP3 files. As such, it had to take special care to not interfere with existing MP3-handling software that may not have understood ID3 tags. The MPEG audio format encodes an audio stream into individual frames of audio data, each of which starts with a byte composed of all 1s (i.e., 0xFF). This is done to let audio players easily seek through a bytestream and locate a point where valid audio data starts (called synchronizing), without having to parse the entire file leading up to that point. Because the 0xFF byte is the sync marker, its presence in an ID3 tag (which is, of course, not valid audio data) would cause players to start trying to play back nonsense data. So, within an ID3 tag, all numbers need to be encoded in such a way that an all-1s byte never occurs. This is where the synchsafe scheme comes in (so called because it's safe from causing false syncs).
Synchsafe integers work by only using the seven lower bits of each byte and leaving the highest-order bit as 0, thereby preventing any 0xFF bytes, and false syncs, from occurring. The number 255 would be encoded as 0b101111111 (decimal 383). This means that, for every byte of space used, a synchsafe integer stores only seven bits of information. Decoding these integers is pretty easy. For the simplest case of a single byte, nothing needs to be done: the value of the byte is the value of the entire number. Decoding multi-byte synchsafe integers is slightly more complicated, but not terribly so. We need to go through each byte, building up an integer. We shift each byte to the left by seven times the index of the byte in the multi-byte synchsafe integer.
```elixir
use Bitwise
def decode_synchsafe_integer(<<b>>) do
b
end
def decode_synchsafe_integer(binary) do
binary
|> :binary.bin_to_list()
|> Enum.reverse()
|> Enum.with_index()
|> Enum.reduce(0, fn {el, index}, acc ->
acc ||| (el <<< (index * 7))
end)
end
```
The `bin_to_list` function takes a binary, which is just a sequence of bytes, and converts it into an actual list that we can use with the functions in the `Enum` module. This list is reversed and its elements are converted into tuples which include the index. The list needs to be reversed first because the bytes in synchsafe integers are big-endian, meaning the most significant byte comes first. We want the indices to match this, with the highest index belonging to the highest-order byte. So, we reverse the list so both the bytes and indices are going from least significant and smallest to most significant and greatest. From there, each byte is shifted left by seven times its index, eliminating the gaps between the actual information-carrying bits, and the results are OR'd together.
Here's what decoding the synchsafe number represented as 0b10101001000011010 (0x1521A) would look like:
```
00000001 01010010 00011010
reversed: 00011010 01010010 00000001
Index 0 Index 1 Index 2
00011010 <<< 0 = 00011010
01010010 <<< 7 = 00101001 00000000
00000001 <<< 14 = 01000000 00000000
00000000 00011010
00101001 00000000
||| 01000000 00000000
---------------------
01101001 00011010 = 26906
```
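Going the other direction isn't needed by the parser, but for completeness, an encoder might look like this (a sketch, not part of the original decoder; a fixed 4-byte width is assumed, matching the size fields in the tag header):

```elixir
use Bitwise

# Sketch: pack an integer into 4 synchsafe bytes, 7 bits per byte.
def encode_synchsafe_integer(num) do
  b3 = (num >>> 21) &&& 0x7F
  b2 = (num >>> 14) &&& 0x7F
  b1 = (num >>> 7) &&& 0x7F
  b0 = num &&& 0x7F
  <<b3, b2, b1, b0>>
end

# encode_synchsafe_integer(26906) # => <<0, 1, 82, 26>>
```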
You may have noticed the `unsynchronized` flag in the tag header. The ID3 unsynchronization scheme is another way of preventing false syncs in longer blocks of data within the tag (such as image data in the frame used for album artwork). I elected not to handle this flag for now, since none of the tracks in my library have the flag set. The ID3v2.4 spec says the unsynchronization scheme is primarily intended to prevent old software which isn't aware of ID3 tags from incorrectly trying to sync onto data in the ID3 tag. Since the ID3v2 spec is over 20 years old, pieces of software which aren't aware of it are few and far between, so I guess the unsynchronization scheme has fallen out of favor.
So, since we've gotten the 4-byte binary that contains the tag size out of the header, we can use the `decode_synchsafe_integer` function to decode it.
```elixir
def parse_tag(...) do
tag_size = decode_synchsafe_integer(tag_size_synchsafe)
end
```
We'll use the tag size when we start decoding ID3 frames, to ensure that we don't go over the end of the ID3 tag into the actual MP3 data. But before we can start parsing frames, we need to take care of the extended header, if it's present. The extended header contains optional extra data that doesn't fit into the regular header and isn't part of any frame. This is where the differences between v2.3 and v2.4 come into play. In version 2.3 of the spec, the size of the extended header is fixed at either 6 or 10 bytes, depending on whether a checksum is present. In version 2.4, the size is variable and depends on various flags in the extended header.
So, when parsing the extended header, we'll use a function that takes the major version specified in the tag header, and parses it differently depending on the version. Since, for my purposes, I don't care about any of the data that it would provide, I just discard it and return the binary containing all the bytes after the extended header. The `skip_extended_header` function also returns the length of the extended header, so that we can take it into account when calculating how long the remainder of the tag is (since the tag size in the ID3 header includes the extended header's length).
For the version 2.3 implementation, the extended header begins with the size of the extended header (which excludes the size field itself, so it's always 6 or 10 bytes). This is not encoded as synchsafe (though it is in version 2.4), so we can subtract the six bytes the pattern match has already skipped, and skip any remaining bytes. The length of the extended header is defined by the spec to always be 6 or 10 bytes, so we can safely subtract 6 without breaking anything (matching a binary of size 0 in the pattern match works perfectly fine).
```elixir
def skip_extended_header(3, <<
ext_header_size::size(32),
_flags::size(16),
_padding_size::size(32),
rest::binary
>>) do
remaining_ext_header_size = ext_header_size - 6
<<_::binary-size(remaining_ext_header_size), rest::binary>> = rest
{rest, ext_header_size}
end
```
For the version 2.4 implementation, it's a little bit more complicated. The extended header still starts with four bytes giving the size of the extended header (though this time encoded as a synchsafe number), then has a 0x01 byte, and then a byte for the flags. After those six bytes, there may be more data depending on what flags were set (an additional six bytes for CRC and 2 bytes for tag restrictions). This time, we need to decode the size from synchsafe, and then we can subtract the fixed length and skip over the remainder as before.
```elixir
def skip_extended_header(4, <<
ext_header_size_synchsafe::size(32),
1::size(8),
_flags::size(8),
rest::binary
>>) do
ext_header_size = decode_synchsafe_integer(ext_header_size_synchsafe)
remaining_ext_header_size = ext_header_size - 6
<<_::binary-size(remaining_ext_header_size), rest::binary>> = rest
{rest, ext_header_size}
end
```
In the main `parse_tag` function, we can skip over the extended header if the corresponding flag bit is set. If it isn't, no data needs to be skipped and the length of the extended header is zero. With that, we can finally proceed to parsing the ID3 frames to get out the actually useful data.
```elixir
def parse_tag(...) do
tag_size = decode_synchsafe_integer(tag_size_synchsafe)
{rest, ext_header_size} =
if extended_header == 1 do
skip_extended_header(major_version, rest)
else
{rest, 0}
end
  parse_frames(major_version, rest, tag_size - ext_header_size)
end
```
When passing the remaining tag length to the `parse_frames` function, we need to subtract the extended header length from the tag size that we got from the ID3 header, because it already includes the extended header that we've already skipped over. While parsing frames, we'll use it as a confirmation that we haven't overrun the end of the tag.
The `parse_frames` function also receives the major version of the tag, the actual binary data, and a list of accumulated frames so far (which defaults to an empty list). It will be called in a recursive loop, advancing through the binary and accumulating frames.
```elixir
def parse_frames(major_version, data, tag_length_remaining, frames \\ [])
```
The first case of the function is for if it's reached the total length of the tag, in which case it will just convert the accumulated tags into a map, and return the data that's left (we want to return whatever data's left after the end of the ID3 tag so that it can be used by other parts of the code, say, an MP3 parser...). We can just directly convert the list of frames into a map because, as you'll see shortly, each frame is a tuple of the name of the frame and its data, in whatever form that may be.
```elixir
def parse_frames(_, data, tag_length_remaining, frames)
when tag_length_remaining <= 0 do
{Map.new(frames), data}
end
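# NOTE: in the assembled module, this catch-all clause has to come after the
# frame-matching clause shown below, or it would match every call first.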
def parse_frames(_, data, _, frames) do
{Map.new(frames), data}
end
```
The fallback case of `parse_frames`, which will be called if we can't find a valid ID3 frame header in the data stream, does the same thing.
The bulk of the work is done by the next implementation of `parse_frames`. It starts by matching a valid frame header from the beginning of the binary. After that, it first needs to get the actual size of the frame. In version 2.4 of ID3, the frame size is encoded with the synchsafe scheme, but in 2.3, it's just a plain 32-bit integer. After that, it calculates the entire size of the frame by adding 10 bytes (since the value in the header does not include the size of the header) and then subtracting that from the total tag length remaining, to figure out how much of the tag will be left after the current frame is parsed.
```elixir
def parse_frames(
major_version,
<<
frame_id::binary-size(4),
frame_size_maybe_synchsafe::binary-size(4),
0::size(1),
_tag_alter_preservation::size(1),
_file_alter_preservation::size(1),
_read_only::size(1),
0::size(4),
_grouping_identity::size(1),
0::size(2),
_compression::size(1),
_encryption::size(1),
_unsynchronized::size(1),
_has_data_length_indicator::size(1),
_unused::size(1),
rest::binary
>>,
tag_length_remaining,
frames
) do
frame_size =
case major_version do
4 ->
decode_synchsafe_integer(frame_size_maybe_synchsafe)
3 ->
<<size::size(32)>> = frame_size_maybe_synchsafe
size
end
total_frame_size = frame_size + 10
next_tag_length_remaining = tag_length_remaining - total_frame_size
end
```
After that, it hands the ID of the frame (a four character ASCII string), along with the size of the frame (not counting the header) and the remainder of the binary, over to the `decode_frame` function, which will use the ID of the specific frame to decide how to decode the data.
```elixir
def parse_frames(...) do
# ...
result = decode_frame(frame_id, frame_size, rest)
end
```
## Decoding Frames
The `decode_frame` function pattern matches on the ID of the frame and the binary and does different things, as defined by the spec, depending on the frame. There are a few specific frames I'm interested in, so those are the ones that I'll go into here, but the others could be easily parsed with the same techniques.
First off, the TXXX frame. This frame contains custom, user-defined key/value pairs of strings. It starts off with a number that defines the text encoding scheme.
```elixir
def decode_frame("TXXX", frame_size, <<text_encoding::size(8), rest::binary>>) do
end
```
The text encoding is a single byte containing 0, 1, 2, or 3. First off, the simple encodings: 0 represents an ISO-8859-1 string (byte-compatible with UTF-8 for the ASCII range, which is close enough for our purposes) terminated by a null byte and 3 represents a UTF-8 string, also terminated by a null byte. These are very easy to handle, because strings in Erlang are just binaries, so we can return the data directly.
```elixir
def convert_string(encoding, str) when encoding in [0, 3] do
str
end
```
Next, encoding 1 is text encoded as UTF-16 starting with the [byte order mark](https://en.wikipedia.org/wiki/Byte_order_mark), which lets programs detect the endianness of the text. Strings in this encoding are defined by ID3 to end with two null characters. The `bom_to_encoding` function in OTP checks if the given binary starts with the byte order mark and, if so, returns the detected text encoding and endianness as well as the length of the BOM. That length lets us drop the BOM from the beginning of the data. We can then use another function in the `:unicode` module to convert the binary data to a regular UTF-8 string.
```elixir
def convert_string(1, data) do
{encoding, bom_length} = :unicode.bom_to_encoding(data)
{_, string_data} = String.split_at(data, bom_length)
:unicode.characters_to_binary(string_data, encoding)
end
```
Encoding 2 is also UTF-16, but always big-endian and without the byte order mark. It's also terminated by two null bytes.
```elixir
def convert_string(2, data) do
:unicode.characters_to_binary(data, {:utf16, :big})
end
```
The `convert_string` function will take a piece of string data and convert it to something we can actually use as a string, but we still need to figure out where the string ends. When decoding a frame, we need to find all the data up to one or two null characters, depending on the encoding.
Unfortunately, a number of tracks in my library have text frames which specify a UTF-16 encoding but are actually malformed and don't end with two null characters (they just run up to the end of the frame). So, the main decoding function is also going to take the maximum length of the frame so that we don't accidentally run over the end.
```elixir
def decode_string(encoding, max_byte_size, data) when encoding in [1, 2] do
{str_data, rest} = get_double_null_terminated(data, max_byte_size)
end
```
We'll write a function that scans through a binary looking for a sequential pair of null characters, or until it reaches the maximum length specified. In either of those cases, it will return the binary up until the null characters as well as the remainder of the binary. If neither of those conditions are met, it will skip two bytes from the beginning of the binary, prepending them to the accumulator, and recursively calling itself. We can safely skip two bytes when the double null sequence isn't found because every UTF-16 character is represented by two bytes and therefore the double null terminator won't cross the boundary between two characters.
```elixir
def get_double_null_terminated(data, max_byte_size, acc \\ [])
def get_double_null_terminated(rest, 0, acc) do
{acc |> Enum.reverse() |> :binary.list_to_bin(), rest}
end
def get_double_null_terminated(<<0, 0, rest::binary>>, _, acc) do
{acc |> Enum.reverse() |> :binary.list_to_bin(), rest}
end
def get_double_null_terminated(<<a::size(8), b::size(8), rest::binary>>, max_byte_size, acc) do
next_max_byte_size = max_byte_size - 2
get_double_null_terminated(rest, next_max_byte_size, [b, a | acc])
end
```
The decode function can then convert the string data to a regular, UTF-8 string and return it, along with how many bytes were consumed (so that the frame decoding can know how much of its length is remaining), and the remaining binary data.
```elixir
def decode_string(encoding, max_byte_size, data) when encoding in [1, 2] do
{str_data, rest} = get_double_null_terminated(data, max_byte_size)
  {convert_string(encoding, str_data), byte_size(str_data) + 2, rest}
end
```
When decoding a UTF-8 string, we split the data at the first occurrence of the null byte and ensure that the size of whatever came before it is no greater than the max. If the size of the data up to the first null byte is greater than the max size, we just split the data at the maximum byte size, considering that to be the string. And once again, the function returns the string itself, the number of bytes consumed, and the remaining data.
```elixir
def decode_string(encoding, max_byte_size, data) when encoding in [0, 3] do
case :binary.split(data, <<0>>) do
[str, rest] when byte_size(str) + 1 <= max_byte_size ->
{str, byte_size(str) + 1, rest}
_ ->
{str, rest} = :erlang.split_binary(data, max_byte_size)
{str, max_byte_size, rest}
end
end
```
Now, back to the TXXX frame. The description is decoded by calling our decode function with the text encoding from the frame, a maximum length of 1 less than the frame size (to account for the text encoding byte), and the data. It gives us back the value for the description string, how many bytes were consumed, and the rest of the data. Then, from the `decode_frame` function we return a tuple of two elements: a tuple that represents the frame and the remaining binary data. The tuple that represents the frame is also two elements, the first of which is the frame ID and the second of which is the frame's value (in this case, yet another tuple of the description and value strings) so that we can produce a nice map of all the frame data at the end.
```elixir
def decode_frame("TXXX", frame_size, <<text_encoding::size(8), rest::binary>>) do
{description, desc_size, rest} = decode_string(text_encoding, frame_size - 1, rest)
{value, _, rest} = decode_string(text_encoding, frame_size - 1 - desc_size, rest)
{{"TXXX", {description, value}}, rest}
end
```
Next is the COMM frame, for user specified comments. It starts with a byte for the text encoding, then three bytes specifying a language code, then a string for a short description of the comment, then another for the full value of the comment.
```elixir
def decode_frame("COMM", frame_size, <<text_encoding::size(8), language::binary-size(3), rest::binary>>) do
{short_desc, desc_size, rest} = decode_string(text_encoding, frame_size - 4, rest)
{value, _, rest} = decode_string(text_encoding, frame_size - 4 - desc_size, rest)
{{"COMM", {language, short_desc, value}}, rest}
end
```
The other somewhat complex frame to decode is the APIC frame. It contains an image for the album artwork of the track. It starts with a byte for the text encoding of the description. Then there's a null-terminated ASCII string which contains the MIME type of the image. It's followed by a byte for the picture type (one of 15 spec-defined values which indicates what the picture represents). Then a string for the image description, and lastly the image data itself.
The only thing that's different about how the APIC frame is decoded compared to the other frames we've seen is that there's nothing in-band to signal the end of the picture data. It starts after the description ends and just keeps going until the end of the frame. So, we need to calculate the image's size (i.e., the frame size minus the size of all the data that comes before the image) and then just take that many bytes from the stream.
```elixir
def decode_frame("APIC", frame_size, <<text_encoding::size(8), rest::binary>>) do
  {mime_type, mime_len, rest} = decode_string(0, frame_size - 1, rest)
<<picture_type::size(8), rest::binary>> = rest
{description, desc_len, rest} = decode_string(text_encoding, frame_size - 1 - mime_len - 1, rest)
image_data_size = frame_size - 1 - mime_len - 1 - desc_len
{image_data, rest} = :erlang.split_binary(rest, image_data_size)
{{"APIC", {mime_type, picture_type, description, image_data}}, rest}
end
```
Now, for the default case of the `decode_frame` function. There are a couple things this first needs to handle. First is text information frames. These are a class of frames whose IDs start with the letter T and are followed by three alpha-numeric (uppercase) characters. Each of these frames follows the same format and contains just a text encoding indicator and one or more strings.
<aside>
Originally, I tried to handle text information frames the same way I had with other frame types, with just a `when` condition for the frame ID parameter on a case of the function. It turns out function guards that check if a parameter is an element of a list work by generating a version of the function that matches each specific value. So, trying to check if a param was a valid text information frame ID meant checking if it was an element of a 36^3 = 46,656 element long list. 46,656 individual function cases were generated, and the Elixir compiler took almost 10 minutes to compile just that file. And it crashed inside BEAM when I actually tried to run it.
</aside>
There are also the many, many frames which we have not specifically handled. Even if we don't do anything with them, the decoder still needs to be aware of their existence, because if it encounters a frame that it can't do anything with, it needs to skip over it. Otherwise, the decoder would halt upon encountering the first frame of an unhandled type, potentially missing subsequent frames that we do care about. To handle this, I took the list of all declared frames from the [ID3 native frames](https://id3.org/id3v2.4.0-frames) spec and copied it into a constant list that I can then check potential IDs against.
```elixir
def decode_frame(id, frame_size, rest) do
cond do
Regex.match?(~r/^T[0-9A-Z]+$/, id) ->
decode_text_frame(id, frame_size, rest)
id in @declared_frame_ids ->
<<_frame_data::binary-size(frame_size), rest::binary>> = rest
{nil, rest, :cont}
true ->
{nil, rest, :halt}
end
end
```
If it encounters a text information frame, it delegates to another function which handles pulling out the actual value. Text information frames also have a slight difference: their values can be a list of multiple null-terminated strings. So, this function calls `decode_string_sequence` which decodes as many null-terminated strings as it can up until it reaches the end of the frame.
```elixir
def decode_text_frame(id, frame_size, <<text_encoding::size(8), rest::binary>>) do
{strs, rest} = decode_string_sequence(text_encoding, frame_size - 1, rest)
{{id, strs}, rest}
end
def decode_string_sequence(encoding, max_byte_size, data, acc \\ [])
def decode_string_sequence(_, max_byte_size, data, acc) when max_byte_size <= 0 do
{Enum.reverse(acc), data}
end
def decode_string_sequence(encoding, max_byte_size, data, acc) do
{str, str_size, rest} = decode_string(encoding, max_byte_size, data)
decode_string_sequence(encoding, max_byte_size - str_size, rest, [str | acc])
end
```
If the frame ID is valid but wasn't already handled, it simply skips over the data for that frame. It returns `nil` for the frame itself, and adds a third element to the returned tuple. This is an atom, either `:cont` or `:halt`, which signals to the main `parse_frames` loop whether it should keep going or stop when no frame is found.
```elixir
def parse_frames(...) do
# ...
result = decode_frame(frame_id, frame_size, rest)
case result do
{nil, rest, :halt} ->
{Map.new(frames), rest}
{nil, rest, :cont} ->
parse_frames(major_version, rest, next_tag_length_remaining, frames)
{new_frame, rest} ->
parse_frames(major_version, rest, next_tag_length_remaining, [new_frame | frames])
end
end
```
After attempting to decode a frame, the `parse_frames` function matches on the result of the decode attempt. If the frame ID was not valid and it's instructed to halt, it creates a map from the list of frames and returns it along with the remainder of the data. If there was a valid frame, but nothing was decoded from it, it recurses, calling `parse_frames` again with whatever data came after the skipped frame. If there was a frame, it adds it to the list and recurses once more.
And with that, we finally have enough to parse the ID3 data from a real live MP3 file and get out, maybe not all the information, but the parts I care about:
```elixir
iex> data = File.read!("test.mp3")
iex> ID3.parse_tag(data)
{
%{
"APIC" => {"image/jpeg", 3, "cover", <<...>>},
"COMM" => {"eng", "", "Visit http://justiceelectro.bandcamp.com"},
"TALB" => "Woman Worldwide",
"TIT2" => "Safe and Sound (WWW)",
"TPE1" => "Justice",
"TPE2" => "Justice",
"TRCK" => "1",
"TYER" => "2018"
},
<<...>>
}
```
One of the pieces of information I was hoping I could get from the ID3 tags was the durations of the MP3s in my library. But alas, none of the tracks I have use the TLEN frame, so it looks like I'll have to try and pull that data out of the MP3 myself. But that's a post for another time...
View File
@ -0,0 +1,211 @@
```
title = "Calculating the Duration of MP3 Files"
tags = ["elixir"]
date = "2021-01-03 21:02:42 -0400"
short_desc = "Parsing the MP3 binary format and calculating file duration."
slug = "mp3-duration"
```
Armed with the ID3 decoder from my [last post](/2020/parsing-id3-tags/), we can extract most of the metadata from MP3 files. However, the one piece I still want for my music cataloging software is the track duration, which, for the vast majority of my files, is not included in the ID3 tag. Getting the duration of an audio file isn't as straightforward as I had hoped. One of the easiest solutions would be to just shell out to another piece of software, such as ffmpeg, which can handle a great many audio formats. But that would be boring, and I wanted to minimize the number of dependencies, which meant writing a rudimentary MP3 decoder myself. Luckily, I don't need to actually play back audio myself, so I can avoid a great deal of complexity. I only need to parse enough of the MP3 file to figure out how long it is.
<!-- excerpt-end -->
First, an overview of the anatomy of an MP3 file. As we went through in the last post, an MP3 file may optionally start with an ID3 tag containing metadata about the track. After that comes the meat of the file: a sequence of MP3 frames. Each frame contains a specific amount of encoded audio data (the actual amount is governed by a number of properties encoded in the frame header). The total duration of the file is simply the sum of the durations of the individual frames. Each MP3 frame starts with a marker sequence of 11 1-bits followed by three bytes of flags describing the contents of the frame and then the audio data itself.
Based on this information, many posts on various internet forums suggest inspecting the first frame to find its bitrate, and then dividing the bit size of the entire file by the bitrate to get the total duration. There are a few problems with this though. The biggest is that MP3 files can generally be divided into two groups: constant bitrate (CBR) files and variable bitrate (VBR) ones. Bitrate refers to the number of bits used to represent audio data for a certain time interval. As the name suggests, files encoded with a constant bitrate use the same number of bits per second to represent audio data throughout the entire file. For the naive length estimation method, this is okay (though not perfect, because it doesn't account for the frame headers and any potential unused space in between frames). In variable bitrate MP3s though, each frame can have a different bitrate, which allows the encoder to work more space-efficiently (because portions of the audio that are less complex can be encoded at a lower bitrate). Because of this, the naive estimation doesn't work at all (unless, by coincidence, the bitrate of the first frame happens to be close to the average bitrate for the entire file). In order to accurately get the duration for a VBR file, we need to go through every single frame in the file and sum their individual durations. So that's what we're gonna do.
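For reference, the naive estimate those forum posts describe boils down to a one-liner. A sketch, assuming `first_frame_bitrate_kbps` has already been read out of the first frame's header:
```elixir
# Only (roughly) correct for CBR files; ignores frame headers and padding entirely.
naive_duration_seconds = byte_size(data) * 8 / (first_frame_bitrate_kbps * 1000)
```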
The overall structure of the MP3 decoder is going to be fairly similar to the ID3 one. We can even take advantage of the existing ID3 decoder to skip over the ID3 tag at the beginning of the file, thereby avoiding any false syncs (the `parse_tag` function needs to be amended to return the remaining binary data after the tag in order to do this). From there, it's simply a matter of scanning through the file looking for the magic sequence of 11 1-bits that mark the frame synchronization point and repeating that until we reach the end of the file.
```elixir
def get_mp3_duration(data) when is_binary(data) do
{_, rest} = ID3.parse_tag(data)
parse_frame(rest, 0, 0, 0)
end
```
The `parse_frame` function takes several arguments in addition to the data. These are the accumulated duration, the number of frames parsed so far, and the byte offset in the file. The last two aren't strictly needed for parsing the file, but come in very useful if you have to debug any issues. The function has several different cases. The first looks for the sync bits at the start of the binary and, if it finds them, parses the frame header to calculate the duration, adds it to the accumulator, and recurses. The next case skips a byte from the beginning of the binary and then recurses. And the final case handles an empty binary and simply returns the accumulated duration.
```elixir
def parse_frame(
<<
0xFF::size(8),
0b111::size(3),
version_bits::size(2),
layer_bits::size(2),
_protected::size(1),
bitrate_index::size(4),
sampling_rate_index::size(2),
padding::size(1),
_private::size(1),
_channel_mode_index::size(2),
_mode_extension::size(2),
_copyright::size(1),
_original::size(1),
_emphasis::size(2),
_rest::binary
>>,
acc,
frame_count,
offset
) do
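# body filled in below, once we have the lookup helpers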
end
def parse_frame(<<_::size(8), rest::binary>>, acc, frame_count, offset) do
parse_frame(rest, acc, frame_count, offset + 1)
end
def parse_frame(<<>>, acc, _frame_count, _offset) do
acc
end
```
The main implementation of the `parse_frame` function isn't too complicated. It's just getting a bunch of numbers out of lookup tables and doing a bit of math.
The first thing we need to know is what kind of frame we are looking at. MP3 frames are divided two ways: by the version of the frame and by the layer of the frame. In the header, there are two fields, each two bits wide, that indicate the version and layer. But not every combination of those bits is valid. There are only three versions (and version 2.5 is technically an unofficial addition at that) and three layers. We can use a couple functions to look up atoms representing the different versions/layers, since it's more convenient than having to use the raw numeric values in other places. We also return the `:invalid` atom for version `0b01` and layer `0b00` respectively, so that if we encounter one when parsing a frame, we can immediately stop and look for the next sync point.
```elixir
defp lookup_version(0b00), do: :version25
defp lookup_version(0b01), do: :invalid
defp lookup_version(0b10), do: :version2
defp lookup_version(0b11), do: :version1
defp lookup_layer(0b00), do: :invalid
defp lookup_layer(0b01), do: :layer3
defp lookup_layer(0b10), do: :layer2
defp lookup_layer(0b11), do: :layer1
```
The next piece of information we need is the sampling rate, which is the frequency (in Hertz) at which individual audio samples are taken. In the header, it's also represented by two bits. As before, we pattern match in the function definition to find the actual sampling rate from the index, and return `:invalid` if the index is not permitted.
```elixir
defp lookup_sampling_rate(_version, 0b11), do: :invalid
defp lookup_sampling_rate(:version1, 0b00), do: 44100
defp lookup_sampling_rate(:version1, 0b01), do: 48000
defp lookup_sampling_rate(:version1, 0b10), do: 32000
defp lookup_sampling_rate(:version2, 0b00), do: 22050
defp lookup_sampling_rate(:version2, 0b01), do: 24000
defp lookup_sampling_rate(:version2, 0b10), do: 16000
defp lookup_sampling_rate(:version25, 0b00), do: 11025
defp lookup_sampling_rate(:version25, 0b01), do: 12000
defp lookup_sampling_rate(:version25, 0b10), do: 8000
```
The last piece of information we need from the header is the bitrate: the number of bits used to represent a single second of audio (or, in our case, the number of kilobits, for simplicity's sake). The header has a four-bit field that represents which index in the lookup table we should use to find the bitrate. But that's not all the information that's necessary. In order to know which lookup table to use, we also need the version and layer of the frame. For each version and layer combination there is a different set of bitrates that the frame may use.
So, the `lookup_bitrate` function will need to take three parameters: the version, layer, and bitrate index. First off, indices 0 and 15 are reserved by the spec, so we can just return the `:invalid` atom regardless of the version or layer. For the other version/layer combinations, we simply look up the index in the appropriate list. A couple things to note are that in version 2, layers 2 and 3 use the same bitrates, and all layers for version 2.5 use the same bitrates as version 2.
```elixir
@v1_l1_bitrates [:invalid, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, :invalid]
@v1_l2_bitrates [:invalid, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, :invalid]
@v1_l3_bitrates [:invalid, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, :invalid]
@v2_l1_bitrates [:invalid, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, :invalid]
@v2_l2_l3_bitrates [:invalid, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, :invalid]
defp lookup_bitrate(_version, _layer, 0), do: :invalid
defp lookup_bitrate(_version, _layer, 0xF), do: :invalid
defp lookup_bitrate(:version1, :layer1, index), do: Enum.at(@v1_l1_bitrates, index)
defp lookup_bitrate(:version1, :layer2, index), do: Enum.at(@v1_l2_bitrates, index)
defp lookup_bitrate(:version1, :layer3, index), do: Enum.at(@v1_l3_bitrates, index)
defp lookup_bitrate(v, :layer1, index) when v in [:version2, :version25], do: Enum.at(@v2_l1_bitrates, index)
defp lookup_bitrate(v, l, index) when v in [:version2, :version25] and l in [:layer2, :layer3], do: Enum.at(@v2_l2_l3_bitrates, index)
```
One could do some fancy metaprogramming to generate a function case for each version/layer/index combination to avoid the `Enum.at` call at runtime and avoid some of the code repetition, but this is perfectly fine.
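If you did want to go that route, Elixir's unquote fragments make it fairly painless. A sketch for one of the tables (not something this decoder actually does):
```elixir
# Hypothetical compile-time unrolling of a lookup table into individual clauses.
for {bitrate, index} <- Enum.with_index(@v1_l1_bitrates) do
  defp lookup_bitrate(:version1, :layer1, unquote(index)), do: unquote(bitrate)
end
```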
With those four functions implemented, we can return to the body of the main `parse_frame` implementation.
```elixir
def parse_frame(...) do
with version when version != :invalid <- lookup_version(version_bits),
layer when layer != :invalid <- lookup_layer(layer_bits),
sampling_rate when sampling_rate != :invalid <- lookup_sampling_rate(version, sampling_rate_index),
bitrate when bitrate != :invalid <- lookup_bitrate(version, layer, bitrate_index) do
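# frame size and duration calculation shown below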
else
_ ->
<<_::size(8), rest::binary>> = data
parse_frame(rest, acc, frame_count, offset + 1)
end
end
```
We call the individual lookup functions for each of the pieces of data we need from the header. Using Elixir's `with` statement lets us pattern match on a bunch of values together. If any of the functions return `:invalid`, the pattern match will fail and it will fall through to the `else` part of the with statement that matches anything. If that happens, we skip the first byte from the binary and recurse, looking for the next potential sync point.
Inside the main body of the with statement, we need to find the number of samples in the frame.
```elixir
def parse_frame(...) do
with ... do
samples = lookup_samples_per_frame(version, layer)
else
...
end
end
```
The number of samples per frame is once again given by a lookup table, this time based only on the version and layer of the frame. As before, the version 2.5 constants are the same as the ones for version 2.
```elixir
defp lookup_samples_per_frame(:version1, :layer1), do: 384
defp lookup_samples_per_frame(:version1, :layer2), do: 1152
defp lookup_samples_per_frame(:version1, :layer3), do: 1152
defp lookup_samples_per_frame(v, :layer1) when v in [:version2, :version25], do: 384
defp lookup_samples_per_frame(v, :layer2) when v in [:version2, :version25], do: 1152
defp lookup_samples_per_frame(v, :layer3) when v in [:version2, :version25], do: 576
```
Now, we have enough information to start calculating the actual byte size of the frame. This involves a bit of math that could be done all at once, but let's break it down for clarity.
First, we need to know the duration of a single sample. We have the sampling rate, which is the number of samples per second, so dividing 1 by that value gives us the duration of an individual sample.
Then, since we know the number of samples in the entire frame, we can multiply that by the duration of an individual sample to get the total duration for the entire frame (this is the same value we'll later add to the duration accumulator).
Next, we have the bitrate from the lookup function, but it's in kilobits per second. When determining the frame size, we want the unit to be bytes, so we first multiply by 1000 to get bits per second, and then divide by 8 to get bytes per second.
The bytes per second value can then be multiplied by the frame duration to get the number of bytes. Finally, the frame may have padding to ensure that the bitrate exactly matches its size and duration. The size of the padding depends on the layer: for layer 1 the padding is 4 bytes and for layers 2 and 3, 1 byte.
```elixir
defp get_frame_size(samples, layer, kbps, sampling_rate, padding) do
sample_duration = 1 / sampling_rate
frame_duration = samples * sample_duration
bytes_per_second = kbps * 1000 / 8
size = floor(frame_duration * bytes_per_second)
if padding == 1 do
size + lookup_slot_size(layer)
else
size
end
end
defp lookup_slot_size(:layer1), do: 4
defp lookup_slot_size(l) when l in [:layer2, :layer3], do: 1
```
One thing to note is that we `floor` the size before returning it. All of the division turns the value into a floating-point number, albeit one for which we know the decimal component will be zero. `floor`ing it turns it into an actual integer (`1` rather than `1.0`) because using a floating-point value in a binary pattern would cause an error.
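To make the math concrete, here's the arithmetic for a common version 1, layer 3 frame (1152 samples, 128 kbps, 44.1 kHz, no padding; the numbers are illustrative, not from any particular file):
```elixir
# 1152 / 44100             => ~0.026122 seconds of audio per frame
# 128 * 1000 / 8           => 16000 bytes per second
# floor(0.026122 * 16000)  => 417 bytes per frame
```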
With that implemented, we can call it in the `parse_frame` implementation to get the number of bytes that we need to skip. We also perform the same calculation to get the frame duration. Then we can skip the requisite number of bytes of data, add the values we calculated to the various accumulators and recurse.
```elixir
def parse_frame(...) do
with ... do
# ...
frame_size = get_frame_size(samples, layer, bitrate, sampling_rate, padding)
frame_duration = samples / sampling_rate
<<_skipped::binary-size(frame_size), rest::binary>> = data
parse_frame(rest, acc + frame_duration, frame_count + 1, offset + frame_size)
else
...
end
end
```
And with that, we can accurately find the duration of any MP3 file!
```elixir
iex> data = File.read!("test.mp3")
iex> MP3.get_mp3_duration(data)
452.20571428575676 # 7 minutes and 32 seconds
```
View File
@ -0,0 +1,61 @@
```
title = "M1 Mac mini Review"
tags = ["computers"]
date = "2021-01-13 21:43:42 -0400"
short_desc = "The M1 Mac mini is my favorite computer. Let me tell you why."
slug = "m1"
```
I've had an M1 Mac mini for about a month now, so I thought I'd write up my experiences with it. The configuration I got has 16GB of RAM and 256GB of storage (the base amount). The reasoning for the bare minimum storage is that Apple charges an arm and a leg for extra space, and I intend this to primarily be a stopgap machine until there are higher-power Apple Silicon equipped laptops. If I really desperately need extra space down the line, I can buy a cheap external SSD (that will continue to be useful after this computer is gone). The 16GB of RAM, however, is necessary to do any iOS development (Xcode and all the various associated services can by themselves easily consume almost 8 gigs). So far, I've moved just about all of my non-work desktop computing over to it, and it's been absolutely fantastic.
<!-- excerpt-end -->
The first thing I did after setting up my account was to install Firefox. When the M1 Macs first became available there weren't yet native versions of big applications like Firefox and Chrome available, but fortunately by the time I got mine, a beta version of Firefox that was recompiled for ARM was available.
Just playing around, visiting a few websites, I was happy to see everything appeared to be working. You might think this would be a given, but when I tried Firefox on macOS 11 at the beginning of the beta cycle (on my Intel machine, even), video playback was almost completely non-functional.
With Firefox reinstalled, I started signing back into a bunch of my accounts. With long, random passwords, this is rather tedious, so before proceeding I wanted to install my password manager. For reasons that don't bear getting into, I use an old version of 1Password 6, not the most recent. Of course, this means that it hasn't been recompiled to be a Universal app; it only runs on Intel. Fortunately, Rosetta comes to the rescue. After copying the .app from another computer, I double clicked it to launch it, just like any other app, after which the OS presented a dialog asking if I wanted to install Rosetta.
Presumably it's shipped separately and not bundled with the base OS because of the bandwidth requirements of shipping an x86_64 version of every system library that not everyone may need. Additional copies of system libraries are needed because within a single process only one architecture can be used: either everything must be Intel or everything must be Apple Silicon.
Using Rosetta for 1Password worked perfectly, aside from a slight delay when launching the app for the first time (due to Rosetta pre-translating the Intel binary into ARM). Once it launched, everything worked exactly as I expected. Even IPC between 1Password under Rosetta and the natively-running Firefox extension works flawlessly.
Other Intel-only apps I've tried have also worked similarly well. IINA, a video player built on top of mpv, works perfectly. Inkscape, in my brief testing, also works as well as it does on Intel, which is to say, it functions but is not particularly pleasant (fortunately Inkscape some time ago released a native macOS version that no longer depends on XQuartz, removing a potential barrier). I'm also currently running MacVim under Rosetta (in which I'm writing this post) and it's working without issue. coc.nvim is a Vim plugin which hosts Node.js extensions that provide autocompletion and LSP support. The part of the plugin that runs inside Vim runs, of course, through Rosetta. Node, which I installed through Homebrew, supports ARM natively, and, as with 1Password, IPC across the architecture boundary works perfectly. The only other significant app I tested under Rosetta is the game Sayonara Wild Hearts. The version in the App Store is Intel-only, and it's built with Unity 2018.4.7f1, which was released in August of 2019. It works flawlessly running at 1440p @ 144Hz. I can't say the exact framerate, because the game doesn't have a counter built in and I'm too lazy to find a standalone macOS one, but it's definitely well above 60FPS and runs very smoothly, without any noticeable input latency or hitching.
The only performance issue I encountered with Rosetta was using an older version of Node.js that was not compiled to run natively. Performance when running under Rosetta was significantly worse than both running natively on the M1 and natively on my Intel laptop. I tried to run `npm install` but called it quits after almost 5 minutes and switched to a newer version of Node that was compiled for ARM. Presumably this significant performance difference is due to Node's usage of the V8 VM, which does just-in-time compilation. This requires Rosetta to constantly re-translate x86_64 code into ARM, rather than being able to translate everything ahead-of-time and then execute natively, without interruption.
Of the Mac apps I've tried, the sole app that hasn't worked is MonitorControl, an app that uses the Display Data Channel to control the backlight brightness on monitors that macOS itself does not support. Although the app is running under Rosetta, that isn't the problem. It seems that the system APIs used to access the DDC either aren't implemented on ARM or aren't supported by the driver for Apple's own GPU. Hopefully this is merely an oversight that can be fixed in a future macOS update.
One of the first things I did upon receiving the machine was following [these](https://soffes.blog/homebrew-on-apple-silicon) popular instructions to install Homebrew. I installed two copies of it: one running natively on ARM, installed in `/opt/homebrew/`, and the other forced to run in x86_64 under Rosetta in the classic location. There's also a shell alias to let me easily access the Rosetta version by running `ibrew` which wraps the Homebrew command in `arch -x86_64`.
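The alias itself is a one-liner, something like this (the exact path depends on where the Intel copy is installed; `/usr/local` is the classic location):
```sh
alias ibrew='arch -x86_64 /usr/local/bin/brew'
```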
I expected the native version of Homebrew to be fairly unstable, and that I would have to fall back on the Intel version frequently. In a pleasant surprise, that has not been the case. Almost everything I've tried to install with Homebrew has worked perfectly with the ARM version, even those packages which themselves are x86 only and run under Rosetta. In fact, the only thing that hasn't worked with native Homebrew has been iPerf.
```sh
$ brew list --formula | wc -l
87
$ ibrew list --formula | wc -l
1
```
Up to this point, everything I'd done had been not so different from setting up any previous Mac. The first thing I did that really gave me a sense of how much faster the M1 is was installing Xcode. The non-App Store version of Xcode is distributed in an ~11GB .xip file, which is a compressed, cryptographically-signed format that is notoriously slow to extract. I didn't time it, but when extracting Xcode on my Intel laptop, I'd start the extraction process and then go back to doing something else for 10 to 15 minutes. On the M1, it took between 5 and 10 minutes. Still not terribly quick, but a substantial improvement. With Xcode installed, I was eager to start playing around with my iOS projects.
Tusker, my Mastodon app for iOS, is easily the biggest iOS project I have, at about 21 thousand lines of Swift code and a couple dozen .xib files (which surprisingly are the source of a significant fraction of the compile time). On my Intel laptop, which is an 8-core 16" MacBook Pro (with 32GB of RAM, double that of the mini), a clean build in debug mode takes 47.9 seconds. On the Mac mini, the same takes 29.1 seconds, an incredible 39% improvement. When performing a full archive of the app, which builds it in release mode with all optimizations enabled, my Intel machine takes 83.9 seconds and the M1 takes 59.9 seconds, making for a 28% improvement.
It's difficult to overstate how impressive this is. With Intel having long since fallen off the curve of Moore's law, a 30 to 40 percent improvement in a single (customer-facing) hardware generation is incredible.
While developing, I don't actually spend that much time doing full rebuilds, though. So, how about a slightly more practical example of why this is such a big deal? One thing I've previously talked about (as have many others) as making perhaps the single biggest difference in the enjoyability of programming is the speed of the feedback loop. That is to say, how fast is it from the time when I make a change to my code and hit the Build & Run button to the time at which I can actually see the effect of that change. The smaller the window of time in between those two, the less time there is for me to get distracted, the faster I can figure out whether the change has done what I desired, and the faster I can move on to making the next change. To test this, again using Tusker, I built and launched the app, made a 1-line trivial change to the project (adding `view.backgroundColor = .red` to a certain view controller), and then rebuilt and launched the app once more. This was more difficult to time accurately, but the M1 was about twice as fast as the Intel machine (~5 seconds down from ~10). Because of this, as I've continued to use this computer, iOS development is the task for which it's made the largest difference in my day-to-day activities.
I ran a few more programming-related tests aside from iOS development, because that's not all I do. When I first got the machine, I was still in the midst of Advent of Code. The puzzle I had most recently completed was day 11, which was implementing a variation of Conway's Game of Life. I wrote my solution in Elixir on my laptop, and it worked perfectly well but was bottlenecked by the fact that looking up the n-th element of a list in Elixir is an O(n) operation. I thought it would make for a decent improvised Elixir benchmark. On my Intel MBP, my solution for part 1 of the puzzle ran in 5.9 seconds and part 2 in 8.0 seconds. With BEAM (the VM used by Erlang/Elixir) running natively on the M1, those times came down to 2.6 seconds and 3.5 seconds respectively, or over _twice_ as fast in both cases.
Out of curiosity (I didn't expect to actually be able to use this machine for work, mainly because of the memory limitation), I also tried compiling the front-end code for the project I work on at my job. It's composed of approximately 190 thousand lines in total of TypeScript, Vue, and Sass. Here the performance improvement was less impressive, though not insignificant. Compiling everything from scratch takes 85 seconds on my laptop and 69 seconds on the Mac mini. Additionally, when compiling from scratch, the fans in my laptop spin up and become quite loud (not to mention the chassis gets unpleasantly hot) about 60 seconds in, whereas the Mac mini remains silent for the entire duration. While running Webpack in watch mode and making a trivial change (inserting a `console.log` call in a Vue component) the time it takes for Webpack to finish recompiling is 19 seconds on my laptop and just 11 on the M1. I'm not certain, but I suspect at least part of the reason the performance improvement with Webpack is so much less than with other tasks, particularly during a clean build, is because Node.js is entirely single-threaded.
As for the hardware itself? It's fantastic.
Like I went over, performance is great; it easily beats my 16" Intel MacBook Pro in every task I used it for. The other remarkable thing is the noise. It has a single fan, but if you told me it was completely fanless, I would believe you. Not once have I heard the fan turn on. The environment I'm using it in isn't incredibly quiet, but it's really not that loud. The only way I can tell the fan in this computer is spinning is if I put my hand behind the rear exhaust vent or if I press my ear up against its surface.
Unlike on the laptop variants of the M1 machines, where the port selection is a measly two USB-C ports, the mini also has an Ethernet port, two type-A USB ports, an HDMI output, and a separate AC input. On a laptop, the I/O would be very limiting; trying to connect to an external desk setup would require some adapters or a big, fancy (not to mention expensive) Thunderbolt dock. But the mini has enough ports that I can directly connect everything from my setup that I'd normally connect to my laptop (granted, I do use my monitor's built-in USB hub for both machines).
There are only a few issues I've experienced that _may_ be attributable to hardware. The first is that when I'm playing back audio to my Bluetooth headphones, they periodically cut out for a fraction of a second before resuming audio playback. I regularly use these same headphones with my Intel Mac, on which I haven't experienced this issue while running either Catalina or Big Sur. The other issue is that when the Mac mini is connected to my 1440p/144Hz display, it loses the 144Hz setting almost every time I wake it from sleep. Oddly, just changing the refresh rate while the resolution is set to "Default for display" does nothing. I have to first change the resolution to be scaled down, and then back to the default, before changing the refresh rate actually takes effect. The final issue, which has only happened once in the past month, is that when I woke the computer up from sleep, it had stopped outputting video over the HDMI port. It was still sending video over a USB-C to DisplayPort adapter, but not HDMI. Unplugging and reconnecting the HDMI cable didn't fix the issue, nor did power cycling the monitor. I had to fully restart the Mac mini to resolve the issue.
But, all in all, for a product that's the first generation of both a new (to macOS) CPU and GPU architecture, this has been a phenomenally good experience. I am _incredibly_ eager to see both what a higher-performance variant of the M1 looks like and future generations of this architecture.
View File
@ -0,0 +1,24 @@
```
title = "Twitter and Game Design Techniques"
tags = ["social media"]
date = "2021-02-25 22:46:42 -0400"
short_desc = "The technique Twitter borrows from game design to keep you engaged."
slug = "twitter-game-design"
```
A few weeks ago, I read [a thread](https://twitter.com/kchironis/status/1355585411943260162) on Twitter by a game designer at Riot Games. The thread is about the concept of "celebration" in the world of game design. It refers to the techniques used to reinforce "good" behavior in video games (good not in any objective sense, but simply whatever is desired by the game designer). It was an interesting thread, but I moved on shortly after reading it and didn't give it much more thought. Until last week when I was reading [an article](https://craigwritescode.medium.com/user-engagement-is-code-for-addiction-a2f50d36d7ac) about how social media is designed to be addictive. With the thread still floating around in the back of my mind, what I realized while thinking about the design of social media platforms was that Twitter uses the exact same techniques as video games.
<!-- excerpt-end -->
I'll use Twitter as an example, because I'm very familiar with it. At a high level, what does Twitter want you to do? It wants you to keep using Twitter. Why? So it can show you more ads. But, the platform needs some way of figuring out what ads to show you, because being able to target types of ads at people who are more likely to be interested lets Twitter charge higher prices to advertisers. Twitter doesn't just want you to be spending time on the platform, it wants you to interact and be _engaged_.
The most common interaction people have when just browsing Twitter is clicking the Like button. Clicking the Like button also serves as a useful signal to Twitter's ad targeting algorithms that you probably have some interest in the topics of whichever tweet you liked.
So, Twitter has an action it wants you to perform. But it doesn't want to tell you to do it, just for you to get into the habit of taking the action in the regular course of using the platform. The same problem game designers are faced with. And Twitter uses the same techniques.
As part of the design of a video game, you need to get the player to do certain things that will lead them to progress through the game. But, you don't want to just throw a wall of text in their face to explain everything in great detail, you want to be more subtle about it. So, you design the game so that eventually the player will try the thing you want them to do. Then, when the player does the Good Thing, you signal your affirmation and say, "Yes, good job!" But you have to be subtle about it. You want the game to really _feel_ fun for the player, not to give the impression that it's coddling them. Instead, you use little celebratory cues that the player will perceive without even thinking about. These could be little animations, particle effects, screen shakes, or even auditory cues. There are lots of possibilities, but the key component is that celebrations don't have to be thought about by the user.
When you click the Like button on a tweet, a few things happen: the heart button itself turns solid red, a small particle effect plays, and the number of likes rolls up. These are the same techniques game designers use. The animation is eye-catching without being distracting, and the like count increasing lets you subconsciously connect the action you just took to the effect it had.
I can't know if Twitter does it with the deliberate intent of making users form habits, but I can't help but feel like that is a consequence, even if a small one.
View File
@ -0,0 +1,86 @@
```
title = "The Intricate Image Caching Architecture of Tusker"
tags = ["swift"]
date = "2021-04-08 18:25:42 -0400"
short_desc = "The three hardest problems in computer science are naming, caching, and off-by-one errors."
slug = "image-caching"
```
A fairly important part of Tusker (my iOS Mastodon app) is displaying images. And a bunch of varieties of images: user avatars, post attachments, custom emojis, user profile headers, as well as a few other types of rarely-shown images that get lumped in with attachments. And displaying lots of images in a performant way means caching. Lots of caching.
In the beginning, there was nothing. Then I started displaying images and almost immediately realized there would need to be some amount of caching. Otherwise, just scrolling down a little bit and then back up would result in images being re-loaded that were present mere seconds ago.
The very first implementation was super simple. It was basically just a dictionary of image URLs to the `Data` for the image at that URL. This fulfilled the primary goals of being 1) super easy to build and 2) mostly working for the simplest of use cases. But, the blog post doesn't end here, so clearly there are some issues remaining.
<!-- excerpt-end -->
<aside class="inline">
Before I get to the pitfalls of the first version, let me explain a couple properties of Mastodon that make some of my caching strategies viable at all.
When Mastodon (and Pleroma) give you API responses, any referenced images are provided as URLs. These are URLs that point directly to the files, not to some intermediary endpoint[^1]. The URL corresponds to exactly one file, and which file it corresponds to will never change. When, for example, a user uploads a new avatar, the avatar URL returned from the various API requests changes.
[^1]: Unfortunately, this property is not true of Honk; the avatar URL for a Honk user looks like `https://example.com/a?a=<USER ID>`. Though at the time I was first building this image caching system, Honk didn't even exist. And even today, it doesn't really cause a problem. Honk users almost always have avatars that are procedurally generated by the software. Therefore my assumption is still largely true.
This property means that I can have very long expiry times on the cache. The image data at a certain URL won't change (unless it's deleted), so I can just keep it for as long as I need to. And when a user changes their avatar, a new URL will be used that isn't in the cache, so it will have to be fetched.
</aside>
Now, back to the implementation. The first strategy has an obvious issue: memory usage will grow indefinitely. Luckily, there's a builtin solution for this: [NSCache](https://developer.apple.com/documentation/foundation/nscache). NSCache is essentially a dictionary that works with the OS to automatically remove its contents when the system needs memory for something else.
This worked well enough for a little while, but there's another fairly low-hanging optimization. Because URLs aren't reused, images can be cached for a very long time. Even across app launches, if the cache were persisted to disk. Enter the [Cache](https://github.com/hyperoslo/Cache) library. It provides memory- and disk-based caches (the memory one just wraps NSCache). While needing to load things from disk is relatively rare (because once an object is loaded from the on-disk cache, it will be kept in the in-memory cache), it's still a nice improvement during app launch and for the eventuality that Tusker is asked by the system to give back some memory.
This setup served me fairly well, and (aside from bugfixes) the image caching architecture went untouched for a while. Until I started working on improving the app's behavior in degraded network conditions.
When running with the Network Link Conditioner in a super low-bandwidth preset, I launched the app to see what would happen. After a few API requests, all the posts loaded. But none of the images yet (I had purged the on-disk cache in order to test this scenario). Then the user avatars started loading in, one by one. Even for the same user.
The next optimization, then, is obvious. Why many request when few do trick? So, whenever something needs to load an image, instead of only checking if the URL already exists in the cache, I can also check whether there are any in-flight requests for that URL. If there are, then instead of starting a new request, the completion handler just gets tacked on to the existing request. With this in place, when you launch the app under poor network conditions, every instance of a specific user's avatar will load in simultaneously with the net outcome being that the app overall is finished loading sooner.
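In skeletal form, the batching looks something like this (a minimal, non-thread-safe sketch with made-up names, not Tusker's actual code):
```swift
import Foundation

class ImageLoader {
    // URL -> completion handlers waiting on the in-flight request for that URL.
    private var inFlight: [URL: [(Data?) -> Void]] = [:]

    func load(_ url: URL, completion: @escaping (Data?) -> Void) {
        if inFlight[url] != nil {
            // A request for this URL is already running; just tack the handler on.
            inFlight[url]?.append(completion)
            return
        }
        inFlight[url] = [completion]
        URLSession.shared.dataTask(with: url) { data, _, _ in
            // Fire every handler that accumulated while the request was running.
            let handlers = self.inFlight.removeValue(forKey: url) ?? []
            handlers.forEach { $0(data) }
        }.resume()
    }
}
```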
The network request batching mechanism also has one more feature. When something calls it to either kickoff a network request or add a completion handler to one that's already running, it receives back an object (called `Request` in my code, because that's what they are from the API consumer's point-of-view) which can be used to cancel the request. This is so that, if, say, a table view cell is reused, the requests for the cell's old data can be cancelled. But because the actual network requests are batched together, calling the cancel method on the request object doesn't necessarily cancel the underlying request (what I call a `RequestGroup`). The individual completion handler for the "cancelled" request will be removed, but the actual URL request won't be cancelled if there are still other active handlers.
There's also one more feature of the batching system. In some cases (primarily [table view prefetching](https://developer.apple.com/documentation/uikit/uitableviewdatasourceprefetching)) it's useful to pre-warm the cache. This can either be by just loading something from disk, or by starting a network request for the image (either the request will finish by the time the data is needed, in which case the image will be in the in-memory cache or it will still be in-progress, in which case the completion handler that actually wants the data will be added to the request group). For this, there are also completion handler-less requests. They are also part of the RequestGroup and contribute to keeping the underlying network request alive. Cancelling a callback-less request is trivial because, without the completion handler, each one that belongs to the same URL is identical.
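A sketch of how that cancellation split could be modeled (`Request` and `RequestGroup` are the post's names; the internals here are guesses):
```swift
import Foundation

class RequestGroup {
    private var handlers: [UUID: (Data?) -> Void] = [:]
    var task: URLSessionDataTask?

    func addHandler(_ handler: @escaping (Data?) -> Void) -> Request {
        let id = UUID()
        handlers[id] = handler
        return Request(id: id, group: self)
    }

    func cancelHandler(id: UUID) {
        handlers.removeValue(forKey: id)
        // The underlying URL request is only cancelled once no handlers remain.
        if handlers.isEmpty { task?.cancel() }
    }
}

// What API consumers get back; cancelling one detaches only its own handler.
struct Request {
    let id: UUID
    let group: RequestGroup

    func cancel() { group.cancelHandler(id: id) }
}
```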
And this was how caching worked in Tusker for almost a year and a half. But, of course, this couldn't last forever. A few months ago, I was doing a bunch of profiling and optimizing to try to improve scroll view performance and reduce animation hitches.
The first thing I noticed was that while I was just scrolling through the timeline, there was a lot of time being spent in syscalls _in the main thread_. The syscalls were `open`, `stat`, and `fstat`, and they were being called from `NSURL`'s `initFileURLWithPath:` initializer. This method was being called with the cache key (which in my case was the URL to the remote image) in order to check whether the key string has a file extension, so that the extension could be used for the locally cached file. It was being called very frequently because checking whether an image exists in the disk cache means checking whether there's a file on disk at the path derived from the cache key, which includes the potential file extension of the key.
Another thing the `initFileURLWithPath:` initializer does is, if the path does not end with a slash, determine if it represents a directory by querying the filesystem. Since that initializer was also used to construct the final path to the cached file on-disk, it was doing even more pointless work. Because the cache is the only thing writing to that directory and all it's writing are files, it should never need to ask the filesystem.
There were a couple super low-hanging optimizations here:
First was using NSString's `pathExtension` property instead of turning the cache key into an NSURL to get the same property. The NSString property merely _interprets_ the string as a file path, rather than hitting the disk, so it can be much faster.
The second thing was, as the documentation suggests, using the `initFileURLWithPath:isDirectory:` initializer instead. It allows you to specify yourself whether the path is to a directory or not, bypassing the filesystem query.
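In Swift terms, the two changes amount to roughly this (a sketch with made-up names, not the library's literal diff):
```swift
import Foundation

func cachedFileURL(forKey cacheKey: String, fileName: String, inDirectory directory: String) -> URL {
    // NSString.pathExtension merely interprets the string as a path; no syscalls.
    let ext = (cacheKey as NSString).pathExtension
    let name = ext.isEmpty ? fileName : fileName + "." + ext
    // isDirectory: false tells Foundation the answer up front, skipping the filesystem query.
    return URL(fileURLWithPath: directory + "/" + name, isDirectory: false)
}
```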
I sent both of these improvements upstream, because they were super simple and resulted in a nice performance improvement for free. But, while I was waiting for my changes to be merged, I came up with another optimization. This one was complex enough (though still not very) that I didn't feel like sending it upstream, so I finally decided to just write my own copy of the library[^2] so I could make whatever changes I wanted.
[^2]: Don't worry, it's under the MIT license.
To avoid having to do disk I/O just to check if something is cached, I added a dictionary of cache keys to file states. The file state is an enum with three cases: exists, does not exist, and unknown. When the disk cache is created, the file state dictionary is empty, so the state for every key is effectively unknown. With this, when the disk cache is asked whether there is an object for a certain key, it can first consult its internal dictionary. If the file state is exists or does not exist, then no filesystem query takes place. If the state is unknown, it asks the OS whether the file exists and saves the result to the dictionary, so the query can be avoided next time. The methods for adding to/removing from the cache can then also update the dictionary to avoid potential future filesystem queries.
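A sketch of that memoization layer (the shape is assumed; the names are made up):
```swift
import Foundation

enum FileState { case exists, doesNotExist, unknown }

class DiskCacheIndex {
    private var states: [String: FileState] = [:]

    // Answers "is this key cached on disk?" with at most one filesystem query per key.
    func fileExists(forKey key: String, atPath path: String) -> Bool {
        switch states[key] ?? .unknown {
        case .exists: return true
        case .doesNotExist: return false
        case .unknown:
            let exists = FileManager.default.fileExists(atPath: path)
            states[key] = exists ? .exists : .doesNotExist
            return exists
        }
    }

    // Writes and removals keep the memo in sync so later checks stay free.
    func didWrite(key: String) { states[key] = .exists }
    func didRemove(key: String) { states[key] = .doesNotExist }
}
```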
Combined with the improvements I'd sent to the upstream library, this eliminated almost all of the syscalls from the scrolling hot path. Sadly though, scrolling performance, while better, still wasn't what I had hoped.
The next thing I realized was that I was being incredibly inefficient with how images were decoded from raw data.
This [WWDC session](https://developer.apple.com/videos/play/wwdc2018/219/) from 2018 explains that although UIImage looks like a fairly simple model object, there's more going on under the covers that can work to our advantage, if we let it.
The UIImage instance itself is what owns the decoded bitmap of the image. So when a UIImage is used multiple times, the PNG/JPEG/etc. only needs to be decoded once.
But, in both the memory and disk caches, I was only storing the data that came back from the network request. This meant that every time something needed to display an image, it would have to re-decode it from the original format into a bitmap the system could display directly. This showed up in profiles of the app as a bunch of time being spent in the ImageIO functions being called by internal UIKit code.
To fix this, I changed the in-memory cache to store only UIImage objects[^3], which only decode the original data once and share a single bitmap across every usage. The first time an image is retrieved from the network (or loaded from disk), a UIImage is constructed for it and stored in the memory cache. This resulted in a significant performance improvement. When running on an iPhone 6s (the device I use for performance testing), scrolling felt noticeably smoother. Additionally, this has the very nice added benefit of reducing memory consumption by a good deal.
[^3]: Mostly. Unlike other categories of images, post attachments are not cached on disk, only in memory. This is because, generally speaking, users won't see the same attachment often enough that it's worth caching them across app launches. It would just be throwing away disk space for no benefit.<br>But the original data does need to be available, because constructing a UIImage from an animated GIF throws away all but the first frame. So, for attachments specifically, the original data is kept in memory. (Another obvious optimization here would be to only store the original data for GIFs in memory, and discard it for other attachments. I intend to do this eventually, I just haven't gotten around to it as of the time I'm writing this.)
We can still go one step farther with caching image objects, though. The aforementioned WWDC talk also mentions that the size of the bitmap stored internally by each UIImage is proportional to the dimensions of the input image, not to the size of the view it's being displayed in. This is because if the same image is shown in multiple views of different sizes, it wants to retain as much information as possible so the image looks as good as it can. Another key effect of using larger-than-necessary images is that the render server needs to do more work to scale down those images to the actual display size. By doing that ourselves, ahead of time, we can keep it from repeatedly doing extra work.
This strategy is a reasonable default, but we, the app developer, know better. Depending on the category of image, it may only be shown at one particular size. In my case, user avatars are almost always shown at a resolution no larger than 50pt &times; 50pt. So, instead of keeping a bunch of full size bitmaps around, when creating the image that's going to be cached in-memory, we can use CoreGraphics to downscale the input image to a maximum dimension of 50 points[^4]. And, because the original image data is still cached on disk, if the user goes to a screen in the app where user avatars are displayed larger than usual, we can just load the original data. This is relatively uncommon compared to just scrolling through the timeline, so the slight performance hit here is a worthwhile tradeoff for the improvement in the more common case.
[^4]: CoreGraphics technically wants a _pixel_ size, so we multiply 50 by `UIScreen.main.scale` and use that as the max pixel dimension. This could become a minor problem on Catalyst, where screens with different scales are possible (though I don't know how macOS display scales map to the Catalyst version of UIKit…), or if Apple added proper multi-display support to iPadOS.
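One common way to do the decode and downscale in a single step is ImageIO's thumbnail API, roughly like this (a sketch; the post says CoreGraphics, and this is the closely related ImageIO approach from that WWDC session):
```swift
import ImageIO
import UIKit

func downscaledImage(from data: Data, maxPointSize: CGFloat, scale: CGFloat) -> UIImage? {
    let options: [CFString: Any] = [
        kCGImageSourceCreateThumbnailFromImageAlways: true,
        kCGImageSourceShouldCacheImmediately: true, // decode now, not at render time
        kCGImageSourceCreateThumbnailWithTransform: true,
        kCGImageSourceThumbnailMaxPixelSize: maxPointSize * scale, // points -> pixels
    ]
    guard let source = CGImageSourceCreateWithData(data as CFData, nil),
          let cgImage = CGImageSourceCreateThumbnailAtIndex(source, 0, options as CFDictionary)
    else { return nil }
    return UIImage(cgImage: cgImage)
}
```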
Before we reach the end, there's one final bit of image caching Tusker does. Some time last year, I added an accessibility/digital wellness preference which changes the app to only display images in grayscale. I use the CoreImage framework to actually do this conversion[^5]. CoreImage is GPU-accelerated and so is reasonably speedy, but it still adds a not-insignificant amount of time, which can be felt on slower devices. To try and mitigate this, the images are also cached post-grayscale conversion.
[^5]: In an ideal world, this could be done with something like a fragment shader at render-time, but I couldn't find any reasonable way of doing that. Oh well.
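Roughly what a CoreImage grayscale pass looks like (the filter choice here is an assumption; the post doesn't name one):
```swift
import CoreImage
import UIKit

// Reuse a single CIContext; creating one is expensive.
let ciContext = CIContext()

func grayscale(_ image: UIImage) -> UIImage? {
    guard let input = CIImage(image: image),
          let filter = CIFilter(name: "CIPhotoEffectNoir") else { return nil }
    filter.setValue(input, forKey: kCIInputImageKey)
    guard let output = filter.outputImage,
          let cgImage = ciContext.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}
```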
And that finally brings us to how image caching in Tusker works today. It started out very simple, and the underlying concepts largely haven't changed; there's just been a steady series of improvements. As with most things related to caching, what seemed initially to be a simple problem got progressively more and more complex. And, though there are a lot of moving parts, the system overall works quite well. Images are no longer the bottleneck in scrolling performance, except in the rarest of cases (like using grayscale images on the oldest supported devices; either of those individually is fine, but together they're just too much). And memory usage overall is substantially reduced, making the app a better platform citizen.
View File
@ -0,0 +1,34 @@
```
title = "Let's Build a Programming Language: Part 0"
tags = ["build a programming language", "rust"]
date = "2021-04-12 17:27:42 -0400"
short_desc = "And learn Rust along the way."
slug = "lets-build-a-programming-language"
```
I've been learning [Rust](https://www.rust-lang.org/) for a little while now, and I'm a firm believer that the best way to learn a new language is to actually build something with it. Not something like an example project from a book or tutorial for the language, but something non-trivial that you actually find interesting.
In that vein, I've been thinking a lot lately about building a small programming language. It's something that I find interesting, and Rust is a good fit for it. It's also something that lets me start out relatively small and simple, but still always have opportunities for growing more complex.
This post, and the posts that follow it, are going to be a sort of experiment. Ordinarily, with an ongoing project, I would just post about it on the fediverse. This time, I'm going to try to write blog posts about it. I expect I'll continue to post in-the-moment stuff on the fediverse, but I also want to write more semi-regular posts about how it's going overall. They probably won't be super detailed, and they certainly won't be tutorials, but they will be more detailed and more explanatory than much of what I post on the fediverse. And, as with what I would otherwise write on the fediverse, these posts aren't going to be perfect. I'm not an expert at this; there are going to be mistakes, and I'm going to end up backtracking. But I figure that's slightly more interesting to read about than if I did everything correctly on the first try.
<!-- excerpt-end -->
With that, a few thoughts about the programming language I intend to build:
The biggest thing is that I don't have a super comprehensive plan. I have some general ideas about what I want to do (and how to do it), but mostly I'm just going to see what happens. I don't really need to use the end product for anything, I just want to have some fun building it.
The language is going to be interpreted. Building a compiler from scratch is not something I'm interested in (for now), and writing just a frontend for, say, LLVM, doesn't seem terribly interesting either.
It will be weakly typed, because building a good, sound type system is more complicated than I want to deal with. There may be some static typing features, but it won't be a focus.
It's going to be mostly imperative. I'll probably build more functional-language features at some point, but it's going to be imperative first.
I'm also going to write everything by hand. No parser generators or combinator libraries. On a related note, there isn't going to be a formal grammar or spec. The definition of the language will be whatever the parser does.
And with that, here goes nothing.
```sh
$ cargo new toylang
```
View File
@ -0,0 +1,100 @@
```
title = "Part 1: Lexing"
tags = ["build a programming language", "rust"]
date = "2021-04-13 17:00:42 -0400"
short_desc = "Turning a string into a sequence of tokens."
slug = "lexing"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
The first part of the language I've built is the lexer. It takes the program text as input and produces a vector of tokens. Tokens are the individual units that the parser will work with, rather than it having to work directly with characters. A token could be a bunch of different things. It could be a literal value (like a number or string), or it could be an identifier, or a specific symbol (like a plus sign).
<!-- excerpt-end -->
I've decided to represent tokens themselves as an enum because there are a bunch of cases without any data (e.g., a plus sign is always just a plus sign) and some with (e.g., a number literal token has a value).
When I was reading the [Rust book](https://doc.rust-lang.org/stable/book/), I was excited to see Rust enums have associated values. Enums with associated data is one of my favorite features of Swift, and I'm glad to see Rust has them too.
```rust
#[derive(Debug)]
enum Token {
Integer(i64),
Plus,
}
```
For now, I'm only starting with integer literals and the plus sign. More tokens will come once I've got basic lexing and parsing in place.
Most of the work of turning a string into tokens is done in the *drumroll please...* `tokenize` function.
It creates an initially empty vector of tokens, and starts iterating through characters in the string.
Single character tokens like plus are the easiest to handle. If the current character matches that of a token, consume the character (by advancing the iterator) and add the appropriate token.
```rust
fn tokenize(s: &str) -> Vec<Token> {
let mut tokens: Vec<Token> = Vec::new();
let mut it = s.chars().peekable();
while let Some(c) = it.peek() {
if *c == '+' {
it.next();
tokens.push(Token::Plus);
}
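// other characters (digits, whitespace) are handled by cases added below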
}
tokens
}
```
Already I've encountered a Rust thing. Inside the while loop, `c` is a reference to a char, so in order to check its value, you have to dereference it. I had expected that you'd be able to compare a value of some type to a reference of that same type (with the language deref'ing the reference automatically), but I can see how forcing the programmer to be explicit about it makes sense.
Next, to parse number literals, I check if the current character is in the digit range:
```rust
use std::ops::RangeInclusive;

const DIGITS: RangeInclusive<char> = '0'..='9';
fn tokenize(s: &str) -> Vec<Token> {
// ...
while let Some(c) = it.peek() {
// ...
} else if DIGITS.contains(c) {
let n = parse_number(&mut it).unwrap();
tokens.push(Token::Integer(n));
}
}
}
```
You may note that even though the integer token takes a signed integer, I'm totally ignoring the possibility of negative number literals. That's because they'll be implemented in the parser along with the unary minus operator.
If the character is indeed a digit, a separate function is called to parse the entire number. This is the first thing that I've encountered for which mutable borrows are quite nice. The `parse_number` function operates on the same data as the `tokenize` function, it needs to start wherever `tokenize` left off, and it needs to tell `tokenize` how much it advanced by. Mutably borrowing the iterator has exactly these properties.
```rust
fn parse_number<T: Iterator<Item = char>>(it: &mut T) -> Option<i64> {
    let digits = it.take_while(|c| DIGITS.contains(c));
    let s: String = digits.collect();
    s.parse().ok()
}
```
Writing the declaration of `parse_number` was a bit rough, I'll admit. I haven't read the generics chapter of the book yet, so I stumbled through a number of compiler error messages (which are very detailed!) until I arrived at this. Having emerged victorious, what I ended up with makes sense though. The specific type of the iterator doesn't matter, but it still needs to be known at the call site during compilation.
The actual implementation takes as many digits as there are from the iterator, turns them into a string, and parses it into an integer. It returns an optional because the `parse` method could fail if there were no digit characters at the beginning of the string (this will never happen in the one place I'm calling it, but in case it's reused in the future and I forget...).
Lastly, the `tokenize` function also ignores whitespace just by calling `it.next()` whenever it encounters a whitespace char.
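That branch isn't shown above, but putting all the pieces together, the loop plausibly ends up looking like this (the `is_whitespace` check and the final `panic!` are my guesses at the unshown parts):
```rust
fn tokenize(s: &str) -> Vec<Token> {
    let mut tokens: Vec<Token> = Vec::new();
    let mut it = s.chars().peekable();
    while let Some(c) = it.peek() {
        if *c == '+' {
            it.next();
            tokens.push(Token::Plus);
        } else if DIGITS.contains(c) {
            let n = parse_number(&mut it).unwrap();
            tokens.push(Token::Integer(n));
        } else if c.is_whitespace() {
            // consume the whitespace without emitting a token
            it.next();
        } else {
            panic!("unexpected character: {}", c);
        }
    }
    tokens
}
```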
And with that, we can tokenize simple inputs:
```rust
fn main() {
    println!("tokens: {:#?}", tokenize("12 + 34"));
}
```
```sh
$ cargo run
tokens: [Integer(12), Plus, Integer(34)]
```
```
title = "Part 2: A Primitive Parser"
tags = ["build a programming language", "rust"]
date = "2021-04-14 17:00:42 -0400"
short_desc = "Building a small AST from the stream of tokens."
slug = "parsing"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Now that the lexer is actually lexing, we can start parsing. This is where the Tree in Abstract Syntax Tree really comes in. What the parser is going to do is take a flat sequence of tokens and transform it into a shape that represents the actual structure of the code.
<!-- excerpt-end -->
The actual nodes in the AST are represented as an enum with a different case for each node type. The individual node types could also be structs that share a common trait, but so far the enum works fine.
```rust
enum Node {
    Integer(i64),
    BinaryOp {
        left: Box<Node>,
        right: Box<Node>,
    },
}
```
The `BinaryOp` node doesn't have an operator type for now because the only supported one is addition, but it'll be added in the future. Also, the operands of the binary operator are both boxed (stored on the heap) because otherwise the `Node` type would be recursive and its size not knowable at compile-time.
There's a helper function that takes a slice of tokens and calls another function that does all of the actual work of parsing.
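The post doesn't show that helper, but it's presumably just a couple of lines (a sketch; `parse` is the name used in `main` below):
```rust
fn parse(tokens: &[Token]) -> Option<Node> {
    let mut it = tokens.iter().peekable();
    do_parse(&mut it)
}
```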
The parser is a simple recursive descent parser, so the function that does the actual parsing mutably borrows (for the same reasons as the lexer) an iterator that iterates over tokens. The actual iterator it receives is a peekable one, with the underlying iterator type specified with a generic.
I haven't the faintest idea why the lifetime is needed here, because I still haven't read that chapter of the book. But hey, adding it fixed the compiler error and it seems to work fine.
```rust
fn do_parse<'a, T: Iterator<Item = &'a Token>>(it: &mut std::iter::Peekable<T>) -> Option<Node> {
    // ...
}
```
The `do_parse` function first ensures that there is a node (otherwise it returns `None`) and then constructs the appropriate node based on the type of the token. For now, that means just integer literals. If there's any other type of token, it panics because it can't be turned into a node (again, actual error reporting will come later).
```rust
fn do_parse<'a, T: Iterator<Item = &'a Token>>(it: &mut std::iter::Peekable<T>) -> Option<Node> {
    if it.peek().is_none() {
        return None;
    }
    let mut node: Node = match it.next().unwrap() {
        Token::Integer(n) => Node::Integer(*n),
        tok => panic!("unexpected token: {:?}", tok),
    };
    // ...
}
```
After that, it checks if there are any tokens remaining. If there aren't, the node is returned as-is.
If there is another token, and it's a plus sign, it creates a new binary operator node, making the previously-created `node` the left operand. To get the right operand, it calls `do_parse` (putting the recursion into recursive descent) to create a node from the remaining tokens (if it doesn't find a right-hand side token, it simply panics). If the next token is of any other type, it's not able to combine it with the existing node into a new node, so it panics.
```rust
fn do_parse<'a, T: Iterator<Item = &'a Token>>(it: &mut std::iter::Peekable<T>) -> Option<Node> {
    // ...
    match it.next() {
        Some(Token::Plus) => {
            node = Node::BinaryOp {
                left: Box::new(node),
                right: Box::new(do_parse(it).expect("expression after binary operator")),
            }
        }
        Some(tok) => panic!("unexpected token: {:?}", tok),
        None => (),
    }
    Some(node)
}
```
And with that, it can parse a simple expression!
```rust
fn main() {
    let tokens = tokenize("12 + 34 + 56");
    let node = parse(&tokens);
    println!("node: {:#?}", node);
}
```
```sh
$ cargo run
node: Some(
    BinaryOp {
        left: Integer(12),
        right: BinaryOp {
            left: Integer(34),
            right: Integer(56),
        },
    },
)
```
The eagle-eyed may notice that while we have parsed the expression, we have not parsed it correctly. What's missing is operator precedence and associativity, but that will have to wait for next time.
```
title = "Part 3: Basic Evaluation"
tags = ["build a programming language", "rust"]
date = "2021-04-15 17:00:42 -0400"
short_desc = "A bad calculator."
slug = "evaluation"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Last time I said operator precedence was going to be next. Well, if you've read the title, you know that's not the case. I decided I really wanted to see this actually run[^1] some code[^2], so let's do that.
[^1]: evaluate
[^2]: a single expression
<!-- excerpt-end -->
First, there needs to be something to actually store values during the evaluation process. For this, I used yet another enum. It only has one case for now because we can currently only lex and parse integer values and one arithmetic operator.
```rust
enum Value {
    Integer(i64),
}
```
There's also a helper function to extract the underlying integer from a value in places where we're certain it's going to be an integer:
```rust
impl Value {
    fn expect_integer(&self, msg: &str) -> i64 {
        match self {
            Value::Integer(n) => *n,
            _ => panic!("{}", msg),
        }
    }
}
```
The compiler warns about the unreachable match arm, but it'll be useful once there are more types of values. (Once again, actual error reporting will wait.)
The actual evaluation starts in the `eval` function, which takes a reference to the node to evaluate and returns a `Value` representing its result.
For integer nodes, the value of the AST node is wrapped in a Value and returned directly. For binary operator (i.e. addition) nodes the left- and right-hand values are extracted and another function is called to perform the operation.
```rust
fn eval(node: &Node) -> Value {
    match node {
        Node::Integer(n) => Value::Integer(*n),
        Node::BinaryOp { left, right } => eval_binary_op(left, right),
    }
}
```
This `eval_binary_op` function takes each of the nodes and calls `eval` with it. By doing this, it recurses through the AST, evaluating each node in a depth-first manner. It then turns each value into an integer (panicking if either isn't what it expects) and returns a new `Value` with the values added together.
```rust
fn eval_binary_op(left: &Node, right: &Node) -> Value {
    let left = eval(left).expect_integer("left hand side of binary operator must be an integer");
    let right = eval(right).expect_integer("right hand side of binary operator must be an integer");
    Value::Integer(left + right)
}
```
And with that surprisingly small amount of code, I've got a very dumb calculator that can perform arbitrary additions:
```rust
fn main() {
    let tokens = tokenize("1 + 2 + 3");
    if let Some(node) = parse(&tokens) {
        println!("result: {:?}", eval(&node));
    }
}
```
```sh
$ cargo run
result: Integer(6)
```
Next time, I'll add some more operators and actually get around to operator precedence.
```
title = "Part 4: Operator Precedence"
tags = ["build a programming language", "rust"]
date = "2021-04-16 17:00:42 -0400"
slug = "operator-precedence"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
I've gone through the lexer, parser, and evaluator and added subtraction, multiplication, and division in addition to, uh... addition. And they kind of work, but there's one glaring issue that I mentioned back in part 2. It's that the parser has no understanding of operator precedence. That is to say, it doesn't know which operators have a higher priority in the order of operations when implicit grouping is taking place.
<!-- excerpt-end -->
Currently, an expression like `2 * 3 + 4` will be parsed as if the `3 + 4` was grouped together, meaning the evaluation would ultimately result in 14 instead of the expected 10. This is what the AST for that expression currently looks like:
```plaintext
  *
 / \
2   +
   / \
  3   4
```
But, the multiplication operator should actually have a higher priority and therefore be deeper in the node tree so that it's evaluated first.
Another closely related issue is [associativity](https://en.wikipedia.org/wiki/Operator_associativity). Whereas operator precedence governs implicit grouping behavior when there are operators of _different_ precedences (like addition and multiplication), operator associativity defines how implicit grouping works for multiple operators _of the same precedence_ (or multiple of the same operator).
Looking at the AST for an expression like "1 - 2 - 3", you can see the same issue is present as above:
```plaintext
  -
 / \
1   -
   / \
  2   3
```
In both of these cases, what the parser needs to do is the same. It needs to implicitly group the middle node with the left node, rather than the right one. This will result in node trees that look like this:
```plaintext
    +          -
   / \        / \
  *   4      -   3
 / \        / \
2   3      1   2
```
To accomplish this, I added precedence and associativity enums as well as methods on the `BinaryOp` enum to get each operation's specific values so that when the parser is parsing, it can make a decision about how to group things based on this information.
The `Precedence` enum has a derived implementation of the `PartialOrd` trait, meaning the cases are ordered from least to greatest in the same order they're written in the code, so that the precedence values can be compared directly with operators like `<`. Addition/subtraction and multiplication/division currently share precedences. Also, every operator currently has left associativity.
```rust
enum BinaryOp {
    Add,
    Subtract,
    Multiply,
    Divide,
}

#[derive(PartialEq, PartialOrd)]
enum Precedence {
    AddSub,
    MulDiv,
}

#[derive(PartialEq)]
enum Associativity {
    Left,
    Right,
}

impl BinaryOp {
    fn precedence(&self) -> Precedence {
        match self {
            BinaryOp::Add | BinaryOp::Subtract => Precedence::AddSub,
            BinaryOp::Multiply | BinaryOp::Divide => Precedence::MulDiv,
        }
    }

    fn associativity(&self) -> Associativity {
        Associativity::Left
    }
}
```
In the `do_parse` function, things have been changed up. First, there's a separate function for checking if the token that follows the first token in the expression should combine with the first node (i.e., is a binary operator):
```rust
fn is_binary_operator_token(token: &Token) -> bool {
    if let Token::Plus | Token::Minus | Token::Asterisk | Token::Slash = token {
        true
    } else {
        false
    }
}
```
So instead of matching on individual tokens, `do_parse` just calls that function. If the next token is a binary operator, it consumes the operator token, calls `do_parse` recursively to get the right-hand node and then calls another function to combine the two nodes.
```rust
fn do_parse<'a, T: Iterator<Item = &'a Token>>(it: &mut Peekable<T>) -> Option<Node> {
    // ...
    if let Some(next) = it.peek() {
        if is_binary_operator_token(next) {
            let operator_token = it.next().unwrap();
            let right = do_parse(it).expect("expression after binary operator");
            node = combine_with_binary_operator(node, operator_token, right);
        } else {
            panic!("unexpected token: {:?}", next);
        }
    }
    Some(node)
}
```
But, before I get to the `combine_with_binary_operator` function, there's another function that decides whether a binary operator should be grouped to the left with another node by following the rule I described earlier.
```rust
fn should_group_left(left_op: &BinaryOp, right: &Node) -> bool {
    match right {
        Node::BinaryOp { op: right_op, .. } => {
            right_op.precedence() < left_op.precedence()
                || (right_op.precedence() == left_op.precedence()
                    && left_op.associativity() == Associativity::Left)
        }
        _ => false,
    }
}
```
The `combine_with_binary_operator` function can then use this (after converting the binary operator token into a `BinaryOp` value) to decide what it should do.
```rust
fn combine_with_binary_operator(left: Node, token: &Token, right: Node) -> Node {
    let op: BinaryOp = match token {
        // ...
    };
    if should_group_left(&op, &right) {
        if let Node::BinaryOp {
            left: right_left,
            op: right_op,
            right: right_right,
        } = right
        {
            Node::BinaryOp {
                left: Box::new(Node::BinaryOp {
                    left: Box::new(left),
                    op,
                    right: right_left,
                }),
                op: right_op,
                right: right_right,
            }
        } else {
            panic!();
        }
    } else {
        Node::BinaryOp {
            left: Box::new(left),
            op,
            right: Box::new(right),
        }
    }
}
```
If there are two binary operators and it does need to be grouped to the left, it performs the same transformation I described above, constructing a new outer binary operator node and the new left-hand inner node. Diagrammatically, the transformation looks like this (where uppercase letters are the binary operator nodes and lowercase letters are values):
```plaintext
Original expression: x A y B z

  A             B
 / \           / \
x   B    ->   A   z
   / \       / \
  y   z     x   y
```
If it does not need to be grouped left, the function simply creates a new binary operator node, leaving the left and right sides as-is.
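The updated `eval_binary_op` isn't shown in the post, but given the part 3 version, it's presumably a straightforward match on the new `op` field (a sketch, assuming integer-only arithmetic for now):
```rust
fn eval_binary_op(left: &Node, op: &BinaryOp, right: &Node) -> Value {
    let left = eval(left).expect_integer("left hand side of binary operator must be an integer");
    let right = eval(right).expect_integer("right hand side of binary operator must be an integer");
    let result = match op {
        BinaryOp::Add => left + right,
        BinaryOp::Subtract => left - right,
        BinaryOp::Multiply => left * right,
        BinaryOp::Divide => left / right,
    };
    Value::Integer(result)
}
```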
And, after adding the new operators to the `eval_binary_op` function, it can now **correctly** compute simple arithmetic expressions!
```rust
fn main() {
    let tokens = tokenize("2 * 3 + 4");
    if let Some(node) = parse(&tokens) {
        println!("result: {:?}", eval(&node));
    }
}
```
```sh
$ cargo run
result: Integer(10)
```
```
title = "Part 5: Fixing Floats"
tags = ["build a programming language", "rust"]
date = "2021-04-17 17:00:42 -0400"
short_desc = "A small gotcha in Rust's TakeWhile iterator."
slug = "fixing-floats"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
In the process of adding floating point numbers, I ran into something a little bit unexpected. The issue turned out to be pretty simple, but I thought it was worth mentioning.
<!-- excerpt-end -->
My initial attempt at this was a simple modification to the original `parse_number` function from the lexer. Instead of stopping as soon as it encounters a non-digit character, I changed it to continue collecting characters when it encounters a decimal point for the first time.
```rust
fn parse_number<T: Iterator<Item = char>>(it: &mut T) -> Option<Token> {
    let mut found_decimal_point = false;
    let digits = it.take_while(|c| {
        if DIGITS.contains(c) {
            true
        } else if *c == '.' && !found_decimal_point {
            found_decimal_point = true;
            true
        } else {
            false
        }
    });
    // ...
}
```
This seemed to work, and produced tokens like `Float(1.2)` for the input "1.2". But then I tried it with the string "1.2.3", to make sure that lexing would fail when it encountered a dot after the number literal. It did fail, but not in the way I expected: it complained about the unexpected '3' character. The dot seemed to vanish into thin air.
I came up with a simpler test case[^1]:
[^1]: I'm not entirely sure why the `take_some` function is needed here. Trying to call `take_while` directly from `main` causes a compiler error on the next line at the call to `it.peek()` because the iterator is being used after being moved into `take_while`. Does having a separate function somehow fix this? I wouldn't think so, but I'm not a Rust expert. I [posted about it](https://social.shadowfacts.net/notice/A6FitupF6BiJmFEwim) on the fediverse, and if you have an answer, I'd love to hear it.
```rust
fn main() {
    let is = vec![1, 2, 3, 4];
    let mut it = is.iter().peekable();
    let taken = take_some(&mut it);
    println!("taken: {:?}, next: {:?}", taken, it.peek());
}

fn take_some<'a, I: Iterator<Item = &'a i32>>(it: &mut Peekable<I>) -> Vec<i32> {
    it.take_while(|i| **i < 3).copied().collect()
}
```
To my surprise, it printed `taken: [1, 2], next: Some(4)`. This time the `3` disappeared.
I inquired about this behavior on the fediverse, and learned that I missed a key line of the docs for the `take_while` method. Before it invokes the closure you passed in, it calls `next()` on the underlying iterator in order to actually have an item for the closure to test. So, it ends up consuming the first element for which the closure returns `false`.
I would have expected it to use the `peek()` method on peekable iterators to avoid this, but I guess not. No matter, a peeking version is easy to implement:
```rust
fn take_while_peek<I, P>(peekable: &mut Peekable<I>, mut predicate: P) -> Vec<I::Item>
where
    I: Iterator,
    P: FnMut(&I::Item) -> bool,
{
    let mut vec: Vec<I::Item> = vec![];
    while let Some(it) = peekable.peek() {
        if predicate(it) {
            vec.push(peekable.next().unwrap());
        } else {
            break;
        }
    }
    vec
}
```
I can then switch to using the new function in `parse_number` and it no longer consumes extra characters.
```rust
fn parse_number<T: Iterator<Item = char>>(it: &mut Peekable<T>) -> Option<Token> {
    let mut found_decimal_point = false;
    let digits = take_while_peek(it, |c| {
        // ...
    });
    // ...
}
```
```
title = "Part 6: Grouping"
tags = ["build a programming language", "rust"]
date = "2021-04-18 14:42:42 -0400"
slug = "grouping"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Parsing groups is pretty straightforward, with only one minor pain point to keep in mind. I'll gloss over adding left and right parentheses because it's super easy—just another single character token.
<!-- excerpt-end -->
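That presumably amounts to just two new `Token` variants, plus two more single-character branches in `tokenize`:
```rust
enum Token {
    // ...
    LeftParen,
    RightParen,
}
```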
To actually parse the group from the token stream, in the `parse_expression` function I look for a left paren at the beginning of an expression, and call `parse_group` if one is found.
```rust
fn parse_expression<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Node> {
    // ...
    let mut node: Node = match it.peek().unwrap() {
        // ...
        Token::LeftParen => parse_group(it).unwrap(),
    };
    // ...
}
```
The `parse_group` function is also pretty simple. It consumes the left paren and then calls `parse_expression` to parse what's inside the parentheses. Afterwards, assuming it's found something, it consumes the right paren and returns a new `Group` node (which has just one field, another boxed `Node` that's its content).
```rust
fn parse_group<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Node> {
    match it.peek() {
        Some(Token::LeftParen) => (),
        _ => return None,
    }
    it.next();
    if let Some(inner) = parse_expression(it) {
        match it.peek() {
            Some(Token::RightParen) => (),
            tok => panic!("expected closing parenthesis after group, got {:?}", tok),
        }
        it.next();
        Some(Node::Group {
            node: Box::new(inner),
        })
    } else {
        panic!("expected expression inside group");
    }
}
```
This looks pretty good, but trying to run it and parse an expression like `(1)` will crash the program. Specifically, it'll panic with a message saying `unexpected token: RightParen`.
At first, this was pretty confusing. Shouldn't the right paren be consumed by the `parse_group` function? Running with `RUST_BACKTRACE=1` reveals what the problem actually is.
It's panicking inside the recursive call to `parse_expression` coming from `parse_group`, before that function even has a chance to consume the right paren. Specifically, `parse_expression` is seeing a token after the first part of the expression and is trying to combine it with the existing node and failing because a right paren is not a binary operator.
What should happen is that `parse_expression` should see the paren following the expression, realize that the expression is over, and not do anything with it. That way, when the recursive `parse_expression` returns, `parse_group` will be able to consume the right paren as it expects.
To do this, there's a constant list of tokens which are considered to end the current expression. Then, in `parse_expression`, in addition to checking if the next token after an expression is a binary operator, we can check if the token is an expression end. And if so, avoid panicking.
```rust
const EXPRESSION_END_TOKENS: &[Token] = &[Token::RightParen];

fn parse_expression<'a, T: Iterator<Item = &'a Token>>(it: &mut Peekable<T>) -> Option<Node> {
    // ...
    if let Some(next) = it.peek() {
        if is_binary_operator_token(next) {
            // ...
        } else if EXPRESSION_END_TOKENS.contains(next) {
            // no-op
        } else {
            panic!("unexpected token: {:?}", next);
        }
    }
    Some(node)
}
```
And now it can parse grouped expressions:
```rust
fn main() {
    let tokens = tokenize("(1)");
    if let Some(node) = parse(&tokens) {
        println!("node: {:#?}", &node);
    }
}
```
```sh
$ cargo run
node: Group {
    node: Integer(1),
}
```
(I won't bother discussing evaluating groups because it's trivial.)
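For completeness, it's presumably just one more arm in the `eval` match that delegates to the inner node:
```rust
fn eval(node: &Node) -> Value {
    match node {
        // ...
        Node::Group { node } => eval(node),
    }
}
```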
```
title = "Part 7: Cleaning Up Binary Operators"
tags = ["build a programming language", "rust"]
date = "2021-04-19 17:00:42 -0400"
short_desc = "A minor fight with the Rust borrow checker."
slug = "cleaning-up-binary-operators"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
The code from [part 4](/2021/operator-precedence/) that checks whether a pair of binary operators should be grouped to the left or right works, but I'm not particularly happy with it. The issue is that it needs to pattern match on the right node twice: first in the `should_group_left` function, and then again in `combine_with_binary_operator` if `should_group_left` returned true.
<!-- excerpt-end -->
As a reminder, the code currently looks like this:
```rust
fn combine_with_binary_operator(left: Node, token: &Token, right: Node) -> Node {
    let op: BinaryOp = match token {
        // ...
    };
    if should_group_left(&op, &right) {
        if let Node::BinaryOp {
            left: right_left,
            op: right_op,
            right: right_right,
        } = right
        {
            Node::BinaryOp {
                left: Box::new(Node::BinaryOp {
                    left: Box::new(left),
                    op,
                    right: right_left,
                }),
                op: right_op,
                right: right_right,
            }
        } else {
            panic!();
        }
    } else {
        Node::BinaryOp {
            left: Box::new(left),
            op,
            right: Box::new(right),
        }
    }
}

fn should_group_left(left_op: &BinaryOp, right: &Node) -> bool {
    match right {
        Node::BinaryOp { op: right_op, .. } => {
            right_op.precedence() < left_op.precedence()
                || (right_op.precedence() == left_op.precedence()
                    && left_op.associativity() == Associativity::Left)
        }
        _ => false,
    }
}
```
See that `panic!()` in the else branch? The compiler thinks it (or some return value) is necessary there, because the pattern match could fail. But as the programmer, I know better. I know that if we're in the true branch of the outer if statement, then `should_group_left` returned true and the pattern match can never fail.
This is why I just call `panic!` without even a message: because I know that code is unreachable.
But it would be even better not to have it at all.
Basically, what I want the `should_group_left` function to do is pattern match on the right node, and if it meets the conditions for being left-grouped, to get the values inside the right binary operator node out without having to do another pattern match.
<aside class="inline">
Swift handles this rather nicely, because it allows you to combine multiple if... clauses? together with commas, including mixing and matching boolean conditions and pattern matching, requiring them all to succeed for the body of the if to be executed.
```swift
if should_group_left(op, right),
   case let .binaryOp(rightLeft, rightOp, rightRight) = right {
    // ...
}
```
Other languages with flow typing, like TypeScript or Kotlin, handle similar issues with things like custom [type guards](https://www.typescriptlang.org/docs/handbook/advanced-types.html#user-defined-type-guards), which inform the compiler "if this function returns true, the following type constraint holds".
</aside>
The best solution I was able to come up with was changing `should_group_left` to take ownership of the right node and return a `Result<(Box<Node>, BinaryOp, Box<Node>), Node>`[^1]. If it returns an Ok result, all the values are available. If it returns an "error", ownership of the right node is returned back to the caller.
[^1]: It doesn't really need to be a `Result` specifically, I just didn't bother writing my own enum just for this.
```rust
fn should_group_left(
    left_op: &BinaryOp,
    right: Node,
) -> Result<(Box<Node>, BinaryOp, Box<Node>), Node> {
    match right {
        Node::BinaryOp {
            left: right_left,
            op: right_op,
            right: right_right,
        } => {
            let should_group = /* ... */;
            if should_group {
                Ok((right_left, right_op, right_right))
            } else {
                Err(Node::BinaryOp {
                    left: right_left,
                    op: right_op,
                    right: right_right,
                })
            }
        }
        _ => Err(right),
    }
}
```
Even this isn't ideal, because in the else branch in the first match arm, I still need to reconstruct the original `right` node, since it's been moved by the destructuring. I spent a while playing around with using references for various things in this function, but ultimately couldn't come up with anything better than this. If you have any ideas, let me know.
At any rate, at the call site in `combine_with_binary_operator`, this works pretty well:
```rust
fn combine_with_binary_operator(left: Node, token: &Token, right: Node) -> Node {
    let op = match token {
        // ...
    };
    match should_group_left(&op, right) {
        Ok((right_left, right_op, right_right)) => Node::BinaryOp {
            left: Box::new(Node::BinaryOp {
                left: Box::new(left),
                op,
                right: right_left,
            }),
            op: right_op,
            right: right_right,
        },
        Err(right) => Node::BinaryOp {
            left: Box::new(left),
            op,
            right: Box::new(right),
        },
    }
}
```
```
title = "Part 8: Variable Lookups and Function Calls"
tags = ["build a programming language", "rust"]
date = "2021-04-25 11:15:42 -0400"
slug = "variable-lookups-and-function-calls"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Arithmetic expressions are all well and good, but they don't really feel much like a programming language. To fix that, let's start working on variables and function calls.
<!-- excerpt-end -->
First step: lexing.
There are two new token types: identifier and comma.
```rust
enum Token {
    // ...
    Identifier(String),
    Comma,
}
```
The comma is just a single comma character. The identifier is a sequence of characters that represent the name of a variable or function. An identifier starts with a letter (either lower or uppercase) and is followed by any number of letters, digits, and underscores.
The main `tokenize` function checks if it's looking at a letter, and, if so, calls the `parse_identifier` function. `parse_identifier` simply accumulates as many valid identifier characters as there are and wraps them up in a token.
```rust
fn parse_identifier<I: Iterator<Item = char>>(it: &mut Peekable<I>) -> Option<Token> {
    let chars = take_while_peek(it, |c| {
        LOWERCASE_LETTERS.contains(c)
            || UPPERCASE_LETTERS.contains(c)
            || DIGITS.contains(c)
            || *c == '_'
    });
    if chars.is_empty() {
        None
    } else {
        let s = String::from_iter(chars);
        Some(Token::Identifier(s))
    }
}
```
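The `LOWERCASE_LETTERS` and `UPPERCASE_LETTERS` constants aren't shown in the post, but they're presumably defined the same way as `DIGITS` from part 1:
```rust
const LOWERCASE_LETTERS: RangeInclusive<char> = 'a'..='z';
const UPPERCASE_LETTERS: RangeInclusive<char> = 'A'..='Z';
```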
The next step is parsing.
There are two new kinds of AST nodes: lookup nodes and function call nodes. The only data lookup nodes store is the name of the variable they refer to. Function call nodes store the nodes for their parameters, in addition to the function name.
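The new variants aren't shown, but based on how they're constructed below, they presumably look like this:
```rust
enum Node {
    // ...
    Lookup {
        name: String,
    },
    Call {
        name: String,
        params: Vec<Node>,
    },
}
```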
When parsing an expression, an identifier token results in either a lookup or function call node, depending on whether it's followed by a left-paren.
```rust
fn do_parse<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Node> {
    // ...
    let mut node: Node = match it.peek().unwrap() {
        // ...
        Token::Identifier(value) => {
            let name = value.clone();
            it.next();
            match it.peek() {
                Some(Token::LeftParen) => Node::Call {
                    name,
                    params: parse_function_params(it),
                },
                _ => Node::Lookup { name },
            }
        }
    };
    // ...
}
```
Actually parsing function parameters is left to another function. After consuming the opening parenthesis, it checks if the next token is the closing right-paren. If it is, the right-paren is consumed and an empty vector is returned for the parameters.
If it isn't, the function enters a loop in which it parses a parameter expression and then expects to find either a comma or right-paren. If there's a comma, it's consumed and it moves on to the next iteration of the loop. If it's a closing parenthesis, it too is consumed and then the loop is exited and the parameter list returned. Upon encountering any other token, it panics.
```rust
fn parse_function_params<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Vec<Node> {
    it.next(); // consume left paren
    if let Some(Token::RightParen) = it.peek() {
        it.next();
        vec![]
    } else {
        let mut params = vec![];
        loop {
            let param_node = do_parse(it).expect("function parameter");
            params.push(param_node);
            match it.peek() {
                Some(Token::Comma) => {
                    it.next();
                }
                Some(Token::RightParen) => {
                    it.next();
                    break;
                }
                tok => {
                    panic!("unexpected token {:?} after function parameter", tok);
                }
            }
        }
        params
    }
}
```
And lastly, to make this work correctly, the comma token is added to the list of expression-end tokens.
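That is, presumably just:
```rust
const EXPRESSION_END_TOKENS: &[Token] = &[Token::RightParen, Token::Comma];
```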
With that, parsing function calls and variable lookups is possible:
```rust
fn main() {
    let tokens = tokenize("foo(bar)");
    if let Some(node) = parse(&tokens) {
        println!("{:#?}", node);
    }
}
```
```sh
$ cargo run
Call {
    name: "foo",
    params: [
        Lookup {
            name: "bar",
        },
    ],
}
```
```
title = "Part 9: Statements"
tags = ["build a programming language", "rust"]
date = "2021-05-03 17:46:42 -0400"
slug = "statements"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
So the parser can handle a single expression, but since we're not building a Lisp, that's not enough. It needs to handle multiple statements. For context, an expression is a piece of code that represents a value whereas a statement is a piece of code that can be executed but does not result in a value.
<!-- excerpt-end -->
In the AST, there's a new top-level type: `Statement`. For now, the only type of statement is one that contains an expression and nothing else.
```rust
enum Statement {
    Expr(Node),
}
```
The top level `parse` function has also changed to reflect this. It now returns a vector of statements instead of a single expression node. The `do_parse` function continues to work exactly as it has, but is renamed to `parse_expression`, since that's what it's actually doing.
```rust
fn parse(tokens: &[Token]) -> Vec<Statement> {
    let mut it = tokens.iter().peekable();
    let mut statements: Vec<Statement> = vec![];
    while let Some(_) = it.peek() {
        match parse_statement(&mut it) {
            Some(statement) => statements.push(statement),
            None => (),
        }
    }
    statements
}
```
The `parse_statement` function does exactly what the name suggests.
```rust
fn parse_statement<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Statement> {
    if it.peek().is_none() {
        return None;
    }
    let node = parse_expression(it).map(|node| Statement::Expr(node));
    node
}
```
With that in place, parsing multiple statements is easy. The only change is that, after successfully parsing a statement, we need to consume a semicolon if there is one. Then, the `parse` loop will continue and the next statement can be parsed.
```rust
fn parse_statement<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Statement> {
    // ...
    match it.peek() {
        Some(Token::Semicolon) => {
            it.next();
        }
        Some(tok) => {
            panic!("unexpected token {:?} after statement", tok);
        }
        None => (),
    }
    node
}
```
I intend to make semicolons optional and allow newline-delimited statements, but that is more complicated and will have to wait for another time. For now, this is good enough:
```rust
fn main() {
    let tokens = tokenize("1 + 2; foo();");
    println!("statements: {:#?}", parse(&tokens));
}
```
```sh
$ cargo run
statements: [
    Expr(
        BinaryOp {
            left: Integer(1),
            op: Add,
            right: Integer(2),
        },
    ),
    Expr(
        Call {
            name: "foo",
            params: [],
        },
    ),
]
```
```
title = "Part 10: Variable Declarations"
tags = ["build a programming language", "rust"]
date = "2021-05-09 19:14:42 -0400"
slug = "variable-declarations"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Now that the parser can handle multiple statements and the usage of variables, let's add the ability to actually declare variables.
<!-- excerpt-end -->
First off, the lexer now lexes a couple new things: the `let` keyword and the equals sign.
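The lexer changes aren't shown. My assumption is that `Equals` is just another single-character token, and that `let` is lexed like any other identifier, with the accumulated text checked before it's wrapped up in a token:
```rust
enum Token {
    // ...
    Let,
    Equals,
}

// at the end of parse_identifier, check for keywords before
// producing an identifier token:
let s = String::from_iter(chars);
if s == "let" {
    Some(Token::Let)
} else {
    Some(Token::Identifier(s))
}
```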
When the parser tries to parse a statement and sees a let token, it knows it's looking at a variable declaration. After the let token it expects to find an identifier (the variable name), an equals sign, and then an expression for the initial value of the variable. The variable name and the initial value expression then make up a `Declare` AST node.
```rust
fn parse_statement<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Statement> {
    // ...
    let node = match token {
        Token::Let => {
            let name: String;
            if let Some(Token::Identifier(s)) = it.peek() {
                name = s.clone();
                it.next();
            } else {
                panic!("expected identifier after let");
            }
            expect_token!(it, Equals, "expected equals after identifier after let");
            let value = parse_expression(it).expect("initial value in let statement");
            Some(Statement::Declare { name, value })
        }
        // ...
    };
    // ...
}
```
`expect_token!` is a simple macro I wrote to handle expecting to see a specific token in the stream and panicking if it's not there, since that's a pattern that was coming up frequently:
```rust
macro_rules! expect_token {
    ($stream:ident, $token:ident, $msg:expr) => {
        if let Some(Token::$token) = $stream.peek() {
            $stream.next();
        } else {
            panic!($msg);
        }
    };
}
```
Next, to actually evaluate variable declarations, the evaluator needs to have some concept of a context. Right now, every expression can be evaluated without any external state. But, when a variable is declared, we want it to be accessible later on in the code, so there needs to be somewhere to store that information.
For now, the only information we need is the map of variable names to their values.
```rust
struct Context {
    variables: HashMap<String, Value>,
}
```
There are also a few methods for `Context`, one to construct a new context and one to declare a variable with an initial value.
```rust
impl Context {
    fn new() -> Self {
        Self {
            variables: HashMap::new(),
        }
    }

    fn declare_variable(&mut self, name: &str, value: Value) {
        if self.variables.contains_key(name) {
            panic!("cannot re-declare variable {}", name);
        } else {
            self.variables.insert(name.into(), value);
        }
    }
}
```
Every `eval_` function has also changed to take a reference to the current context[^1] and the main `eval` function creates a context before evaluating each statement.
[^1]: For now a simple mutable reference is fine, because there's only ever one context: the global one. But, in the future, this will need to be something a bit more complicated.
With that, declaration statements can be evaluated just by calling the `declare_variable` method on the context:
```rust
fn eval_declare_variable(name: &str, value: &Node, context: &mut Context) {
    let val = eval_expr(value, context);
    context.declare_variable(name, val);
}
```
And we can actually set and read variables now[^2]:
```rust
fn main() {
    let code = "let foo = 1; dbg(foo)";
    let tokens = tokenize(&code);
    let statements = parse(&tokens);
    eval(&statements);
}
```
```sh
$ cargo run
Integer(1)
```
[^2]: The `dbg` function is a builtin I added that prints out the Rust version of the `Value` it's passed.
```
title = "Part 11: Lexical Scope"
tags = ["build a programming language", "rust"]
date = "2021-06-29 19:14:42 -0400"
short_desc = "Evaluating if statements and dealing with nested scopes."
slug = "lexical-scope"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
After adding variables, I added boolean values and comparison operators, because why not. With that in place, I figured it would be a good time to add if statements. Parsing them is straightforward—you just look for the `if` keyword, followed by a bunch of stuff—so I won't go into the details. But actually evaluating them was a bit more complicated.
<!-- excerpt-end -->
The main issue is that of lexical scope, which is where the context (i.e., what variables are accessible) at each point in the program is defined by where in the original source code it is.
Let's say you have some code:
```txt
let a = 1
if (condition) {
    print(a)
    let b = 2
}
print(b)
```
Entering the body of the if statement starts a new scope in which variables defined in any encompassing scope can be accessed, but not vice versa. `a`, defined in the outer scope, can be read from the inner scope, but `b`, defined in the inner scope, cannot be accessed from the outer scope.
What this means for me is that, in the evaluator, it's not just enough to have access to the current scope. All the parent scopes are also needed.
There are a couple ways I could approach this. One way would be to have something like a vector of contexts, where the last element is the current context. Accessing a parent context would just mean walking backwards through the vector. And to enter a new scope, you'd construct a new context and push it onto the vector, evaluate whatever you wanted in the new scope, and then remove it from the vector afterwards. This would work, but it risks needing to replace the vector's backing storage every time a context is entered. It's probably a premature optimization, but I decided to take a different approach to avoid the issue.
Another way of doing it is effectively building a singly-linked list, where each `Context` stores an optional reference to its parent. But a simple reference isn't enough. The `Context` struct would need a generic lifetime parameter in order to know how long the reference to its parent lives for. And in order to specify what type the reference refers to, we would need to be able to know how long the parent context's parent lives for. And in order to spell that type out we'd have to know how long the parent's parent's parent—you get the idea.
The solution I came up with was to wrap the context struct in an `Rc`, a reference-counted pointer. So, instead of each context having a direct reference to its parent, it owns an `Rc` that's backed by the same storage. Though, that's not quite enough, because the context needs to be mutable so code can do things like set variables. For that reason, it's actually an `Rc<RefCell<Context>>`. I understand this pattern of interior mutability is common practice in Rust, but coming from languages where this sort of thing is handled transparently, it's one of the weirder things I've encountered.
Now, on to how this is actually implemented. It's pretty simple. The `Context` struct stores the `Rc` I described above, but inside an `Option` so that the root context can have `None` as its parent.
```rust
struct Context {
    parent: Option<Rc<RefCell<Context>>>,
    variables: HashMap<String, Value>,
}
```
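Looking up a variable then means walking that chain of parents. The post doesn't show this part; a sketch of what it presumably looks like (the method name and the assumption that `Value` is `Clone` are mine):
```rust
impl Context {
    fn get_variable(&self, name: &str) -> Option<Value> {
        if let Some(value) = self.variables.get(name) {
            Some(value.clone())
        } else if let Some(parent) = &self.parent {
            // not in this scope; check the enclosing one
            parent.borrow().get_variable(name)
        } else {
            None
        }
    }
}
```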
Then, instead of the various eval functions taking a reference directly to the context, they get a reference to the `Rc`-wrapped context instead.
```rust
fn eval_binary_op(left: &Node, op: &BinaryOp, right: &Node, context: &Rc<RefCell<Context>>) -> Value {
    let left_value = eval_expr(left, context);
    // ...
}
```
The constructors for `Context` also change a bit. There's one that doesn't have a parent, called `root`, in addition to `new` which does.
```rust
impl Context {
    fn root() -> Self {
        Self {
            parent: None,
            variables: HashMap::new(),
        }
    }

    fn new(parent: Rc<RefCell<Context>>) -> Self {
        Self {
            parent: Some(parent),
            variables: HashMap::new(),
        }
    }
}
```
Unlike the evaluation functions, `Context::new` takes an owned `Rc`, avoiding the infinite-lifetimes problem from earlier. This means that, when constructing a new context, we just need to clone the existing `Rc`.
```rust
fn eval_if(condition: &Node, body: &[Statement], context: &Rc<RefCell<Context>>) {
    let body_context = Context::new(Rc::clone(context));
    let body_context_ref = Rc::new(RefCell::new(body_context));
    // ...
}
```
After the new context is constructed, it too is wrapped in a `RefCell` and `Rc` for when the condition and body are evaluated. This is a little bit unwieldy, but hey, it works.
Actually evaluating the if is simple enough that I won't bother going through it in detail. It just evaluates the condition expression (confirming that it's a boolean; there shall be no implicit conversions!) and, if it's true, evaluates each of the statements in the body.
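Still, filled in, it plausibly looks something like this (the `Value::Boolean` variant and the `eval_statement` function are my assumed names):
```rust
fn eval_if(condition: &Node, body: &[Statement], context: &Rc<RefCell<Context>>) {
    let body_context = Context::new(Rc::clone(context));
    let body_context_ref = Rc::new(RefCell::new(body_context));
    // the condition is evaluated in the enclosing scope, the body in the new one
    match eval_expr(condition, context) {
        Value::Boolean(true) => {
            for statement in body {
                eval_statement(statement, &body_context_ref);
            }
        }
        Value::Boolean(false) => (),
        _ => panic!("if condition must be a boolean"),
    }
}
```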
```rust
fn main() {
    let code = r#"let a = 1; if (a == 1) { dbg(a); }"#;
    let tokens = tokenize(&code);
    let statements = parse(&tokens);
    eval(&statements);
}
```
```sh
$ cargo run
Integer(1)
```
```
title = "Debugging My Gemini NWProtocolFramer Implementation"
tags = ["swift", "gemini"]
date = "2021-07-07 23:32:42 -0400"
slug = "gemini-client-debugging"
```
I recently ran into an issue with the Network.framework Gemini client I'd [previously implemented](/2020/gemini-network-framework/) that turned out to be somewhat perplexing. So, I thought I'd write a brief post about it in case anyone finds it interesting or helpful.
<!-- excerpt-end -->
The gist of the issue is that when connecting to a certain host, the connection would hang indefinitely. The issue was 100% reproducible in my app, both in the simulator and on an actual device, in all manner of network conditions. What led me to believe that it was an issue with my implementation was that the problem only happened with a single host (I suspect the incompatibility was with whatever server software was being used) and the fact that I could not reproduce the issue with any other client.
My initial attempts at debugging were fruitless. Every handler and callback I had was just never getting called. There was no error that was being swallowed or anything, just silence. Eventually I set a breakpoint in the `handleInput` method of my `NWProtocolFramerImplementation`.
Stepping through[^1], I saw the status code get parsed successfully. Then it moved on to parsing the response meta, a string of up to 1024 bytes followed by a carriage return and line feed.
[^1]: Debugging this is kind of painful because much of the actual work is done inside the closure passed to `NWProtocolFramer.Instance.parseInput`. It's invoked synchronously, so it does affect control flow, but if you click Step Over at the wrong time, you can accidentally skip a whole critical chunk of code.
Next, I paused inside the `parseInput` closure that's responsible for parsing the meta string. The buffer that I received was 11 bytes long. I was immediately wary because I'd seen that number before in this context—it's the length of `text/gemini`. Indeed, printing out `String(bytes: buffer, encoding: .utf8)` revealed that it was the meta string I was expecting. But it was just the meta string, not including the CR/LF pair that's supposed to follow it.
Continuing to step through, I saw the closure return 0, indicating that no bytes were consumed, because the CRLF was missing. After that, the main body of the `handleInput` method would see that the meta wasn't found and would itself return the number of bytes it expected to receive before it should be invoked again. It does this by adding 1 to the length of the buffer from which meta parsing failed. So, the next time `handleInput` is called by the framework, there should be at least 12 bytes available.
After hitting resume, the debugger trapped again in the `parseInput` closure for the meta. I checked `buffer.count` and found... 11 bytes. I know `handleInput` returned 12, so why did I still only have 11 bytes?
The realization I came to, after puzzling over this for a couple days, is that `parseInput` behaves a little weirdly, though in a way that's technically correct. It seems that even if more data is available in some internal buffer of the framer's—which we know there must be because `handleInput` was invoked again after we returned 12 earlier—we won't receive all of it. It's not _wrong_, 11 bytes is indeed at least 2 and no more than 1026, but it's certainly unintuitive.
To verify this behavior, I tweaked my code to call `parseInput` with a minimum length of 13 instead of 2 on the second attempt to parse the meta. Lo and behold, it worked. The buffer now had 13 bytes of data: the full meta string and the CR/LF.
And that explains the user-facing issue that led to all this. A few attempts at parsing would fail, but then the server would stop sending data because there was nothing left, so `handleInput` would never be called, leaving the Gemini request in a waiting state forever.
So, to properly fix the issue what needs to happen is we have to be smarter about the `minimumIncompleteLength` on subsequent attempts to parse the metadata. To do this, I saved the length of the last buffer for which meta parsing was attempted as an instance variable in the framer implementation. With that, we can determine how many bytes we _actually_ need before trying again.
```swift
class GeminiProtocol: NWProtocolFramerImplementation {
    // ...
    private var lastAttemptedMetaLength: Int? = nil
    // ...

    func handleInput(framer: NWProtocolFramer.Instance) -> Int {
        // ...
        if tempMeta == nil {
            let min: Int
            if let lastAttemptedMetaLength = lastAttemptedMetaLength {
                min = lastAttemptedMetaLength + 1
            } else {
                min = 2
            }
            _ = framer.parseInput(minimumIncompleteLength: min, maximumLength: 1024 + 2) { (buffer, isComplete) -> Int in
                guard let buffer = buffer else { return 0 }
                self.lastAttemptedMetaLength = buffer.count
                // ...
            }
        }
        guard let meta = tempMeta else {
            if let attempted = self.lastAttemptedMetaLength {
                return attempted + 1
            } else {
                return 2
            }
        }
        // ...
    }

    // ...
}
```
The minimum incomplete length still defaults to 2 because on the first attempt to parse, we don't have previous attempt info and the only thing we know is that there must be a carriage return and line feed (as best as I can interpret it, the Gemini spec doesn't say that there must be meta text, so it could be zero bytes).
With that, the original issue is finally fixed and requests to the problematic server complete successfully.
My best guess for why this was happening in the first place and only in such specific circumstances is this: the specific server software being used was sending the meta text over the wire separately from the CRLF. This could mean they arrive at my device separately and are thus stored in two separate "chunks". Then, when `parseInput` is called, Network.framework simply starts looking through the stored chunks in order. And, since the first chunk is longer than `minimumIncompleteLength`, it's the only one that's returned.
There's a note in the Network.framework header comments[^2] for `nw_framer_parse_input` that leads me to believe this. It says, with regard to the `temp_buffer` parameter:
[^2]: Annoyingly it's not present in the Swift docs nor the Objective-C docs. Seriously, this is not good. There's a wealth of information in the header comments that's not present in the regular docs. If you feel so inclined, you can dupe FB9163518: "Many NWProtocolFramer.Instance methods are missing docs that are present in the generated headers".
> If it is NULL, the buffer provided in the completion will not copy unless a copy is required to provide the minimum bytes as a contiguous buffer.
The possibility of a copy being needed to form a contiguous buffer implies that there could be discontiguous data, which lines up with my "chunks" hypothesis and would explain the behavior I observed.
<aside>
Fun fact, the C function corresponding to this Swift API, `nw_framer_parse_input`, takes a maximum length, but it also lets you pass in your own temporary buffer, in the form of a `uint8_t*`. It's therefore up to the caller to ensure that the buffer that's pointed to is at least as long as the maximum length. This seems like a place ripe for buffer overruns in sloppily written protocol framer implementations.
</aside>
Anyhow, if you're interested, you can find the current version of my Gemini client implementation (as of this post) [here](https://git.shadowfacts.net/shadowfacts/Gemini/src/commit/3055cc339fccad99ab064f2daccdb65efa8024c0/GeminiProtocol/GeminiProtocol.swift).
```
title = "On SwiftUI"
tags = ["swift"]
date = "2021-08-25 15:34:42 -0400"
slug = "swiftui"
```
Over the past several days, I built a complete, functioning app in SwiftUI, and, well, I have some thoughts.
<!-- excerpt-end -->
The app I built is a little TOTP app that works exactly the way I want, because Authy is bad[^1]. And it plays to SwiftUI's strengths pretty well. It's got a straightforward UI, with very few custom interface elements. The data model is simple and how it interacts with the interface is clearly delineated.
[^1]: It only shows one code at a time and requires too many taps to switch. It also has no way of exporting or backing up the TOTP secrets.
For the most part, writing the app has been a good experience. The vast majority of it I wrote in about 20 hours worth of work spread over four days. As of writing this, it's about 1700 lines of Swift. The app itself is open source [here](https://git.shadowfacts.net/shadowfacts/OTP), so if you want to see the codebase that I'm rambling on about, you can take a look.
The process was generally enjoyable. It's what everyone says about SwiftUI: you can iterate incredibly quickly, there's vastly less boilerplate than UIKit, and it's much easier to build simple, reusable views.
This was also the first project where I actually managed to use the Xcode Previews feature. Although it always seems slick in WWDC demos, I was never able to get it to work reliably. But this time was different. The fact that Xcode pauses previews whenever something outside of the view body changes remains annoying, but at least while I was only modifying the body it did update reasonably quickly. I guess it really just works best for smaller projects.
Building almost all of the UI was easy. The design is simple enough that it doesn't have to use any fancy tricks or workarounds—not a `GeometryReader` in sight. Plumbing up the model was similarly painless, both for editing and displaying data (aside from some oddities involving timing and refreshing codes).
But, here's where the complaints start. Although the layout and design were uncomplicated, building some of the interactions was not. While working on it, SwiftUI felt incredibly powerful and uncomfortably restrictive simultaneously. For example:
Want to give a context menu action the destructive style? Ok. Want to make a nested menu destructive? Nope.
Want to drag and drop one view onto another view? Go ahead. How about dropping that view onto one that's inside a `List`? Woah there.
Making a text field focused programmatically? Sure. And what about making it focused immediately on appearance? Not so fast.
To be clear, many of the issues I've encountered with SwiftUI are almost certainly bugs that will (hopefully) be fixed eventually. But where this differs from UIKit is that the architecture of UIKit lets you fix and workaround things yourself, whereas SwiftUI operates as a black box.
To use the same examples, but in UIKit: to make a context menu element destructive or not is exactly the same regardless of whether it has children, adding a drop interaction to a UIView is the same inside a table view as it is outside, and focusing a text field uses the same method no matter when in the lifecycle it's called.
I don't think this is just because I have more experience with UIKit. It's not that there are lots of UIKit tricks I know to make this work, in contrast with SwiftUI. There is a fundamental difference between the two frameworks. The delta between doing any of the things I mentioned in one context versus in the other is zero. Because of the fundamental design of UIKit, all of these things basically just work. Yes, implementation wise, the framework engineers may still need to take special care to ensure that they work. But, from an API design point of view, nothing different is required.
The impression I get from Apple is that SwiftUI wants to make hard things easy. That's a great goal, but currently it comes at the expense of making easy things complicated. And every time I do anything substantial with SwiftUI, I constantly feel this tension. SwiftUI both empowers me and hinders me, every step of the way.
I don't know what the solution is, if there is one (part of me thinks these sorts of issues are intrinsic to declarative tools), but I really hope there is one. When it works, I really enjoy SwiftUI and I want to be able to enjoy it more without running into unresolvable framework issues quite so frequently.
```
title = "A Mac Menu Bar App to Toggle Natural Scrolling"
tags = ["swift"]
date = "2021-09-02 22:31:42 -0400"
slug = "scrollswitcher"
```
There are two ways you can configure the scroll direction on a computer: normal or inverted (what Apple calls "natural"). If you, like most, use a mouse with a wheel, the normal scrolling scheme is probably what you use. Under it, moving the mouse wheel down (i.e., so that a point on the top of the wheel moves down/closer to you) causes the content to move _up_ and the viewport (the window into the content) to move down. Similarly on a multitouch trackpad, under normal scrolling, as your two fingers move down, the content moves up and the viewport down.
The other option—natural scrolling—flips this, so as your fingers move down on the trackpad, _the content moves down_ and viewport moves up, and similarly for rotating the mouse wheel. When using a mouse, this feels to most people obviously backwards. But this setting doesn't exist without reason. It's transposing the paradigm of touchscreens on to the trackpad, where your finger remains pinned to the point in the content where you first touched. You move the content directly, rather than moving the viewport.
This generally isn't a big deal; most people just find the mode they prefer, change the setting, and then never touch it again. But what if you prefer both?
<!-- excerpt-end -->
Why might you want both options for this preference? Well, I use my laptop both docked and undocked. When it's at my desk and connected to a monitor, I also use a keyboard and mouse, for which I want normal scrolling. But when it's undocked and I'm using the trackpad, I vastly prefer natural scrolling.
<aside>
Yes, I recognize that I'm probably in the tiny minority of people who prefer this. Could I try myself to accept the wrong way of scrolling on either the trackpad or mouse and then just leave the setting alone? [Probably](https://www.youtube.com/watch?v=MFzDaBzBlL0). But I don't really want to.
</aside>
Unfortunately, macOS only has one setting shared between both mice and trackpads. Yes, despite appearing in two different places in System Preferences (under both Trackpad and Mouse), with the implication that they're two different settings, both checkboxes are backed by the same underlying value. So, we need some workaround.
Since the preference can't be different for mouse and trackpad, I at least want some way of toggling it quickly, so that when I dock/undock my laptop, I can easily correct it. A menu bar app seemed like the perfect fit, as I don't want something cluttering up my dock and there's almost no UI (but I do want more than just a global keyboard shortcut).
A bit of cursory googling reveals that the actual preference is stored in a global user default, `com.apple.swipescrolldirection`. It's a boolean value, and true means natural scrolling is enabled. Reading it is easy, but unfortunately setting it is a bit more complicated. Just using `defaults write -g com.apple.swipescrolldirection -bool YES` on the command line does not change the actual value—although when you open the Mouse preferences pane[^1], you can see the checkbox state has indeed changed. Places on the internet mention this and say you need to log out and back in to correct the input behavior. But, that isn't necessary when you change it in System Preferences, so clearly something more is going on.
[^1]: Or, if the Mouse prefs pane was already opened in the current session, relaunch the System Preferences app. Preference panes are not reloaded when you close and re-enter them, so manually writing the default causes the UI to desync.
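For reference, you can inspect the current value from the command line (`1` means natural scrolling is enabled):
```sh
$ defaults read -g com.apple.swipescrolldirection
1
```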
To try and figure out what else System Preferences was doing, I launched Console.app and started streaming log messages. I toggled the scroll direction checkbox a bunch of times in both the Mouse and Trackpad pref panes before stopping log streaming. Searching the logs for "mouse" and "trackpad" revealed nothing useful, but searching for "scroll" turned up one beautiful nugget of information:
<img src="/2021/scrollswitcher/console.png" alt="Console.app showing a message from disnoted reading register name: SwipeScrollDirectionDidChangeNotification">
`SwipeScrollDirectionDidChangeNotification`. Sure sounds like the name of a notification that would be fired to inform other parts of the OS about the change.
With at least an inkling of the direction I needed to go in, I started building the app with the initial goal of displaying the current scrolling mode. I created a new Mac app in Xcode and set the `LSUIElement` key in the `Info.plist` to `YES`, hiding the app from the dock. I also removed the Xcode-created storyboard and then created a system status bar item with a dummy icon.
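That setup looks roughly like this (a sketch; the symbol name is just a placeholder at this point):
```swift
// In the app delegate:
var statusItem: NSStatusItem!

func applicationDidFinishLaunching(_ notification: Notification) {
    // Stored in a property, since the caller is responsible for retaining the item.
    statusItem = NSStatusBar.system.statusItem(withLength: NSStatusItem.squareLength)
    statusItem.button?.image = NSImage(systemSymbolName: "scroll", accessibilityDescription: "Scroll direction")
}
```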
## Fun Problem #1: Storyboards, or the lack thereof
Here is where I encountered the first (small) problem: the menu item never appeared. My code that created the menu item in `applicationDidFinishLaunching` was being hit, as evidenced by a breakpoint, and the menu item was being created, but it just didn't show up. I recalled noticing that the docs of `NSStatusBar.statusItem(withLength:)` mentioned that the `NSStatusItem` had to be retained by the caller, otherwise it would be removed when deallocated. But that shouldn't have been the problem, as I was storing it in a property on my app delegate. Unless...
It turns out using `@main` on your app delegate does not strongly retain it unless there's a storyboard, which I had deleted. To fix it, I had to replace the `@main` with a custom `main.swift` which creates the `NSApplication` instance and strongly retains the delegate.
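The replacement `main.swift` only needs a few lines (a sketch, assuming the delegate class is named `AppDelegate`):
```swift
import AppKit

// Keep a strong reference to the delegate, since NSApplication won't.
let delegate = AppDelegate()
NSApplication.shared.delegate = delegate
_ = NSApplicationMain(CommandLine.argc, CommandLine.unsafeArgv)
```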
## Reading the Scroll Direction
With the menu item now displaying, it was time to read the current scroll direction. The most direct mapping from what I did on the command line would be to use Foundation's `Process` to actually run the `defaults` command and then examine the output. But, because the value we're after is stored in the [global domain](https://developer.apple.com/documentation/foundation/nsglobaldomain), it should be—and indeed is—accessible to us directly through `UserDefaults`.
```swift
let defaultsKey = "com.apple.swipescrolldirection"
enum Direction: Int, Equatable {
    case normal, natural

    static var current: Direction {
        let naturalEnabled = UserDefaults.standard.bool(forKey: defaultsKey)
        return naturalEnabled ? .natural : .normal
    }
}
```
Then, using the current direction, I could set the menu bar item's icon: SF Symbols' `scroll.fill` for natural scrolling and `scroll` for normal.
<figure>
<img src="/2021/scrollswitcher/scrolls.png" alt="filled and outlined scroll icons in the menubar">
<figcaption>Scrolls, get it?</figcaption>
</figure>
## Setting the Scroll Direction
<aside class="inline">
I briefly toyed with using `UserDefaults` to also set the scroll direction directly, without having to shell out to the `defaults` command. The obvious thing to do would be to construct a `UserDefaults` for the global suite. But sadly, no luck:
```txt
Using NSGlobalDomain as an NSUserDefaults suite name does not make sense and will not work.
```
My next idea was calling `setPersistentDomain(_:forName:)` on the standard `UserDefaults` with a dictionary containing the new scroll direction for the global domain. This was, not to overstate things, a colossal failure. The docs for this method say:
> Calling this method is equivalent to initializing a user defaults object with `init(suiteName:)` passing `domainName`, and calling the `set(_:forKey:)` method for each key-value pair in domain.
What actually happened was calling with `UserDefaults.globalDomain` reset a whole bunch of global defaults (though seemingly not all) across my entire computer. This included (but may not have been limited to): a number of General system prefs (including what first tipped me off that something went wrong, the accent color), and the Xcode 13 setting for showing file extensions.
Pro tip: do not call `setPersistentDomain` with `NSGlobalDomain`.
</aside>
Setting the scroll direction via the command line isn't too bad. I just construct a `Process`, configure it to run `defaults` with the right arguments, and then launch it:
```swift
private func setDirection(_ new: Direction) {
    let proc = Process()
    proc.launchPath = "/usr/bin/defaults"
    let newVal = new == .normal ? "NO" : "YES"
    proc.arguments = ["write", "-g", "com.apple.swipescrolldirection", "-bool", newVal]
    proc.launch()
    proc.waitUntilExit()
    if proc.terminationStatus != 0 {
        fatalError("uh oh, exit code: \(proc.terminationStatus)")
    }
}
```
With that wired up to run when the menu item was clicked, I tried toggling the scroll direction. Unfortunately, it failed, because `defaults` didn't run successfully. But, it had at least printed an error message:
```txt
[User Defaults] Couldn't write values for keys (
"com.apple.swipescrolldirection"
) in CFPrefsPlistSource<0x6000017a1200> (Domain: kCFPreferencesAnyApplication, User: kCFPreferencesCurrentUser, ByHost: No, Container: (null), Contents Need Refresh: Yes): setting preferences outside an application's container requires user-preference-write or file-write-data sandbox access
```
Everyone's favorite sandbox, snatching defeat from the jaws of victory.
## Fun Problem #2: Sandboxing
Unfortunately, both of the mentioned entitlements seem to be private (the only mentions of them I can find on the internet are from people running into some Catalyst error). So, I needed to disable sandboxing for this app altogether.
Disabling sandboxing turned out to be annoyingly confusing. Most of the documentation you can find on the internet is outdated and says you can simply flip the "App Sandbox" switch in the Capabilities section of the Xcode project. Slight problem: as of Xcode 13 (if not earlier), that switch no longer exists. And flat out removing the App Sandbox entry from Signing & Capabilities did not disable it; the same error occurred. Setting App Sandbox to `NO` in the entitlements file was similarly ineffective.
After banging my head against this for a while (it seems like this has not been discussed on the internet recently enough to be of any help), I looked in the target's Build Settings, where I found the "Enable App Sandbox" flag—and it was set to Yes. Setting it to No finally fixed the issue, and the default was actually getting set.
The updated value could successfully be read from the app itself, as well as from outside. And that left me where I got stuck before on the command line: the preference was being updated, but nothing else on the system was aware of the change.
## Into the Caves
I knew the name of the notification I needed to fire, but not what to do with it—the normal `NotificationCenter` only works in-process, doesn't it? I decided the best course of action was to go spelunking through the System Preferences binary to try and figure out what it was doing. But not actually the System Preferences binary: there's a separate binary for each preference pane. A little bit of poking around the filesystem led me to the `/System/Library/PreferencePanes/` directory where all the builtin ones live. `Mouse.prefPane` looked exactly like what I wanted. Opening it in [Hopper](https://www.hopperapp.com/), I could search the strings for the notification name. Following the references to the string back through the CFString led me to the `-[MouseController awakeFromNib]` method.
Looking at the disassembly, we can see exactly what it's doing:
<div class="article-content-wide"><img src="/2021/scrollswitcher/awakefromnib.png" alt="Hopper showing the disassembly for -[MouseController awakeFromNib]"></div>
It's adding an observer to `NSDistributedNotificationCenter`'s `defaultCenter` for the `SwipeScrollDirectionDidChangeNotification`—the notification name I saw earlier in the Console. The other place it's referenced from (`[MTMouseScrollGesture initWithDictionary:andReadPreferences:]`) is doing the same thing: adding an observer. So, it looks like this notification isn't what triggers the change to the actual scroll input handling deep in the guts of the system. If that were the case, I'd expect to see the preference pane _send_ the notification, not just receive it.
But, it still may be useful. Looking at the implementation of the `_swipeScrollDirectionDidChangeNotification:` method it's registering as the callback for the notification, we can see that it's probably updating the checkbox value. That `setState:` call sure seems like it's on the `NSButton` used for the natural scrolling checkbox.
<div class="article-content-wide"><img src="/2021/scrollswitcher/notificationhandler.png" alt="Hopper showing the disassembly for -[MouseController _swipeScrollDirectionDidChangeNotification:]"></div>
[`NSDistributedNotificationCenter`](https://developer.apple.com/documentation/foundation/nsdistributednotificationcenter) is described as being like the regular notification center but for inter-process communication, which sounds like what we want. It has pretty much the same API as the regular one, so we can just send that notification when we change the scroll mode.
```swift
extension Notification.Name {
    static let swipeScrollDirectionDidChangeNotification = Notification.Name(rawValue: "SwipeScrollDirectionDidChangeNotification")
}
```
```swift
private func setDirection(_ new: Direction) {
    let proc = Process()
    // omitted
    DistributedNotificationCenter.default().postNotificationName(.swipeScrollDirectionDidChangeNotification, object: nil, userInfo: nil, deliverImmediately: false)
}
```
With that in place, clicking the menu bar item both sets the default and causes the System Preferences UI to update to match. Going the other way is similarly easy, since, although I couldn't find it in `Mouse.prefPane`, something is emitting that notification when the value changes. I just call `addObserver` and register myself for the notification and update the icon when it's received.
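That registration is roughly the following (a sketch; `updateIcon` stands in for whatever helper updates the status item's image):
```swift
DistributedNotificationCenter.default().addObserver(forName: .swipeScrollDirectionDidChangeNotification, object: nil, queue: .main) { [weak self] _ in
    self?.updateIcon(Direction.current)
}
```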
<div>
<video controls style="max-width: 100%; margin: 0 auto; display: block;" title="Screen recording of menu bar item changing highlighted state in sync with the natural scrolling checkbox in System Preferences.">
<source src="/2021/scrollswitcher/sync.mp4" type="video/mp4">
</video>
</div>
## Back Into the Caves
That's all well and good, but clicking the menu item still doesn't actually change what happens when you move two fingers on the trackpad. It clearly works when clicking the checkbox in System Preferences, so there must be something else it's doing that we're not. Internally, this feature seems to be consistently referred to as the "swipe scroll direction" (even though it affects non-swipe scrolling), so, back in Hopper, we can search for procedures named like that. There's one that immediately looks promising, `setSwipeScrollDirection`, which just delegates to an externally implemented `_setSwipeScrollDirection`.
<div class="article-content-wide"><img src="/2021/scrollswitcher/setswipescrolldirection.png" alt="Hopper showing the assembly for setSwipeScrollDirection"></div>
Looking at the references to the function, I saw it was called by the `-[MouseController scrollingBehavior:]` setter. That seems like the function that I wanted, but since it was implemented elsewhere, I had no idea what parameters it took. So, where's it implemented?
I used `otool -L` to print all the frameworks the prefpane was linked against, and then started guessing.
```sh
$ otool -L /System/Library/PreferencePanes/Mouse.prefPane/Contents/MacOS/Mouse
/System/Library/PreferencePanes/Mouse.prefPane/Contents/MacOS/Mouse:
/System/Library/Frameworks/PreferencePanes.framework/Versions/A/PreferencePanes (compatibility version 1.0.0, current version 1.0.0)
# rest omitted
```
Actually getting the framework binaries is a bit tricky, since, starting with macOS Big Sur, the binaries are only present in the `dyld` shared cache. The process is a little bit annoying, but not terribly complicated. [This article](https://lapcatsoftware.com/articles/bigsur.html) by Jeff Johnson explains how to build `dyld_shared_cache_util`, which you can use to transform the shared cache back into a directory with all the framework binaries.
```sh
$ dyld_shared_cache_util -extract ~/Desktop/Libraries/ /System/Library/dyld/dyld_shared_cache_x86_64
```
It took a couple guesses, but I found that the `_setSwipeScrollDirection` function is defined in `PreferencePanesSupport.framework`.
<div class="article-content-wide"><img src="/2021/scrollswitcher/preferencepanessupport.png" alt="Hopper showing the disassembly for _setSwipeScrollDirection in PreferencePanesSupport"></div>
Hopper thinks it takes an int, but we can clearly see the parameter's being used as a bool. `rcx` is initialized to `kCFBooleanFalse` and set to `kCFBooleanTrue` if the parameter is true, and that's the value being passed to `CFPreferencesSetValue`. Perfect.
Now—finally—that should be everything we need.
Back in the Xcode project, I added a bridging header that defines the externally implemented function and lets me call it from Swift.
```c
#import <stdbool.h>
extern void setSwipeScrollDirection(bool direction);
```
Then, from Swift, we can simply call the function.
```swift
private func setDirection(_ new: Direction) {
    let proc = Process()
    // omitted
    DistributedNotificationCenter.default().postNotificationName(.swipeScrollDirectionDidChangeNotification, object: nil, userInfo: nil, deliverImmediately: false)
    setSwipeScrollDirection(new == .natural)
}
```
Lastly, in order to make the extern function actually go to the right place, the app needs to be linked against `/System/Library/PrivateFrameworks/PreferencePanesSupport.framework`. And with that, clicking the menu item toggles the preference and immediately updates the user input behavior.
I can't really take a screen recording of that, so you'll have to take my word that it works.
If you're interested in the complete code, it can be found [here](https://git.shadowfacts.net/shadowfacts/ScrollSwitcher). It's not currently packaged for distribution, but you can build and run it yourself. Because it needs the sandbox disabled, it won't ever be in the App Store, but at some point I might slap an app icon on it and publish a notarized, built version. So, if anyone's interested, let me know.
As it currently exists, the app—which I'm calling ScrollSwitcher—covers 90% of my needs. I don't generally dock/undock more than once or twice a day, so just being able to click a menu bar item is plenty fast. That said, I may still extend it for "fun". One obvious improvement would be automatically changing the state when an external mouse is connected/disconnected. That shouldn't be too hard, right? Right?

View File

@ -0,0 +1,127 @@
```
title = "Automatically Changing Scroll Direction Based on USB Devices"
tags = ["swift"]
date = "2021-09-19 15:17:42 -0400"
slug = "auto-switch-scroll-direction"
```
[Last time](/2021/scrollswitcher/) I wrote about programmatically toggling natural scrolling on macOS and building a menu bar app to let me do that quickly. It works very well, but there's still a little bit of friction with having to click the menu bar icon—or, more accurately, forgetting to click it and then scrolling backwards and realizing you forgot. As I mentioned at the end of my previous post, one obvious way to extend it, now that the ability to programmatically set direction is in place, would be toggling it automatically based on what's currently connected. This turned out to not be terribly complicated, but dealing with IOKit was somewhat annoying, so I thought I'd write it up.
<!-- excerpt-end -->
## Watching for USB Device Changes
Some cursory googling quickly led me to the IOKit documentation, which describes an `IOHIDManager` that sounds exactly like what I wanted. Unfortunately, the docs are rather lacking—there's not a single example of how to actually set it up (and it's not helped by the fact that it's all old CoreFoundation-style API, nor that the Swift overlay is very poor).
But, with a combination of the docs and the open source [ManyMouse project](https://github.com/icculus/manymouse/blob/main/macosx_hidmanager.c)[^1], I managed to get it working.
[^1]: Archived: <https://archive.is/NvmMN>
```swift
manager = IOHIDManagerCreate(kCFAllocatorDefault, 0 /* kIOHIDManagerOptionNone */)
```
First, you need to create a manager using `IOHIDManagerCreate`. It takes an allocator and the options to use. I'm using the literal 0 here because the constants for the options are not imported into Swift. You also need to retain the manager for as long as you want to observe changes, so I'm storing it here in an instance var on my app delegate.
After that, you tell the manager what sorts of devices you care about. You do this by creating a dictionary that matches against the properties of a device.
```swift
var dict = IOServiceMatching(kIOHIDDeviceKey)! as! [String: Any]
dict[kIOHIDDeviceUsagePageKey] = kHIDPage_GenericDesktop
dict[kIOHIDDeviceUsageKey] = kHIDUsage_GD_Mouse
IOHIDManagerSetDeviceMatching(manager, dict as CFDictionary)
```
The `IOServiceMatching` function takes the name of a service type, for which we pass the redundantly named HID device key. It returns a `CFDictionary`, which I convert into a Swift dictionary, so that I can set properties on it more easily than dealing with `CFDictionarySetValue` from Swift. The filter is further refined by limiting it to the USB Generic Desktop usage page[^2], and specifically the mouse usage (the USB HID spec makes no differentiation between kinds of pointing devices; they're all just mice), before setting the matching dictionary on the manager.
[^2]: USB HID usage page and usage tables can be found here: <https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf>.
After that, we register callbacks for device addition and removal and add the manager to the run loop.
```swift
IOHIDManagerRegisterDeviceMatchingCallback(manager, hidDeviceAdded(context:result:sender:device:), nil)
IOHIDManagerRegisterDeviceRemovalCallback(manager, hidDeviceRemoved(context:result:sender:device:), nil)
IOHIDManagerScheduleWithRunLoop(manager, CFRunLoopGetCurrent(), CFRunLoopMode.commonModes.rawValue)
```
One important thing to note about the callbacks is that, since this is all old CoreFoundation-style code and not Objective-C, they're not blocks, they're C function pointers. Specifically, [`IOHIDDeviceCallback`](https://developer.apple.com/documentation/iokit/iohiddevicecallback)s. This means that, when providing one from Swift, the value can either be a global function or a closure that does not capture anything. In either case, this requirement is because a C function pointer needs to point to a specific point in our binary, which can't encode additional information like captures or the method receiver. To help alleviate this, the callback registration functions take a third parameter, a void pointer to some context that will be provided to the function when it's called. All of the contextual information I need in the callbacks can be stored on the app delegate, though, so I don't bother with trying to make the `void *context` shenanigans play nicely with Swift.
The callback functions receive an `IOHIDDevice`, the functions for which appear to only be documented in the headers. The first thing I do in the callbacks is get the name and usage for the device:
```swift
func hidDeviceAdded(context: UnsafeMutableRawPointer?, result: IOReturn, sender: UnsafeMutableRawPointer?, device: IOHIDDevice) {
    guard let name = IOHIDDeviceGetProperty(device, kIOHIDProductKey as CFString) as? String,
          let usage = IOHIDDeviceGetProperty(device, kIOHIDPrimaryUsageKey as CFString) as? UInt32 else {
        fatalError()
    }
}
```
The get-property function returns an optional `CFTypeRef`, so we have to try to cast it to the appropriate type.
We need the usage because, even though the manager's filter is set to only allow mouse devices through, the callback is still sometimes invoked with a device of type `kHIDUsage_GD_SystemControl` for reasons that I can't discern. So, as a precaution, I silently ignore devices that don't have the right usage:
```swift
func hidDeviceAdded(context: UnsafeMutableRawPointer?, result: IOReturn, sender: UnsafeMutableRawPointer?, device: IOHIDDevice) {
    // ...
    guard usage == kHIDUsage_GD_Mouse else { return }
}
```
## Determining Device Types
The next task is determining whether the given device is a mouse or trackpad. Unfortunately, as I mentioned, the USB HID spec doesn't differentiate between mice and trackpads, and I couldn't find any `IOHIDDevice` properties that did either. So, we have to come up with our own heuristics based on the information we do have access to. Here's where it gets really dumb:
```swift
func deviceNameIsProbablyTrackpad(_ name: String) -> Bool {
    return name.lowercased().contains("trackpad")
}
```
Yep. I figure this should cover most trackpads (at least Apple ones, which do call themselves "trackpad"s). And what makes something a mouse? Well, if it's not a trackpad. Yes, it's a bit ridiculous, but it works well enough.
```swift
func hidDeviceAdded(context: UnsafeMutableRawPointer?, result: IOReturn, sender: UnsafeMutableRawPointer?, device: IOHIDDevice) {
    // ...
    let delegate = NSApp.delegate as! AppDelegate
    if deviceNameIsProbablyTrackpad(name) {
        delegate.trackpadCount += 1
    } else {
        delegate.mouseCount += 1
    }
}
```
We track the actual counts of trackpads and mice, rather than just whether one is connected or not, because in the device removed callback, it saves having to re-enumerate all connected devices in case there were multiple of one type connected and only one was removed.
The device removed callback does pretty much the same thing, just subtracting instead of adding 1 to the respective counts.
## Automatically Switching Scroll Direction
After the counts are updated, the delegate is notified that devices have changed and told to update the scroll direction. This is done by sending `Void` to a `PassthroughSubject` on the app delegate. I use Combine rather than just calling a method on the app delegate because, when the `IOHIDManager` is initially added, it fires the added callback a whole bunch of times for every device that's already connected. Additionally, when a USB hub is added/removed, we get callbacks for all of the individual devices, and I don't want to flip the scroll direction a bunch of times. So, when the app delegate listens to the subject, it uses Combine's debouncing to only fire every 0.1 seconds at most.
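That plumbing might look something like this (a sketch; the property names are illustrative):
```swift
import Combine

// On the app delegate:
let devicesChanged = PassthroughSubject<Void, Never>()
private var cancellables = Set<AnyCancellable>()

// In applicationDidFinishLaunching:
devicesChanged
    .debounce(for: .seconds(0.1), scheduler: DispatchQueue.main)
    .sink { [unowned self] in
        self.updateDirectionForAutoMode()
    }
    .store(in: &cancellables)
```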
Actually changing the scroll direction is next, which requires figuring out what direction to change it to, based on the current device counts. This has a slight complication: laptops.
On a laptop, the trackpad is always connected, so if we were to switch to natural scrolling whenever a trackpad was connected, that would be useless. Similarly, for desktops, you can imagine a case where a mouse is always connected, so just switching to normal when a mouse is present would be similarly ineffectual in some cases. The solution? Doing both, and having a preference to let the user choose.
```swift
private func updateDirectionForAutoMode() {
    switch Preferences.autoMode {
    case .disabled:
        return
    case .normalWhenMousePresent:
        setDirection(mouseCount > 0 ? .normal : .natural)
    case .naturalWhenTrackpadPresent:
        setDirection(trackpadCount > 0 ? .natural : .normal)
    }
}
```
This way, on a laptop you can set it to "normal when mouse present" and on a desktop with an always-connected mouse you can set it to "natural when trackpad present".
I've been using this updated version of ScrollSwitcher for about a week now, and it's worked flawlessly. I haven't had to open System Preferences or even click the menu bar icon. If you're interested, the full source code for the app can be found here: <https://git.shadowfacts.net/shadowfacts/ScrollSwitcher>.

View File

@ -0,0 +1,98 @@
```
title = "M1 Max MacBook Pro Review"
tags = ["computers"]
date = "2022-01-06 15:37:42 -0400"
short_desc = "Apple finally made a truly great laptop."
slug = "m1-max"
```
Here's the review, if you're not going to read any farther than the first sentence: this is a damn good computer. I've had my M1 Max MBP (32 GPU cores, 64 GB RAM) for two months now and, aside from the time spent migrating things off my previous computer, it's been the only “real” computer I've used in that time.
<!-- excerpt-end -->
Before I get into the details, some context: My previous primary computer was a 16” Intel MacBook Pro (8 cores, 32 GB of memory, and a Radeon 5500M). I got it almost immediately when it came out in the fall of 2019, coming from the original 2012 15” Retina MacBook Pro. When I first got it, it was the best (for definitions of best relating to the specifics I care about, primarily CPU performance) laptop Apple had made. What's more, it was the first laptop to get rid of the accursed butterfly keyboard. I had zero complaints about it when I first got it, and for a good while afterwards.
But then, last winter, shortly after they came out, I [used](/2021/m1/) an M1 Mac mini (8-core, 16 GB RAM) for a couple months. For all I'd fawned over the 2019 MBP, the M1 Mac mini (as well as other people's reviews of the corresponding laptops) gave me a newfound disappointment in all of my laptop's shortcomings. Well, most of the issues I had actually stem from one particular shortcoming: the Intel processor. It was by no means a slouch; it was just pitifully constrained by the thermal design of the laptop. Any sustained workload would cause it to drop down to the base clock—though not throttle below, an improvement over previous generations—and leave a good deal of performance on the table (particularly when playing games, with the GPU also dumping heat into the shared cooler). The abysmal battery life and incredible noise and heat that went along with it didn't do anything to help (a laptop whose processor routinely runs at 100°C cannot rightfully be called a *lap*top). The M1, by contrast, had none of those issues. In addition to having better single-core performance, it was almost comically more power-efficient. At the end of my M1 review, I said that I was incredibly excited to see what the future of the new Mac architecture held, and I was not disappointed.
## Hardware
First off, the most important part: SoC performance. This machine handily beats my old laptop in literally everything I do.
I could list a bunch of artificial benchmarks for you to *oooh* and *ahhh* at, but that's not generally representative of what I actually use it for.
A full release build of Tusker, my iOS app for Mastodon, takes about 94 seconds on my Intel laptop. It's 44% faster on the M1 Max, taking 53 seconds. Debug builds with incremental compilation see a similar improvement. And, as was the case with the M1, where the single-core performance really shines is in reducing the feedback loop between making a code change and being able to see that reflected in the running app.
As with the Mac mini, everything just *feels* faster. No doubt some of that is a placebo, but not entirely. I can put my old and new laptops side by side and launch the same app, and the Apple Silicon one will appear on screen several seconds sooner. This snappiness extends to within apps too, especially ones using Catalyst which always felt a little bit unresponsive before.
The other incredible improvement Apple Silicon brings is the power and thermal efficiency. It's not quite as miraculous as the M1 Mini, whose fans I could never get to spin up, but it's still a vast improvement. I have to be pushing both the CPU and the GPU fairly hard (e.g., by playing a graphically intensive game) in order to get the fans to be audible. Ordinary, CPU-focused workloads don't cause the fans to ramp up and barely make the laptop feel warm. I once accidentally left a script running that was consuming 100% CPU all day because the fans weren't there to signal that something was amiss.
Compare that to the Intel MBP, whose fans sound like they're about to take flight if you so much as look at the machine wrong (seriously, the fans spun up to audible levels and the chassis felt burning hot while it was just in target disk mode).
This has big ramifications on the battery front. When I'd be using my Intel laptop on the go and not be expecting to charge for a while, I'd use [Turbo Boost Switcher](http://tbswitcher.rugarciap.com/) to disable Intel Turbo Boost and limit the CPU clock speed to the base level. This slightly degrades performance, but substantially improves battery life. The Apple Silicon laptop, by contrast, achieves in normal mode the battery life that the Intel machine did with turbo disabled. If I were to put this into Low Power Mode (which, as I understand it, partly works by limiting processor speed), I don't even know how long it would last.
### Display
The display on this computer is great. Having had a high-refresh rate external monitor for several years, my expectation was that the 120Hz support would be the thing I'd enjoy the most. But, that hasn't actually been the case. Sure, 120Hz is great, but over the past couple months, I've been using my laptop undocked more frequently, and what I've come to really appreciate is the true retina pixel density.
If you don't know, Apple laptops starting with the 2016 MacBook Pro have used non-integer scaling factors. That is, by default they ran at point resolutions which were more than half of the pixel resolution in each dimension. So, a software pixel mapped to some fraction of a hardware pixel, meaning everything had to be imprecisely scaled before actually going to the panel. People have been complaining about this for years, and I'd always dismissed it because I never observed the issue. But, in hindsight, that's because the vast majority of my laptop usage was with it docked to an external monitor and peripherals. In that scenario, the laptop's builtin display ends up physically far enough away from my eyes that I don't perceive any blurriness. But, since I've been using this laptop more as an actual laptop—bringing the screen a good foot or two closer to my eyes—I've noticed that text is undeniably crisper.
<aside>
Using the screen on this laptop, particularly when using it undocked and independent of an external monitor, has firmly convinced me of something I previously believed: the ideal monitor would be 5k (i.e., 2560x1440 at the Retina 2x pixel density), 27" diagonally, and 120Hz. My current external monitor is 1440p, 27", and 144Hz, and having used monitors of that size for years and years, I think it's the best combination of screen real-estate and physical size of UI elements. Using a 5k iMac screen in the office[^1] convinced me that high-DPI is very nice, even if you're just looking at text all day. And finally seeing a screen that is both high DPI and high refresh rate has validated that belief. I really hope that someone makes a monitor that manages to include both.
[^1]: haha, remember those
</aside>
All that said, 120Hz is of course also great. Scrolling and mousing around and animations all feel smoother, because they are. In particular, the trackpad feels even more responsive and natural. And the mini-LED backlight makes HDR content look *great*. Seriously, if I were watching a movie or something on my computer, I'd prefer to watch it on the smaller builtin display than my big external monitor just for the vastly better contrast and black levels. Some people have complained about haloing (when you can see the backlight zone illuminated because there's a small, bright object against a dark background), but I've never noticed it except when deliberately looking for it by moving the cursor around on a black screen.
The one very minor complaint I do have is that ghosting on the display seems noticeably worse than my external monitor. I don't have any way of objectively testing this, but moving the mouse cursor around seems to leave a longer trail. But again, I only actually notice that when I'm looking for it. In normal usage, and even in playing games, it isn't apparent.
And, lastly about the display, let's talk about the notch, since everyone needs to have an opinion on it. The short version is I don't give a crap about it. The longer version is I really do not give a crap about it. Since the first week I had the computer, this is the only time I've given it any thought. It sits in the middle of the menu bar where nothing's visible anyway (helped by having the 16” rather than the 14”, where big menus or many menubar items are more likely to overflow their respective halves), so it's never once caused a problem for me.
### Hardware Miscellany
MagSafe is wonderful; I'm very happy it's back. I get a little spark of joy when I walk up to my laptop and I see the little green dot on the connector. I pulled out my old laptop the other day to begin the process of erasing it before it can be sold, and because it had been sitting for so long, the battery was completely dead. I plugged in a USB-C charger and experienced a mild flash of annoyance that I had no way of knowing whether it was delivering power, other than to wait several minutes for the machine to boot up.
While I didn't hate the Touch Bar as much as some people, I never found it to be better than plain old function keys. Nonetheless, I'm perfectly happy that it's gone. My stupid minor gripe about the Touch Bar was that, when I'm using my computer docked with an external monitor and keyboard, the Touch Bar would remain on and active. That doesn't sound so bad, but it becomes an annoyance as I interact with apps and see the software buttons on the Touch Bar changing and flashing in the corner of my eye. The removal of the Touch Bar has dealt with that annoyance and has made absolutely no difference to my productivity when using the laptop on its own, so I'm happy.
The hardware changes with this machine can be divided into two categories: Apple Silicon-related and not. The non-Apple Silicon changes by themselves are fairly small, but they represent a marked quality-of-life improvement when just using the computer.
## Software
With the M1 Mac mini, I had both ARM-native Homebrew and Intel-under-Rosetta Homebrew installed, in case I needed to install tools from brew that only ran under Rosetta. This time, that's been entirely unnecessary[^2]. The one Rosetta package I installed last time, iPerf, now runs natively. In general, I've had to use Rosetta for far fewer things than I expected (with the notable exception of games), and even when I have, it's been impressively stable and performant.
[^2]: I've also largely switched from Homebrew to MacPorts, but that's a blog post for another time.
### Games
Video games are where the Apple Silicon software story gets complicated. Gaming on the Mac has always been a tenuous proposition and ARM has added a whole set of fun, new complications.
Here's my rule of thumb for guessing how well a game will run on macOS: If it's compiled for ARM or it uses Metal directly (not through a translation layer like OpenGL[^3] or MoltenVK), it'll probably run great. If not, all bets are off. If a game's either built for ARM or uses Metal, chances are someone has put at least some effort into getting it to work on Macs, so letting it have the most powerful CPU and GPU that's ever been in a Mac laptop (which come close to being the most powerful in a Mac, period) gives it a really good chance of running well.
[^3]: No, OpenGL itself is not a translation layer. But OpenGL on Apple Silicon works by translating everything to Metal.
Rise of the Tomb Raider and Shadow of the Tomb Raider both run under Rosetta and use Metal, and they run shockingly well. Each can manage a fairly consistent 60 FPS at 1440p on high graphical settings. The HDR support in Shadow of the Tomb Raider even worked with the built-in display. It wasn't quite the difference in visual fidelity that I expected, but I was pleasantly surprised it worked at all.
Minecraft runs well, once you've got a JVM installed that's built for ARM and the LWJGL natives swapped out with ones compiled for ARM, since the set Minecraft ships isn't. ([These](https://gist.github.com/nikhiljha/7313ac5553aafb1c8596b1fca0f4cdff) instructions explain how to replace the native libs when using MultiMC[^4]. Though I use the Temurin JDK, not whatever's in Homebrew.)
[^4]: Those only apply for Minecraft versions recent enough to use LWJGL 3. Getting earlier versions running natively is possible, but a fair bit more involved. Perhaps a subject for [another time](/2022/lwjgl-arm64/).
Cities: Skylines doesn't see much of a graphical difference, but does benefit from the faster CPU. My old laptop can't simulate the most recent city I built at 3x speed without things like vehicle movement appearing noticeably jerky, whereas the Apple Silicon one can handle it. That said, I haven't spent enough time playing C:S on it to know if the ceiling (that is to say, how much farther I could grow my city before I encountered performance issues) is substantially higher.
I haven't tried many others, but I'm fairly confident that anything that previously ran on macOS and isn't too graphically demanding will run well. That includes games like Hades and Into the Breach.
### Software Miscellany
Beyond games, 1Password 6 remains the only app I regularly use that needs Rosetta and it continues to work flawlessly.
Being able to run iOS apps natively is awesome, even if I don't use it terribly often. The iOS app I use most on the Mac is Overcast, my preferred podcast player. It's quite nice to be able to get notifications when new episodes are available right on my computer, rather than having to check my phone, as well as being able to listen without using the web interface.
Electron apps that are compiled for ARM, such as Spotify, are more responsive too. But it's mostly a reflection of the performance/efficiency of Apple Silicon that it's able to compensate for the bloat that is Electron. However, when it comes to Electron/browser-based apps that aren't compiled for ARM (*cough* Steam *cough*), things aren't as good. Steam does run and functions properly, but interacting with anything it uses embedded Chromium for (most of the application) is painfully slow and unresponsive.
One of my few complaints about the M1 Mac mini was resolved with the release of macOS Monterey: Apple Silicon Macs can now use DDC commands to control external displays. Not being able to control the brightness of my external monitors was never a huge issue, but it was a persistent inconvenience that I'm glad has been resolved.
## Conclusion
Overall, this is a fantastic computer. Apple Silicon means it's vastly faster and more efficient than any previous Mac laptop. As with last year, I'm impressed by how much software is already native—just a year and a half into the Mac's ARM transition—and how well Rosetta 2 works for software that isn't. Beyond Apple Silicon, this laptop is an upgrade in every single way over the few preceding generations, which felt like a big regression. Two laptops ago, I was using the 7.5 year old 2012 Retina MacBook Pro: the first laptop of a new generation of MacBooks. I'm hopeful that with all these long-standing issues resolved, this machine will last a similarly long time.

View File

@ -0,0 +1,102 @@
```
title = "Fixing Scroll Indicators in Non-Opaque WKWebViews"
tags = ["swift"]
date = "2022-01-14 19:08:42 -0400"
short_desc = "3 out of 5 stars, would swizzle again"
slug = "wkwebview-scroll-indicators"
```
**Update: Since this post was published, the situation has changed and the workaround presented here is no longer valid. See the [follow-up](/2022/wkwebview-scroll-indicators-again/).**
Here's a stupid bug I ran into recently: if you've got a WKWebView in an iOS app and it shows something that's not a normal webpage[^1], the scroll indicator appearance switching doesn't work. What I mean by that is, while the indicators appear correctly (with a dark color) when the system is in light mode, they do not take on a light color when dark mode is enabled. This renders the scroll indicators invisible against dark backgrounds, which can be annoying if you're using the web view to display potentially lengthy content.
[^1]: I say this because the only way I've tested it is by generating some HTML and giving it to `loadHTMLString(_:baseURL:)`. It's entirely possible this is not a factor.
<!-- excerpt-end -->
Let's say you've got a web view with some content that you want to match the system color scheme. The simplest way to do that is to set the web view's background color to `.systemBackground` and add the following to the page's stylesheet (to make the default text color change with the theme):
```css
:root {
    color-scheme: light dark;
}
```
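On the native side, the corresponding setup is minimal (a sketch; `webView` is the `WKWebView` in question):
```swift
// A non-opaque web view whose background follows the system theme.
webView.isOpaque = false
webView.backgroundColor = .systemBackground
```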
If you haven't changed the page's `background-color` in CSS, this will correctly make both the background and text colors follow the system theme. Unfortunately, it has no effect on the scroll indicator. Fixing that, it turns out, is rather involved.
The obvious thing to do would be to just set the `indicatorStyle` to `.default` on the web view's internal scroll view, which should theoretically make the indicators automatically adjust to the color of the content inside the scroll view. Try this, however, and you'll find that it does not work.
Regardless of where you set the property (in `viewDidLoad`, `viewWillAppear`, etc.) it appears to never be respected. Launching the app and checking `webView.scrollView.indicatorStyle` when paused in the view debugger reveals why: it's been changed to `.black`:
```txt
(lldb) po [(UIScrollView*)0x7fcc9e817000 indicatorStyle]
UIScrollViewIndicatorStyleBlack
```
Because WebKit—including, surprisingly, some of the iOS platform-specific parts of it—is open source, we can go poking through it and try to figure out why this is happening. Looking for files with names like `WKWebView` in the WebKit source code reveals the promising-sounding [`WKWebViewIOS.mm`](https://github.com/WebKit/WebKit/blob/c2eb26fec22295b1bbd1d790f8492072b2c05447/Source/WebKit/UIProcess/API/ios/WKWebViewIOS.mm#L540-L556). Searching in that file for `indicatorStyle` reveals this lovely method:
```objective-c++
- (void)_updateScrollViewBackground
{
    auto newScrollViewBackgroundColor = scrollViewBackgroundColor(self, AllowPageBackgroundColorOverride::Yes);
    if (_scrollViewBackgroundColor != newScrollViewBackgroundColor) {
        _scrollViewBackgroundColor = newScrollViewBackgroundColor;

        auto uiBackgroundColor = adoptNS([[UIColor alloc] initWithCGColor:cachedCGColor(newScrollViewBackgroundColor)]);
        [_scrollView setBackgroundColor:uiBackgroundColor.get()];
    }

    // Update the indicator style based on the lightness/darkness of the background color.
    auto newPageBackgroundColor = scrollViewBackgroundColor(self, AllowPageBackgroundColorOverride::No);
    if (newPageBackgroundColor.lightness() <= .5f && newPageBackgroundColor.isVisible())
        [_scrollView setIndicatorStyle:UIScrollViewIndicatorStyleWhite];
    else
        [_scrollView setIndicatorStyle:UIScrollViewIndicatorStyleBlack];
}
```
"Update the indicator style based on the lightness/darkness of the background color." Ah. That explains it. It's examining the background color and using its lightness to explicitly set the scroll view's indicator style to either white or black.
And why's it overriding the `.default` that we set? Well, that method is called from (among other places) `_didCommitLayerTree`, which, as I understand it, is called whenever the remote WebKit process sends the in-process web view a new layer tree to display—so basically continuously.
Knowing that WebKit is using the page's background color to set the indicator style, the answer seems simple, right? Just set the page's background color in CSS to match the current color scheme. Wrong: setting `background-color: black;` on the `body` does not alter the scroll indicator color.
And why is that? It's because `scrollViewBackgroundColor` ignores all other factors and returns a transparent black color if the web view is non-opaque. And since the `isVisible` method returns false for fully transparent colors, the scroll indicator style is being set to black.
Now, this is where the [minor iOS crimes](https://social.shadowfacts.net/notice/ABya4BV7zlJZrNKMkK) come in. If you can't make the framework do what you want, change (read: swizzle) the framework.
So, in the `didFinishLaunching` app delegate callback, I swizzle the `_updateScrollViewBackground` method of `WKWebView`. My swizzled version calls the original implementation and then sets the scroll indicator mode back to `.default`, superseding whatever WebKit changed it to. And this finally makes the scroll indicator visible in dark mode.
```swift
private func swizzleWKWebView() {
    let selector = Selector(("_updateScrollViewBackground"))
    var originalIMP: IMP?
    let imp = imp_implementationWithBlock({ (self: WKWebView) in
        if let originalIMP = originalIMP {
            let original = unsafeBitCast(originalIMP, to: (@convention(c) (WKWebView, Selector) -> Void).self)
            original(self, selector)
        }
        self.scrollView.indicatorStyle = .default
    } as (@convention(block) (WKWebView) -> Void))
    originalIMP = class_replaceMethod(WKWebView.self, selector, imp, "v@:")
    if originalIMP == nil {
        os_log(.error, "Missing originalIMP for -[WKWebView _updateScrollViewBackground], did WebKit change?")
    }
}
```
A couple things to note about this code:

- `originalIMP` is optional because `class_replaceMethod` returns nil if the method does not already exist, though it still adds our new implementation. If this happens, I log a message because it probably means that WebKit has changed and hopefully this hack is no longer necessary (or that it may need updating).
- The `unsafeBitCast` is necessary because an [`IMP`](https://developer.apple.com/documentation/objectivec/objective-c_runtime/imp) is in fact a C function pointer, but it's imported into Swift as an `OpaquePointer`.
- The Swift closure is cast to an Objective-C block because, although `imp_implementationWithBlock` is imported into Swift as taking a parameter of type `Any`, what it really needs, as the name implies, is a block.
- The `"v@:"` string is the Objective-C [type encoding](https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html#//apple_ref/doc/uid/TP40008048-CH100) of the method's signature (it returns void, takes the self instance, and the selector for the method being called).
And with that annoying workaround, the scroll indicator is finally visible in dark mode (and updates correctly when switching color schemes). Given that `WKWebView` gives some consideration to the system color scheme, I'm inclined to think this is a bug, so hopefully it'll be fixed eventually. Alternatively, as I noted, that part of WebKit is in fact open source, so an intrepid developer could fix it themself...

View File

@ -0,0 +1,151 @@
```
title = "Run LWJGL 2 Natively on Apple Silicon"
tags = ["minecraft"]
date = "2022-01-17 10:28:42 -0400"
short_desc = "Yes, I wanted to go back and play Minecraft 1.7.10. Don't judge."
slug = "lwjgl-arm64"
```
Running Minecraft 1.13 and later natively on Apple Silicon Macs isn't terribly complicated. Since those versions use LWJGL 3, which includes arm64 macOS as a supported platform, you can just [download](https://www.lwjgl.org/customize) the appropriate version, replace the LWJGL version info in MultiMC, and you're off to the races. The end of maintenance for LWJGL 2, however, long predates the ARM transition, and so getting Minecraft versions prior to 1.13 up and running requires a bit more work.
<!-- excerpt-end -->
First off: This article assumes you're using MultiMC and already have the ARM [Zulu 8 JDK](https://www.azul.com/downloads/?version=java-8-lts&os=macos&architecture=arm-64-bit&package=jdk) installed[^1].
[^1]: If you want to play modded, a Java 8 runtime is necessary even if you've already got a newer JRE installed since older versions of FML are not compatible with the Project Jigsaw changes introduced in Java 9, even though Minecraft itself is.
There are guides on the internet that tell you to download some precompiled libraries and run Minecraft through a wrapper script, but doing that doesn't sit right with me. Didn't your mother ever tell you not to download executables from strangers on the internet[^2]? So, I wanted to actually compile LWJGL 2 and its natives from source myself.
[^2]: Yes, playing with mods is doing just that. However, I feel there's a big difference between downloading mods that are part of widely played modpacks and downloading native binaries from completely random people.
You can find the source code for my modified version of LWJGL 2 [here](https://github.com/shadowfacts/lwjgl2-arm64) (and you can [compare](https://github.com/LWJGL/lwjgl/compare/master...shadowfacts:master) against upstream to see I haven't made any malicious changes).
If you're interested, here's a brief overview of the changes that were necessary. If not, [skip ahead](#building-everything).
First, there are a bunch of changes to the Ant `build.xml`: `javah` (which no longer exists) was replaced with `javac -h`, and the source/target versions were bumped, since versions less than 1.7 aren't supported when compiling with a Java 17 JDK. In the `build.xml` for the native library, the big changes are the macOS SDK being in a different place and that we're compiling against the Zulu JVM, not the Apple one.
In `MacOSXSysImplementation.java`, a since-removed class was being used to ask the OS to open URLs. That's been replaced with a JNI function (see `org_lwjgl_MacOSXSysImplementation.m`).
In `MemoryUtilSun.java`, an accessor implementation that was using a removed Sun-internal class was deleted (it was only used as a fallback, so it wasn't replaced with anything).
Some applet code that was using a removed Java archive format was commented out (irrelevant because the applet build is disabled altogether).
Lastly in Java-land, there were a bunch of casts from `java.nio.ByteBuffer` to `Buffer` added in places where methods ByteBuffer inherits from Buffer (e.g., `flip`) are being called, so that `buf.flip()` becomes `((Buffer) buf).flip()`. This is because, in between Java 8 and 17, ByteBuffer itself gained overriding implementations of those methods, and so trying to use an LWJGL jar built against Java 17 on Java 8 would result in a `NoSuchMethodError`.
In the native code, there are a few more changes.
First, a `JAWT_MacOSXDrawingSurfaceInfo` struct is defined. It's used for the NSView-backed drawing mode that Minecraft uses on Mac. This was previously defined in the JDK headers; however, it's no longer present. An [old version](https://github.com/phracker/MacOSX-SDKs/blob/master/MacOSX10.7.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers/jawt_md.h#L42) of the Apple JVM headers says that it is being removed, however it is clearly still implemented internally (fortunately for us).
The other significant change is a bunch of AppKit calls being wrapped in `dispatch_sync` blocks since they were previously being called from background threads, which AppKit Does Not Like. This solution is rather less than ideal. I suspect `dispatch_sync` is part of the [significant performance delta](https://social.shadowfacts.net/notice/AF0rlm8MrA8fSo1Qa8) between LWJGL3 versions and earlier ones, because it blocks the render thread until the work on the main thread completes. I tried using the `-XstartOnFirstThread` JVM argument so that the Minecraft client thread would be the same as the AppKit main thread, however that only caused more problems.
Finally, high DPI mode is disabled altogether. I spent some time trying to get it working, but whenever I'd launch the game it would only render at 50% scale. And frankly, I don't care enough about high DPI mode to spend even more time debugging it.
To get Minecraft up and running, there's actually a second library that we also need to compile for arm64: jinput. My fork's source code can be found [here](https://github.com/shadowfacts/jinput-arm64) ([compare against upstream](https://github.com/jinput/jinput/compare/master...shadowfacts:master)).
The changes for this one are much simpler, thankfully. A bunch of stuff that we don't need is disabled in the various maven files.
Then, one (1) unused import for a no-longer-extant Java stdlib class was removed.
Lastly, the `build.xml` for the macOS platform-specific module was changed similarly to the LWJGL one, to make it use the macOS SDK in the correct location and link against the Zulu 8 JDK rather than the system JavaVM framework.
## Building Everything
You'll need [Ant](https://ant.apache.org/) and [Maven](https://maven.apache.org/) installed if you don't have them already. You also need to have a current version of Xcode downloaded.
### LWJGL
1. Clone the repo: `git clone https://github.com/shadowfacts/lwjgl-arm64.git`
2. Run `ant generate-all` to generate some required Java files from templates.
3. Run `ant jars` to build the LWJGL jar
4. Run `ant compile_native` to build the native libraries
5. In the `libs/macosx/` folder inside your LWJGL clone, select `liblwjgl.dylib` and `openal.dylib` in Finder, right-click, and select Compress. Then rename the created `Archive.zip` to `lwjgl-platform-natives-osx.jar`.
### jinput
1. Clone the repo: `git clone https://github.com/shadowfacts/jinput-arm64.git`
2. Run `mvn package` to build everything
## Setup MultiMC
In MultiMC, create an instance of whichever pre-LWJGL3 version you want and make sure it's configured to use the ARM Java 8 you installed. Then, in the Version section of the Edit Instance window, click the Open Libraries button in the sidebar. To the folder that opens, copy the `lwjgl-platform-natives-osx.jar` you created earlier. Then, copy `osx-plugin-2.0.10-SNAPSHOT-natives-osx.jar` from inside the `plugins/OSX/target` folder of the jinput directory as well.
Next, in the Version section of the Edit Instance window, select LWJGL 2 and click the Customize and then Edit buttons in the sidebar.
The file that opens needs to be modified to point to local versions of the libraries we compiled. Replace its contents with the JSON below.
<details>
<summary>Click me to expand for full LWJGL 2 version JSON.</summary>
```json
{
    "formatVersion": 1,
    "libraries": [
        {
            "name": "net.java.jinput:jinput-platform:2.0.10-SNAPSHOT",
            "natives": {
                "linux": "natives-linux",
                "osx": "natives-osx",
                "windows": "natives-windows"
            },
            "MMC-hint": "local",
            "MMC-filename": "osx-plugin-2.0.10-SNAPSHOT-natives-osx.jar"
        },
        {
            "downloads": {
                "artifact": {
                    "sha1": "39c7796b469a600f72380316f6b1f11db6c2c7c4",
                    "size": 208338,
                    "url": "https://libraries.minecraft.net/net/java/jinput/jinput/2.0.5/jinput-2.0.5.jar"
                }
            },
            "name": "net.java.jinput:jinput:2.0.5"
        },
        {
            "downloads": {
                "artifact": {
                    "sha1": "e12fe1fda814bd348c1579329c86943d2cd3c6a6",
                    "size": 7508,
                    "url": "https://libraries.minecraft.net/net/java/jutils/jutils/1.0.0/jutils-1.0.0.jar"
                }
            },
            "name": "net.java.jutils:jutils:1.0.0"
        },
        {
            "name": "org.lwjgl.lwjgl:lwjgl-platform:2.9.4-nightly-20150209",
            "natives": {
                "linux": "natives-linux",
                "osx": "natives-osx",
                "windows": "natives-windows"
            },
            "MMC-hint": "local",
            "MMC-filename": "lwjgl-platform-natives-osx.jar"
        },
        {
            "name": "org.lwjgl.lwjgl:lwjgl:2.9.4-nightly-20150209",
            "MMC-hint": "local",
            "MMC-filename": "lwjgl.jar"
        },
        {
            "downloads": {
                "artifact": {
                    "sha1": "d51a7c040a721d13efdfbd34f8b257b2df882ad0",
                    "size": 173887,
                    "url": "https://libraries.minecraft.net/org/lwjgl/lwjgl/lwjgl_util/2.9.4-nightly-20150209/lwjgl_util-2.9.4-nightly-20150209.jar"
                }
            },
            "name": "org.lwjgl.lwjgl:lwjgl_util:2.9.4-nightly-20150209"
        }
    ],
    "name": "LWJGL 2",
    "releaseTime": "2017-04-05T13:58:01+00:00",
    "type": "release",
    "uid": "org.lwjgl",
    "version": "2.9.4-nightly-20150209",
    "volatile": true
}
```
</details>
From there, you should be able to launch Minecraft natively on ARM:
<img src="/2022/lwjgl-arm64/minecraft.png" alt="Minecraft 1.7.10 running natively on ARM">
```
title = "Using lol-html (or any Rust crate) in Swift"
tags = ["swift", "rust"]
date = "2022-01-20 12:44:42 -0400"
short_desc = "Works on real devices, the simulator, and Mac Catalyst."
slug = "swift-rust"
```
I recently started building a new iOS app and found myself with a need to parse HTML in order to extract some information. My go-to tool for this in the past has been [SwiftSoup](https://github.com/scinfu/SwiftSoup). In this app, I have to deal with larger documents than I'd used it for previously, and unfortunately, its performance leaves something to be desired. Much of the issue comes from the fact that I only want to extract the first paragraph of a document, but SwiftSoup always needs to parse the entire thing—for large documents, potentially a lot of unnecessary work[^1]. And, as far as I could find, there are no streaming HTML parsers written in Swift. One I did find, however, was CloudFlare's [lol-html](https://github.com/cloudflare/lol-html). It's specifically designed for speed and low latency, exactly what I want. But it's written in Rust.
[^1]: I [benchmarked](https://git.shadowfacts.net/shadowfacts/frenzy-ios/src/commit/e242510c5e601ec309acc4ab5a53972f4a2878cd/ReaderTests/ReaderTests.swift#L27-L53) it, and for an average-length document, using lol-html to extract the first paragraph winds up being two orders of magnitude faster than SwiftSoup. And that dramatic difference only increases for longer documents.
<!-- excerpt-end -->
Getting a Rust library compiled into a form that it could be used from Swift didn't turn out to be as complicated as I expected, but both Apple Silicon and Mac Catalyst introduced ~~fun~~ wrinkles.
This [blog post](https://mozilla.github.io/firefox-browser-architecture/experiments/2017-09-06-rust-on-ios.html) from Mozilla was helpful in getting started, but things have changed somewhat in the years since it was written.
The first thing you need to do is install the appropriate targets for the Rust toolchain to let it build targeting iOS devices.
`aarch64-apple-ios` works for all actual devices. Additionally, to build for the iOS Simulator, you also need the `aarch64-apple-ios-sim` target (if you're on Apple Silicon) or `x86_64-apple-ios` (for Intel Macs).
```sh
$ rustup target add aarch64-apple-ios aarch64-apple-ios-sim x86_64-apple-ios
```
To build a Rust project, you need a library crate with the `crate-type` set to `staticlib` so that it can be statically linked by the iOS app. The Rust library also needs an API that's callable from C, i.e. using `#[no_mangle]` and `extern "C"` (outside the scope of this post). Fortunately for me, lol-html already includes such an API.
Building for iOS is done by running `cargo build` with the appropriate `--target` option. For example:
```sh
$ cargo build --release --target aarch64-apple-ios-sim
```
With the Rust library built, the next step is configuring Xcode to use it. In the app target's build settings, the Header Search Paths needs to be updated to include the path to the C headers that correspond to the C-API that the Rust library exposes. In my case, that's `lol-html/c-api/include/`.
That'll get it working if you want to call it from C or Objective-C code in your project. To make it accessible from Swift, you need to add a bridging header that imports whatever library headers you need. For lol-html, there's only `lol_html.h`. This will make Rust library's functions directly accessible from all of the app target's Swift files.
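As a sketch of what that looks like end to end (the `add_numbers` function here is hypothetical, not part of lol-html's C API):

```swift
// Hypothetical Rust export, for illustration:
//
//     #[no_mangle]
//     pub extern "C" fn add_numbers(a: i32, b: i32) -> i32 {
//         a + b
//     }
//
// With a matching C declaration imported via the bridging header, Swift
// sees it as an ordinary function operating on Int32:
let sum = add_numbers(2, 3)
print(sum) // 5
```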
This is enough to compile it successfully, but to actually link the output into a runnable app, we need to tell the linker where to find the library.
With a normal library, you could add the static library .a to the Xcode target's "Frameworks, Libraries, and Embedded Content". But, because Cargo puts the build products in separate directories depending on which platform it targets (e.g., `target/aarch64-apple-ios/release/liblolhtml.a`), we need to do it slightly differently. Just adding one of the liblolhtml.a's to the Xcode target would make the linker try to always link against that specific one regardless of which platform the iOS app is building for. Instead, I modified the "Other Linker Flags" build setting to include `-llolhtml` and then updated the "Library Search Paths" setting on a per-platform basis to tell it 1) that it needs to link against something called `liblolhtml.a` and 2) where exactly that file can be found.
Configuring the Library Search Paths build setting is kind of annoying, because the Xcode UI doesn't fully match what the `.pbxproj` file can actually describe. Clicking the plus button next to a build setting in Xcode lets you pick for which SDKs the setting value applies. But we also need to narrow that down to specific architectures, because the Intel and Apple Silicon simulator builds need different versions of the library.
The easiest way I've found to do this is to go into the Build Settings tab of the Xcode target, find Library Search Paths, expand it, and click the little plus button next to each of Debug and Release. (If you click on the "Any Architecture | Any SDK" dropdown, you'll see what I mean about not being able to actually specify the architecture from the UI.)
<img src="<%= metadata.permalink %>/search-paths-empty.png" alt="The Library Search Paths setting in Xcode showing empty values under Debug and Release">
Then, open the `project.pbxproj` file in a text editor. I recommend closing the Xcode project before making any changes to this file. Search for the newly added line starting with `"LIBRARY_SEARCH_PATHS[arch=*]"` and replace it with the following. There will be two occurrences of that line (for the debug and release configurations) and both need to be replaced.
```txt
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/lol-html/c-api/target/aarch64-apple-ios/release/";
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*][arch=arm64]" = "$(PROJECT_DIR)/lol-html/c-api/target/aarch64-apple-ios-sim/release/";
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*][arch=x86_64]" = "$(PROJECT_DIR)/lol-html/c-api/target/x86_64-apple-ios/release/";
```
You'll need to replace the `lol-html/c-api` part with the actual path to the library you're using. This will tell Xcode to use the `aarch64-apple-ios` version for all actual iOS device targets, and the appropriate simulator version depending on the architecture.
After that, you should be able to re-open the project in Xcode and see all the configurations you added in Build Settings.
<img src="<%= metadata.permalink %>/search-paths-ios.png" alt="The Library Search Paths setting in Xcode showing values for any iOS SDK, and for arm64 and x86_64 variants of the simulator SDK">
With that, you should be able to use the Rust library from your Swift code and successfully build and run your app in both the simulator and on a real device.
## Mac Catalyst
My first attempt at getting Catalyst builds to work was just by using the normal Mac targets for the Rust library (e.g., `aarch64-apple-darwin`). But that results in a link error when Xcode builds the app, because it considers binaries built for Catalyst to be distinct targets from regular macOS.
The separate Rust targets for Catalyst are `aarch64-apple-ios-macabi` and `x86_64-apple-ios-macabi` for ARM and Intel respectively. As of writing, these are [tier 3 targets](https://doc.rust-lang.org/nightly/rustc/platform-support.html#tier-3), which means the Rust project doesn't provide official builds. This, in turn, means to use them you have to build the standard library from source yourself.
Doing so requires a Rust Nightly feature, [build-std](https://doc.rust-lang.org/cargo/reference/unstable.html#build-std), to let Cargo include the standard library in the crate graph for compilation. So, with Nightly installed (`rustup toolchain install nightly`) and the std source downloaded (`rustup component add rust-src --toolchain nightly`), you can run the following command to build for a specific target with the standard library built from source:
```sh
$ cargo +nightly build -Z build-std=std,panic_abort --release --target aarch64-apple-ios-macabi
```
This separate set of platform/arch combinations requires another set of additions to the Xcode project file, in the same place as before:
```txt
"LIBRARY_SEARCH_PATHS[sdk=macosx*][arch=arm64]" = "$(PROJECT_DIR)/lol-html/c-api/target/aarch64-apple-ios-macabi/release";
"LIBRARY_SEARCH_PATHS[sdk=macosx*][arch=x86_64]" = "$(PROJECT_DIR)/lol-html/c-api/target/x86_64-apple-ios-macabi/release";
```
With that added, the build setting should have values configured for iOS, ARM and Intel Simulators, and ARM and Intel Catalyst:
<img src="<%= metadata.permalink %>/search-paths-catalyst.png" alt="The Library Search Paths setting in Xcode showing values for all platform and architecture combinations">
I initially thought handling universal builds (i.e., combining arm64 and x86_64 into one binary) of the Catalyst app would be complicated, like I would have to lipo them together myself, but it turned out to be entirely painless. Just having the built static libraries for both architectures present in their expected locations is enough. Xcode's build process takes care of linking each architecture of the app with the respective version of the Rust library and then combining those into one universal package.
## Build Script
Keeping track of all of those Rust build targets and making sure to rebuild the right ones if anything changes is rather annoying, so I wrote a little script for Xcode to run to take care of it.
It uses the environment variables provided by Xcode to figure out which platform and architecture(s) are being targeted and build the appropriate Rust targets.
```bash
pushd "$PROJECT_DIR/lol-html/c-api/"

build() {
    echo "Building lol-html for target: $1"
    ~/.cargo/bin/cargo build --release --target "$1"
}

build_std() {
    echo "Building lol-html with std for target: $1"
    ~/.cargo/bin/cargo +nightly build -Z build-std=panic_abort,std --release --target "$1"
}

if [ "$PLATFORM_NAME" == "iphonesimulator" ]; then
    if [ "$ARCHS" == "arm64" ]; then
        build "aarch64-apple-ios-sim"
    elif [ "$ARCHS" == "x86_64" ]; then
        build "x86_64-apple-ios"
    else
        echo "error: unknown value for \$ARCHS"
        exit 1
    fi
elif [ "$PLATFORM_NAME" == "iphoneos" ]; then
    build "aarch64-apple-ios"
elif [ "$PLATFORM_NAME" == "macosx" ]; then
    if grep -q "arm64" <<< "$ARCHS"; then
        build_std "aarch64-apple-ios-macabi"
    fi
    if grep -q "x86_64" <<< "$ARCHS"; then
        build_std "x86_64-apple-ios-macabi"
    fi
else
    echo "error: unknown value for \$PLATFORM_NAME"
    exit 1
fi
```
One thing to note is that when building the universal Mac target, `$ARCHS` has the value `arm64 x86_64`. So I check whether the string contains the target architecture, rather than testing strict equality, and I don't use `elif` in the Mac branch so that both architectures get built.
I have it configured to not bother with any of the dependency analysis stuff: Cargo takes care of only rebuilding when something's changed, and when nothing has, the time it takes is negligible, so running on every incremental build is fine.
With the script added to Build Phases in Xcode (for some reason it needs to come not just before Link Binary with Libraries but also before Compile Sources), I can run `cargo clean` in the Rust project directory and then seamlessly build and run from Xcode.
```
title = "Re-Fixing WKWebView Scroll Indicators"
tags = ["swift"]
date = "2022-01-30 21:23:42 -0400"
short_desc = "Swizzling wasn't worth it, I should have just waited a few more weeks."
slug = "wkwebview-scroll-indicators-again"
```
As my luck would have it, just a few weeks after I published my [last post](/2022/wkwebview-scroll-indicators/) on this topic, the iOS 15.4 beta came out which broke that hack and once again made my scroll indicators invisible in dark mode.
<!-- excerpt-end -->
Some time ago, a bug was filed against WebKit because setting `scrollIndicatorStyle` on a web view's scroll view was broken on iOS 15. The fix for this bug landed in iOS 15.4 and it subtly changed the behavior of WKScrollView when it comes to the indicator style.
The bug was fixed by tracking whether the web view client has overridden the scroll indicator style and, if so, blocking the web view from resetting it internally. Unfortunately, it [does this](https://github.com/WebKit/WebKit/blob/1dbd34cf01d8b5aedcb8820b13cb6553ed60e8ed/Source/WebKit/UIProcess/ios/WKScrollView.mm#L247) by checking if the new indicator style is not `.default`. So, even if you set it to `.default` to make it automatically switch based on system appearance, the scroll view will interpret that to mean the indicator style hasn't been overridden and continue erroneously setting it based on background color (or, in my case, the non-opaqueness of the web view).
The solution is simple, if annoying. You need to check the current user interface style and select the appropriate scroll indicator style yourself.
```swift
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    updateScrollIndicatorStyle()
}

override func traitCollectionDidChange(_ previousTraitCollection: UITraitCollection?) {
    super.traitCollectionDidChange(previousTraitCollection)
    updateScrollIndicatorStyle()
}

private func updateScrollIndicatorStyle() {
    guard #available(iOS 15.4, *) else {
        // different workaround pre-iOS 15.4
        return
    }
    if traitCollection.userInterfaceStyle == .dark {
        webView.scrollView.indicatorStyle = .white
    } else {
        webView.scrollView.indicatorStyle = .black
    }
}
```
And, if you, like me, were previously using the old swizzling workaround, you need to disable it on iOS 15.4. If the old workaround remains active, studiously setting the indicator style to `.default` whenever WebKit would override it, it will merely undo all of our hard work.
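In case it's useful, this is roughly how I gate the two workarounds (`installIndicatorStyleSwizzle()` is a made-up name standing in for the old workaround's entry point):

```swift
// A minimal sketch: only install the old swizzling workaround on versions
// where WebKit's fix isn't present.
func setUpScrollIndicatorWorkaround() {
    if #available(iOS 15.4, *) {
        // WebKit now respects an overridden style; the trait-based
        // updateScrollIndicatorStyle() above is all that's needed.
    } else {
        installIndicatorStyleSwizzle() // hypothetical: the pre-15.4 swizzle
    }
}
```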
```
title = "Swift Packages and Frameworks"
tags = ["swift"]
date = "2022-02-23 21:23:42 -0400"
slug = "swift-package-framework"
```
Tusker is divided up into two main parts: the app target itself and a separate framework which encapsulates everything that deals with the Mastodon API. I recently added a Swift Package to the app for uninteresting reasons. But, because the package is used both by the framework as well as the app itself, this caused a surprising number of problems.
<!-- excerpt-end -->
Adding the package to the app went perfectly well. I added it in Xcode, set the framework and app to depend on it, and then got to building. Everything worked and ran perfectly normally. But, when the time came to publish a new build to TestFlight, the issues started appearing.
Upon uploading, App Store Connect returned an error telling me that the framework for the Swift Package I'd added wasn't code signed. This was surprising for a couple reasons: first, Swift Packages are generally statically linked (meaning they're compiled directly into the binary that uses them) rather than shipping as separate, dynamically-linked frameworks. Second, my Xcode project is set up to automatically handle code signing. Why would it be skipping the framework?
The answer is that it wasn't. The framework for the package was getting signed perfectly fine. Just not the right one.
It seems having multiple targets that depend on a Swift package causes Xcode to dynamically link it. As with other frameworks, the framework for the package gets built and embedded in the `Frameworks/` folder of the app that depends on it.
But the app isn't the only thing that depends on it. The package framework was also getting embedded inside _my_ framework before it was in turn being embedded in the app.
```txt
Tusker.app
└── Frameworks
├── Pachyderm.framework
│ └── Frameworks
│ └── WebURL.framework
└── WebURL.framework
```
Xcode was properly signing the app's frameworks; it just wasn't signing the nested ones. Hence the App Store Connect error.
"No problem," I naively thought, "I'll just add a Run Script build phase to codesign the nested one myself." Yes problem. Turns out App Store Connect entirely rejects nested frameworks, even if they're correctly signed.
So, I changed the script to entirely delete the nested Frameworks directory (this is fine at runtime because the runpath search paths includes the top-level Frameworks dir), which finally convinced App Store Connect to accept my build.
```sh
if [ "$(ls "$BUILT_PRODUCTS_DIR/Tusker.app/Frameworks/Pachyderm.framework/Frameworks/")" -ne "WebURL.framework" ]; then
echo "error: unexpected framework inside Pachyderm, make sure it's embedded directly in the app"
exit 1
fi
rm -rf "$BUILT_PRODUCTS_DIR/Tusker.app/Frameworks/Pachyderm.framework/Frameworks/"
```
You might think that's where this story ends, but, sadly, it's not. I noticed when downloading the new TestFlight build that the app was up to 25 megabytes in size. That might not sound like much, but the previous version was around 5MB and I hadn't added anything that should have caused a quintupling in size.
Looking at the archive and comparing it to a previous one, it was clear that almost all of the increase was coming from the package framework.
I'd previously tried out this Swift package in a small test app, so I went back to compare its size to the new version of Tusker. The test project was nowhere near as big—just two megabytes.
The sole relevant difference between the two projects, as far as I can tell, is whether the Swift package is linked statically or dynamically. My best guess for the app size difference is that when the package is linked dynamically, dead code elimination can't happen across module boundaries. Anything declared as `public`—or used by something `public`—must be kept, because you don't know which parts of it the ultimate consumer needs. When linked statically, `public`-but-unused code can be stripped, which can result in significant size savings if you're not using the entire API surface of a package.
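To illustrate what I mean (a toy example, not code from the actual package):

```swift
// Both functions are public, so when the package ships as its own dynamic
// framework, both must be kept: the linker can't know what the eventual
// consumer will call.
public func usedByApp() -> Int { 1 }
public func neverCalled() -> Int { 2 }

// When the package is statically linked into the app, the linker can see
// that nothing references neverCalled() and strip it (along with anything
// only it uses).
```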
## Addendum
I tried two (2) methods for getting Xcode to statically link everything, in hopes of bringing the binary size back down.
The first method was changing my own framework from being, well, a framework to a Swift package. In theory, this should mean that everything gets statically linked into just the app binary. This should be straightforward, but I want the framework/package code to live in the same Git repo alongside the app, as no one else uses it and versioning it separately is a pain. Xcode... does not like this.
You can create a new package in the same repo as an existing project from Xcode no problem. While doing so, you can add it to the existing xcworkspace without objection. But when you try to add the package as a dependency of the app, it just fails silently.
I can click the "Add Local" button in the add package window, I can select the package directory, and click the add button. And then nothing happens. The package doesn't show up in the "Swift Packages" tab of the project nor under the dependencies of the target to which I added it. So, compilation just fails because the module is missing.
After abandoning that idea, the other, similarly unsuccessful, tactic I tried was encapsulating all of the usages of the package's types within my framework in an opaque type and removing the dependency on the package from the app target. This was in the hopes that the package would be statically linked into the framework and have all the unnecessary bits stripped.
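Concretely, the idea was something along these lines (a sketch assuming WebURL is the package type in question; the wrapper and its members are made up):

```swift
import WebURL // imported by the framework only, never by the app

// The package type becomes an implementation detail: consumers of the
// framework only ever see the wrapper, so (in theory) the app target
// doesn't need to link the package at all.
public struct ParsedURL {
    private let inner: WebURL

    public init?(_ string: String) {
        guard let url = WebURL(string) else { return nil }
        inner = url
    }

    public var absoluteString: String {
        String(describing: inner)
    }
}
```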
That did not work. I don't know why. There seems to be very little visibility (read: none at all) into how Xcode chooses between static and dynamic linking for Swift packages.
That's where I gave up, so if you have any better ideas, please let me know. At the end of the day, I don't have the energy to spend more time fighting Xcode over 20 megabytes. Oh well. I should probably throw a report into the void that is Feedback Assistant.
**Update:** As of April 2022, I've [resolved](/2022/swift-package-framework-update/) this issue.
```
title = "Asahi Linux"
tags = ["computers"]
date = "2022-03-20 10:49:42 -0400"
short_desc = "A brief but pleasant experiment in running Linux on Apple Silicon."
slug = "asahi-linux"
```
The [alpha release](https://asahilinux.org/2022/03/asahi-linux-alpha-release/) of Asahi Linux, a project to run Linux on Apple Silicon computers, came out a couple days ago. And out of a combination of boredom and curiosity, I thought I'd give it a shot.
<!-- excerpt-end -->
## Installation
The installation process went very smoothly. The installer utility the Asahi team built functioned perfectly. The one pain point was that, when shrinking the default partition to make room for the new Linux one, my whole machine locked up for about three minutes. No input was registered and the entire screen stopped updating. The installer does warn you about this beforehand, but it was kind of nerve-wracking nonetheless. After that, the rest of the installation went off without a hitch, and I'm now running the Asahi Linux Desktop (a modified version of Arch Linux) on my [M1 Max MacBook Pro](/2022/m1-max/).
## State of Linux
Overall, Linux runs quite well natively on the M1 Max. Despite the alpha state, I haven't (yet) run into any issues the Asahi team didn't warn about.
So far, almost all of the software I've tried to install has worked. The sole exception was Rust, as the prebuilt version in the Arch repo uses jemalloc, which does not support the 16K page size of the M1 family. But, building Rust from source[^1] got it working fine.
[^1]: While building Rust from source, the laptop got the hottest I've ever felt it outside of playing graphically intensive games. The fan also became audible, though still nowhere near the jet-engine levels of previous Intel laptops.
The biggest gap in Asahi right now is the lack of drivers for the chip's GPU. This means the machine uses software rendering (llvmpipe), which is a great deal slower than proper GPU rendering would be. However, the M1 Max is fast enough for software rendering to be usable for most tasks. Even watching 1080p YouTube in Firefox was fine, and glxgears runs at about 700 FPS. Although moving the cursor seems to be a smooth 60 FPS[^2], everything else appears to be capped at 30. This could be a configuration issue, but I don't know enough to figure it out (and cursory googling doesn't reveal anything helpful).
[^2]: The internal display on the MBP is only detected as supporting 60Hz, rather than the full 120 it supports.
Another as-yet unsupported feature on Linux is CPU frequency scaling, meaning all the CPU cores run at their full clock speed continuously, consuming extra power. I haven't had very much time to use it on battery, but in spite of this, the battery life seems usable (probably no worse than my old Intel MBP).
This has been a fairly pleasant experience. Though, after I finish writing this post, I'll be returning to macOS. A good deal of what I do is macOS-specific, not to mention that I enjoy having audio output.
The work of the Asahi team so far is incredibly impressive. What's more, some of their efforts, such as the m1n1 bootloader, are going towards supporting other operating systems as well as Linux. Maybe I should try [OpenBSD](https://marc.info/?l=openbsd-arm&m=164768992119719&w=2) next...
```
title = "Update: Swift Packages and Frameworks"
tags = ["swift"]
date = "2022-04-07 22:36:42 -0400"
slug = "swift-package-framework-update"
```
A while ago I [wrote](/2022/swift-package-framework/) about some trouble I had getting Xcode to cooperate with my efforts to bring my app file size back under control after adding a new Swift Package dependency. Well, I'm happy to say I finally have: the most recent TestFlight build of Tusker has a 6.7MB install size, down from 25MB.
Ultimately I did take the route of turning my framework into a Swift Package. I revisited it because I noticed in another project that local packages inside the same folder as the main project worked perfectly fine. The only difference I found was that the project where it worked used only an `.xcodeproj`, whereas Tusker used an `.xcworkspace`. So, I deleted the (for unrelated reasons, no longer necessary) workspace and found that, after quitting and relaunching Xcode, the local package worked perfectly fine.
I briefly started writing a feedback report, but upon further testing I found that xcworkspaces in general weren't the problem—a new project and workspace worked fine. So, I gave up trying to reproduce it and assumed there was just something weird about the 3.5 year old workspace.
```
title = "Part 12: Typed Variables"
tags = ["build a programming language", "rust"]
date = "2022-05-25 16:38:42 -0400"
slug = "typed-variables"
preamble = '<p style="font-style: italic;">This post is part of a <a href="/build-a-programming-language/" data-link="/build-a-programming-language/">series</a> about learning Rust and building a small programming language.</p><hr>'
```
Hi. It's been a while. Though the pace of blog posts fell off a cliff last year[^1], I've continued working on my toy programming language on and off.
[^1]: During and after WWDC21, basically all of my non-work programming energy shifted onto iOS apps, and then never shifted back. I do recognize the irony of resuming mere weeks before WWDC22.
<!-- excerpt-end -->
## Part 1: Type Theory is for Chumps
I spent a while thinking about what I wanted the type system to look like—I do want some level of static typing, I know that much—but it got to the point where I was tired of thinking about it and just wanted to get back to writing code. So, lo and behold, the world's simplest type system:
```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum Type {
    Integer,
    Boolean,
    String,
}

impl Type {
    fn is_assignable_to(&self, other: &Type) -> bool {
        self == other
    }
}
```
Then, in the `Context`, rather than variables just being a map of names to `Value`s, the map now stores `VariableDecl`s:
```rust
struct VariableDecl {
    variable_type: Type,
    value: Value,
}
```
So variable declaration and lookup now go through a simple helper that creates the `VariableDecl`.
For now, types at variable declarations are optional at parse time since I haven't touched type inference yet and I didn't want to go back and update a bunch of unit tests. They are, however, inferred at evaluation time, if one wasn't specified.
```rust
fn parse_statement<'a, I: Iterator<Item = &'a Token>>(it: &mut Peekable<I>) -> Option<Statement> {
    // ...
    let node = match token {
        Token::Let => {
            let name: String;
            if let Some(Token::Identifier(s)) = it.peek() {
                name = s.clone();
                it.next();
            } else {
                panic!("expected identifier after let");
            }
            let mut variable_type = None;
            if let Some(Token::Colon) = it.peek() {
                it.next();
                variable_type = Some(parse_type(it).expect("type after colon in variable declaration"));
            }
            expect_token!(it, Equals, "equals in variable declaration");
            let value = parse_expression(it).expect("initial value in variable declaration");
            Some(Statement::Declare {
                name,
                variable_type,
                value,
            })
        }
        // ...
    };
    // ...
}
```
The `parse_type` function is super simple, so I won't go over it—it just converts the tokens for string/int/bool into their respective `Type`s. Calling `expect` on the result and then wrapping it back up in a `Some` may seem redundant, but if whatever followed the colon wasn't a type, that's a syntax error and I don't want to continue.
Actually evaluating the variable declaration is still pretty straightforward, though it now checks that the type the initialization expression evaluated to matches the declared type:
```rust
fn eval_declare_variable(
    name: &str,
    mutable: bool,
    variable_type: &Option<Type>,
    value: &Node,
    context: &ContextRef,
) {
    let val = eval_expr(value, context);
    let variable_type = match variable_type {
        Some(declared) => {
            assert!(
                val.value_type().is_assignable_to(declared),
                "variable value type is not assignable to declared type"
            );
            *declared
        }
        None => val.value_type(),
    };
    context
        .borrow_mut()
        .declare_variable(name, mutable, variable_type, val);
}
```
## Part 2: Variable Variables
The other bit I added was mutable variables, so that I could write a small program that did something non-trivial.
To do this, I changed the `VariableDecl` struct I showed above to hold a `ValueStorage` rather than a `Value` directly.
`ValueStorage` is an enum with variants for mutable and immutable variables. Immutable variables simply own their `Value`. Mutable ones, though, wrap it in a `RefCell` so that it can be mutated.
```rust
enum ValueStorage {
    Immutable(Value),
    Mutable(RefCell<Value>),
}
```
Setting a value is straightforward, but getting one is a bit annoying because `Value` isn't `Copy`, since it may own a string. So, there are a couple of helper functions: one to access the borrowed value and one to clone it.
```rust
impl ValueStorage {
    fn set(&self, value: Value) {
        match self {
            ValueStorage::Immutable(_) => panic!("cannot set immutable variable"),
            ValueStorage::Mutable(cell) => {
                *cell.borrow_mut() = value;
            }
        }
    }

    fn with_value<R, F: FnOnce(&Value) -> R>(&self, f: F) -> R {
        match self {
            ValueStorage::Immutable(val) => f(val),
            ValueStorage::Mutable(cell) => f(&cell.borrow()),
        }
    }

    fn clone_value(&self) -> Value {
        self.with_value(|v| v.clone())
    }
}
```
This works, but isn't ideal. At some point, the complex `Value` types should probably be changed to be reference-counted so that, even if they're still not copyable, cloning doesn't always involve an allocation.
Lexing and parsing I won't go into detail on, since it's trivial. There's a new token for `var`, and whether a declaration starts with that or `let` controls the mutability.
Setting variables isn't complicated either: when parsing a statement, if there's an equals sign after an identifier, that turns into a `SetVariable` which is evaluated simply by calling the aforementioned `set` function on the `ValueStorage` for that variable.
And with that, I can write a little fibonacci program:
```txt
$ cat fib.toy
var a = 0
var b = 1
var i = 0
while (i < 10) {
print("iteration: " + toString(i) + ", a: " + toString(a));
let tmp = a
a = b
b = tmp + a
i = i + 1
}
$ cargo r -- fib.toy
iteration: 0, a: 0
iteration: 1, a: 1
iteration: 2, a: 1
iteration: 3, a: 2
iteration: 4, a: 3
iteration: 5, a: 5
iteration: 6, a: 8
iteration: 7, a: 13
iteration: 8, a: 21
iteration: 9, a: 34
```
I also added a small CLI using [`structopt`](https://lib.rs/structopt) so I didn't have to keep writing code inside a string in `main.rs`.
```
title = "Clarus Returns Home"
tags = ["misc"]
date = "2022-06-14 10:11:42 -0400"
short_desc = "Did you know Clarus the Dogcow is hiding in the macOS Ventura beta?"
slug = "clarus"
card_image_path = "/2022/clarus/clarus-smooth.png"
```
<figure>
<div style="display: flex; flex-direction: row; align-items: center; background-color: white;">
<img src="/2022/clarus/clarus-kare.png" alt="Susan Kare's pixel art dogcow icon" style="width: 50%; image-rendering: pixelated;">
<img src="/2022/clarus/clarus-smooth.png" alt="The high resolution dogcow icon that ships with macOS Ventura" style="width: 50%;">
</div>
<figcaption>How it started / How it's going</figcaption>
</figure>
Did you know that with macOS Ventura, Clarus the Dogcow has at long last returned home? Recently, while doing something else, I accidentally hit Cmd+Shift+P, which opened the Page Setup dialog. I was greeted, surprisingly, with a new high-resolution version of the classic Clarus icon that I'd never seen before. I looked at it briefly, and then closed the dialog and went back to whatever I was doing before. I had assumed that, because I'd been in a 3rd-party app at the time, the Clarus icon was just some easter egg the developer had left. But a little while later, I got to thinking. What were the chances that someone went to the trouble of customizing the Page Setup dialog, of all things, just for an easter egg? Zero, it turns out. That dialog shows Clarus on the page preview in every app.
<!-- excerpt-end -->
<img src="/2022/clarus/page-setup.png" alt="The Page Setup dialog. The page preview on the left shows the high-resolution Clarus icon.">
I don't have a Monterey machine to test it at the moment (I, er, [accidentally](https://social.shadowfacts.net/notice/AKGSrBOxnVDVO0ueem) updated my laptop to the beta), but I _believe_ this is a new change with Ventura.
**Update:** I installed Monterey in a virtual machine to check, and, indeed, the Page Setup dialog there bears no sign of Clarus.
The next step, then—having been thoroughly nerd-sniped by this—was to figure out where the icon was coming from and if I could pull it out of whatever nook it was hidden in.
The first stop was [`NSPageLayout`](https://developer.apple.com/documentation/appkit/nspagelayout), the object responsible for displaying the Page Setup panel. It was unlikely that the class would actually contain the implementation of the panel, but it was at least a starting point.
In order to actually look at the disassembled implementation of this AppKit class, I needed the actual AppKit framework binary. Since macOS Big Sur, all system framework binaries are stored merged into the `dyld` shared cache, rather than in separate files. But I needed them as separate files in order to inspect them.
Since the [last time](/2021/scrollswitcher/) I wrote about this, a couple things have changed. Before, I built the Apple `dyld_shared_cache_util` from one of the periodic `dyld` source dumps. This is annoying because you have to make a bunch of changes to the source code to get it to compile outside of an Apple-internal environment. It also may break whenever there's an OS update. So, I've switched to using [this utility](https://github.com/keith/dyld-shared-cache-extractor) which uses the `dyld_extractor.bundle` that ships with Xcode. The other difference since before is a minor one: the dyld shared cache has moved. Whereas before it was in `/System/Library/dyld/`, in the Ventura beta it's moved to `/System/Cryptexes/OS/System/Library/dyld/` (the Cryptex seems to be part of the [Rapid Security Response](https://threedots.ovh/blog/2022/06/a-quick-look-at-macos-rapid-security-response/) feature Apple announced).
With the shared cache extracted, I could load the AppKit binary into Hopper (I had to disable the Objective-C analysis, otherwise the app crashed when trying to load the binary) and start poking around. I searched for the `NSPageLayout` class that I'm interested in, and looked at the `runModalWithPrintInfo:` method, since that sounded like a good candidate for something that would lead to the bulk of the implementation. And, indeed, it was. The method appears to be a fairly simple wrapper around the `PMPrepare...` function that sounds like it lives in a separate private framework.
<div class="article-content-wide">
<img src="/2022/clarus/appkit.png" alt="Hopper window with AppKit showing the runModalWithPrintInfo: method">
</div>
<aside class="inline">
Curious about what those `_objc_msgSend$...` calls are? I would be too, if I hadn't watched the fascinating [Improve app size and runtime performance](https://developer.apple.com/wwdc22/110363) session from WWDC this year.
</aside>
The next step was figuring out where that prepare function is actually implemented. Running `otool -L` on the AppKit binary doesn't reveal anything obviously useful, but in the PrivateFrameworks directory extracted from the dyld shared cache, there's something called `PrintingPrivate.framework`, which sounds promising. Opening it up in Hopper, I saw that this is indeed the framework I was looking for.
<div class="article-content-wide">
<img src="/2022/clarus/printingprivate.png" alt="PrintingPrivate in Hopper showing the _PMPrepareAppKitPageSetupDialogWithPrintInfoPrivate function">
</div>
Looking at the implementation of the prepare function, what immediately jumps out is the call to `_LoadAndGetPrintingUIBundle`. This seems to be yet another layer of indirection with the actual thing implemented in a different bundle. There's also a call in the else branch to the similarly-named `_LoadAndGetPrintCocoaUIBundle`, but let's start with the first one in hopes that it's more common.
The implementation of that function goes through another helper function and it ends up loading a `PrintingUI.bundle` plugin from inside the PrintingPrivate framework bundle. This one isn't part of the dyld shared cache, so I can just open it right up in Hopper without any fuss.
If you look for the function PrintingPrivate calls, it turns out it winds up in a method on `PMPageSetupController`. This sounds promising; let's see what else that class can do.
What's this? A method called `updateClarus`? Could it be? Have we finally reached it?
<div class="article-content-wide">
<img src="/2022/clarus/printingui.png" alt="The PrintingUI binary in Hopper with the search panel showing a bunch of methods with 'clarus' in the name">
</div>
Yes! Clarus, I'm coming! One method that sounds particularly encouraging is `-[PMPageSetupController setClarusImageView:]`. If I can find out what's setting the image view, maybe that'll lead to where it's being configured with the image.
Unfortunately, the setter for that property isn't referenced anywhere in the PrintingUI binary. Nor is the getter. I was stuck here for a while, until I realized that the setter not being called anywhere was probably a sign that the UI was defined in a Nib and that an outlet was added from Interface Builder, even though it was never used.
Sure enough, in the plugin bundle's resources, there is a `PMPageSetup.nib`. And if the page setup UI is defined in a Nib, and Clarus is being shown in an image view, the image itself is probably located in the asset catalog.
Using the system `assetutil` program, one can list all of the files in a compiled asset catalog. And sure enough, there she is:
```shell
$ assetutil --info /System/Library/PrivateFrameworks/PrintingPrivate.framework/Versions/A/Plugins/PrintingUI.bundle/Contents/Resources/Assets.car | grep -i clarus
"Name" : "Clarus",
"RenditionName" : "ClarusSmooth2.pdf",
"Name" : "Clarus",
"RenditionName" : "ClarusSmooth2.pdf",
"Name" : "Clarus",
"RenditionName" : "ClarusSmooth2.pdf",
```
To actually extract the image from the asset catalog, I needed to use a third-party tool. [acextract](https://github.com/bartoszj/acextract) worked perfectly on the first try, though it did need a couple of additional `@import`s to compile on Ventura since [Foundation no longer re-exports CoreGraphics](https://twitter.com/illian/status/1534014772848365568).
<aside>
The `assetutil` manpage does contain a reference to a dump option (`-d`/`--dump`), which is curiously not recognized by the program. Perhaps an Apple-internal feature that is excluded from compilation in public builds?
</aside>
And with that, I finally gazed upon the 512 &times; 512px beauty that is Smooth Clarus:
<figure>
<img src="/2022/clarus/clarus-smooth.png" alt="Smooth Clarus">
</figure>
The version shown here I've added a white background to, so it's not invisible in dark mode. The <a href="/2022/clarus/Clarus256x256@2x.png" data-link="/2022/clarus/Clarus256x256@2x.png">original image</a> has a transparent background.
<aside class="inline">
The keen-eyed among you may notice that although it had a `.pdf` extension in the `assetutil` info, I've given it here as a PNG. I was confused by this too, but upon closer inspection I believe the PNG is what ships with the OS. Although the `.car` format is not documented, you can still open it up in a hex viewer and learn a bit about what it contains. Looking through it, there appears to be some metadata for each file, followed by the image data itself.
As `assetutil` showed, there are multiple entries for `ClarusSmooth2.pdf`—and one of them is followed by data that starts with the PDF file format header (`%PDF`). But, unfortunately, extracting that data into a separate file seems to result in a blank PDF. And I don't know enough about the format to figure out whether there is any vector data in it, or if it truly is empty.
**Update:** [Reid Ellis](https://mastodon.social/users/clith/statuses/108482914539340482) managed to extract the actual PDF data, so here's Clarus in all of the <a href="/2022/clarus/clarus.pdf" data-link="/2022/clarus/clarus.pdf">infinite smoothness</a>.
</aside>
Lastly, if you're writing a Mac app and would like to hide Clarus somewhere, you can load the bundle yourself and then pull the image out like so:
```swift
let bundle = Bundle(path: "/System/Library/PrivateFrameworks/PrintingPrivate.framework/Versions/A/Plugins/PrintingUI.bundle")!
try! bundle.loadAndReturnError()
let image = bundle.image(forResource: "Clarus")
```
I'm not sure if the Mac App Store would consider that using private SPI, so use it at your own risk.
It would be very cool to see Clarus return as an SF Symbol some day. If hundreds of icons for various Apple products can go in, so too can everyone's favorite dogcow.
```
title = "Adopting TextKit 2"
tags = ["swift"]
date = "2022-07-31 15:31:42 -0700"
slug = "textkit-2"
```
With iOS 16, Apple switched on TextKit 2 for UITextViews. But, if you access any of the TextKit 1 objects on the text view, it will automatically fall back to a compatibility mode. [All of the work I did](/2020/uipreviewparameters-textlinerects/) to mimic Safari's link context menu animation was, of course, using the TextKit 1 APIs, so it was blocking me from fully adopting TextKit 2. So, here's how to update that code.
<!-- excerpt-end -->
The first part of my implementation that needed to change is how I get which link is being tapped. I have a function called `getLinkAtPoint(_:)` that takes a CGPoint in the coordinate space of the view and tries to find the link at that point. To update it, almost the entire old body of the function is wrapped in an if statement that checks if TextKit 2 is available:
```swift
func getLinkAtPoint(_ point: CGPoint) -> (URL, NSRange)? {
    let pointInTextContainer = CGPoint(x: point.x - textContainerInset.left, y: point.y - textContainerInset.top)
    if #available(iOS 16.0, *),
       let textLayoutManager = self.textLayoutManager {
        // ...
    } else {
        // ...
    }
}
```
Note that I fall back to the TextKit 1 path if the app's not running on iOS 16 _or_ the `NSTextLayoutManager` is not available. Even on iOS 16, a text view may still fall back to TextKit 1 if the old API is used, in which case the TextKit 2 stack gets swapped out. In my testing this can happen occasionally even if you're never using the TextKit 1 API yourself, meaning something in the framework is accessing it (though this may simply be a beta bug).
When TextKit 2 is available, there are several steps we need to go through to get the attributes at a point.
First, we get the text layout fragment that the layout manager has for the point in the coordinate space of the text container. The documentation is sparse, but in my testing, layout fragments correspond to paragraphs.
```swift
guard let fragment = textLayoutManager.textLayoutFragment(for: pointInTextContainer) else {
    return nil
}
```
From there, we get the line fragment (corresponding to a visual line of text) that contains our point. To get the line fragment, there is no builtin helper method, so we just go through the layout fragment's `textLineFragments` array until we find the one that matches. For each line fragment, we check if its typographic bounds contain the target point converted to the layout fragment's coordinate space.
```swift
let pointInLayoutFragment = CGPoint(x: pointInTextContainer.x - fragment.layoutFragmentFrame.minX, y: pointInTextContainer.y - fragment.layoutFragmentFrame.minY)
guard let lineFragment = fragment.textLineFragments.first(where: { lineFragment in
    lineFragment.typographicBounds.contains(pointInLayoutFragment)
}) else {
    return nil
}
```
If there's no matching layout or line fragment, that means the given location isn't on a piece of text and therefore there's no link, so the method returns `nil`.
After that, we can get the tapped character index by using the `characterIndex(for:)` method with the target point converted to the line fragment's coordinate space.
```swift
let pointInLineFragment = CGPoint(x: pointInLayoutFragment.x - lineFragment.typographicBounds.minX, y: pointInLayoutFragment.y - lineFragment.typographicBounds.minY)
let charIndex = lineFragment.characterIndex(for: pointInLineFragment)
```
And then we can use the line fragment's attributed string to look up the attribute:
```swift
var range = NSRange()
guard let link = lineFragment.attributedString.attribute(.link, at: charIndex, longestEffectiveRange: &range, in: lineFragment.attributedString.fullRange) as? URL else {
    return nil
}
let textLayoutFragmentStart = textLayoutManager.offset(from: textLayoutManager.documentRange.location, to: fragment.rangeInElement.location)
let rangeInSelf = NSRange(location: range.location + textLayoutFragmentStart, length: range.length)
return (link, rangeInSelf)
```
One important thing to note is that the line fragment's `attributedString` property is an entirely separate string from the text view's attributed string. So the return value of `characterIndex` and the longest effective range have indices into the _substring_. The rest of my code expects the return value to be a range in the index-space of the full string, so I need to convert it by adding the offset between the beginning of the document and the beginning of the line fragment's substring.
For the legacy TextKit 1 path, I use the `characterIndex(for:in:fractionOfDistanceBetweenInsertionPoints:)` method on the layout manager to get the character index and then look up the attribute at that location. I won't go into detail in that code here, since it's more straightforward—and lots of other examples can be found online.
Next up: context menu previews. The vast majority of the code is unchanged, all that needs to be done is changing how we get the rects spanned by a range in the text.
In the `contextMenuInteraction(_:previewForHighlightingMenuWithConfiguration:)` method, rather than always using the TextKit 1 API, we again check if TextKit 2 is available, and if so, use that:
```swift
var textLineRects = [CGRect]()
if #available(iOS 16.0, *),
   let textLayoutManager = self.textLayoutManager {
    let contentManager = textLayoutManager.textContentManager!
    guard let startLoc = contentManager.location(contentManager.documentRange.location, offsetBy: range.location),
          let endLoc = contentManager.location(startLoc, offsetBy: range.length),
          let textRange = NSTextRange(location: startLoc, end: endLoc) else {
        return nil
    }
    textLayoutManager.enumerateTextSegments(in: textRange, type: .standard, options: .rangeNotRequired) { _, rect, _, _ in
        textLineRects.append(rect)
        return true
    }
} else {
    let notFoundRange = NSRange(location: NSNotFound, length: 0)
    self.layoutManager.enumerateEnclosingRects(forGlyphRange: linkRange,
                                               withinSelectedGlyphRange: notFoundRange,
                                               in: self.textContainer) { (rect, stop) in
        textLineRects.append(rect)
    }
}
```
In the TextKit 2 path, we get the `NSTextContentManager` and force-unwrap it (as far as I can tell, this is never `nil` and is only optional because it's weak).
We use the content manager to convert the `NSRange` of the link that's being previewed into an `NSTextRange` (a range of `NSTextLocation`s, which are opaque objects that represent locations in the document however the content manager sees fit). I'm not sure in what circumstances these calls could fail, but if any of them do, we return `nil` and let the framework use the default preview.
With that, we can call `enumerateTextSegments` to get the bounding rectangles of the text segments. Since these are in the coordinate space of the text layout manager, there's nothing further we need to do, so we can just add them to the `textLineRects` array. And from the block, we return true to continue enumerating. One minor thing to note is that we can pass the `.rangeNotRequired` option to tell the framework to skip calculating text ranges for every segment since we don't need them.
From there, the code is exactly the same as last time.
And with those changes in place, I can use my app without any warnings about text views falling back to TextKit 1 and the accompanying visual artifacts.
```
title = "Webshit Weekly (2022/08/14)"
tags = ["misc"]
date = "2022-08-16 10:00:42 -0400"
short_desc = "A tribute to the seemingly ended series from n-gate."
slug = "webshit-weekly"
```
An annotated digest of the top "Hacker" "News" posts for the second week of August, 2022.
(A tribute[^1] to the seemingly ended webshit weekly series from [n-gate](http://n-gate.com/).)
[^1]: Contrary to the title, I will not be doing this weekly for I have neither the time nor the energy. I deeply respect that n-gate was able to do it for nearly 5 years, I couldn't have managed a fraction as long.
<!-- excerpt-end -->
<style>
.article-content {
font-family: 'Comic Sans MS', 'Chalkboard SE', 'Comic Neue', 'VGA' !important;
font-size: 1.1rem !important;
}
h3, h4 {
font-family: 'Comic Sans MS', 'Chalkboard SE', 'Comic Neue', 'VGA' !important;
margin-bottom: 0;
}
h4 {
margin-top: 0;
}
.article-content a.header-anchor {
display: none;
}
a::before, a::after {
content: "" !important;
}
.article-content a {
text-decoration: underline !important;
}
</style>
### [To uncover a deepfake video call, ask the caller to turn sideways](https://metaphysic.ai/to-uncover-a-deepfake-video-call-ask-the-caller-to-turn-sideways/)
#### August 8, 2022 [(comments)](https://news.ycombinator.com/item?id=32384653)
Metaphysic (a company that seeks to "empower individuals" with artificial intelligence "content generation" (read: plagiarism laundering) tools) determines that relatively few images of people in big datasets show them completely in profile. As such, deepfake tools have poor results when compositing someone's face onto a profile view of a subject. Hackernews helpfully notes that this is merely a present limitation of deepfake and face alignment neural networks, and that within a few short years, these dark days of deepfake detectability will be behind us. Other Hackernews propose a series of solutions that will surely not be defeated by further advancements in deepfake tools.
### [An incident impacting 5M accounts and private information on Twitter](https://privacy.twitter.com/en/blog/2022/an-issue-affecting-some-anonymous-accounts)
#### August 9, 2022 [(comments)](https://news.ycombinator.com/item?id=32399949)
Twitter (business model: "Uber for bad takes") informs the public that a flaw in their code let anyone discover which account, if any, a particular email address or phone number belonged to. [Again](https://privacy.twitter.com/en/blog/2020/an-incident-impacting-your-account-identity). They assure everyone that they take privacy Very Seriously and that [only they](https://www.ftc.gov/news-events/news/press-releases/2022/05/ftc-charges-twitter-deceptively-using-account-security-data-sell-targeted-ads) are allowed to use that information for illicit purposes. One Hackernews considers the possibility of enumerating all 10 billion phone numbers. A number of Hackernews also note that Twitter helpfully victim ~~blames~~ suggests their users simply not use a publicly-known phone number to protect against the company's incompetence.
### [Instagram can track anything you do on any website in their in-app browser](https://krausefx.com/blog/ios-privacy-instagram-and-facebook-can-track-anything-you-do-on-any-website-in-their-in-app-browser)
#### August 10, 2022 [(comments)](https://news.ycombinator.com/item?id=32415470)
Meta&#8482; (business model: "Uber for antitrust complaints") decides that not only are they entitled to write down everything you do on their ~~websites~~ ~~apps~~ metaspaces, but they are also entitled to spy on you whenever you try to leave them. Hackernews are confused about why Meta&#8482; doesn't simply use the APIs provided by Apple (business model: "Uber for UI frameworks") for showing web views without spyware. A conversation ensues about whether Apple should neuter in-app, faux-Safari browsers and further clamp down on the already nigh nonexistent 3rd-party browser ecosystem (another topic of frequent consternation). Another sub-thread raises the alarm that in-app web browsers allow access to \*shock\*, \*awe\* _The Internet_.
### [A 17-year-old designed a novel synchronous reluctance motor](https://www.smithsonianmag.com/innovation/this-17-year-old-designed-a-motor-that-could-potentially-transform-the-electric-car-industry-180980550/)
#### August 11, 2022 [(comments)](https://news.ycombinator.com/item?id=32426777)
A high school student improves upon an electric motor design that doesn't use rare-earth magnets. Hackernews bitterly resents that they weren't child prodigies and tries to nitpick the student's work into meaninglessness. Other Hackernews conclude that maybe the kids are alright.
### [Arrest of suspected developer of Tornado Cash](https://www.fiod.nl/arrest-of-suspected-developer-of-tornado-cash/)
#### August 12, 2022 [(comments)](https://news.ycombinator.com/item?id=32436413)
The Dutch Fiscal Information and Investigation Service (business model: "Uber for stopping financial crimes") arrests a man believed to be the developer of the Ethereum tumbler Tornado Cash (business model: "Uber for committing financial crimes"). Hackernews is very concerned about their future prospects if all of a sudden governments are arresting people who build software designed to let people commit crimes. One subthread devolves into arguments about gun rights in the United States (business model: "Uber for racial inequality") and others into general fearmongering about the end of privacy as we know it.
### [I hacked my car](https://programmingwithstyle.com/posts/howihackedmycar/)
#### August 13, 2022 [(comments)](https://news.ycombinator.com/item?id=32447650)
In which the author finds a series of vulnerabilities that should be embarrassing for a company with a 36B USD market cap, culminating in finding the private key used to sign their car's firmware on the internet—an engineer having evidently reused it from a tutorial. At over 3000 words, most Hackernews can't be bothered with reading it and, as such, the comments are a barren wasteland. Hackernews mostly has complaints about their own cars. Another Hackernews does a casual racism and slights the engineering ability of an entire continent (but it's definitely okay because he's personally had bad experiences with _those_ people).
### [Oasis: Small statically-linked Linux system](https://github.com/oasislinux/oasis)
#### August 14, 2022 [(comments)](https://news.ycombinator.com/item?id=32458744)
Some developers have come up with a Linux (business model: "Uber for FOSS dweebs") distribution that will be even more annoying to use than the usual musl-based ones. Half of Hackernews rails against dynamic linking and the other half rails against static linking. Compromise is on no one's mind; this can only end in war. Only one Hackernews is excited about any other potential merit of the project (namely that it boots a few seconds faster than their current distro of choice).

View File

@ -0,0 +1,20 @@
```
title = "LiveView Native"
tags = ["elixir", "swift"]
date = "2022-09-01 12:30:42 -0400"
short_desc = "An iOS SwiftUI client for Phoenix LiveView."
slug = "liveviewnative"
```
I'm very excited for the project I've been working on all year to finally be public. [LiveView Native](https://native.live) is a library that lets you build native apps backed by [Phoenix LiveView](https://github.com/phoenixframework/phoenix_live_view). I've been developing the [iOS client](https://github.com/liveviewnative/liveview-client-swiftui) which is backed by SwiftUI.
Using LiveView Native lets you avoid duplicating business logic on the frontend and saves time on implementing dedicated APIs for native apps. The iOS client can be integrated into any existing app using a single SwiftUI view, so it's easy to adopt for just a single screen at a time.
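Roughly, embedding a LiveView screen looks something like this; this is a minimal sketch based on the docs, and the exact type and initializer names are assumptions that may not match the current API:

```swift
import SwiftUI
import PhoenixLiveViewNative

struct ChatScreen: View {
    // Coordinator that owns the socket connection to the Phoenix server.
    // The URL and these type/initializer names are illustrative.
    @State private var coordinator = LiveViewCoordinator(URL(string: "http://localhost:4000/chat")!)

    var body: some View {
        // The single SwiftUI view that renders the server-defined UI.
        LiveView(coordinator: coordinator)
    }
}
```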
You can find the documentation[^1] for the Swift package [here](https://liveviewnative.github.io/liveview-client-swiftui/documentation/phoenixliveviewnative/), including a step-by-step [tutorial](https://liveviewnative.github.io/liveview-client-swiftui/tutorials/yourfirstapp) which walks you through building a complete app with LiveView Native.
[^1]: This is the first time I've used [DocC](https://developer.apple.com/documentation/docc) and it has been largely excellent. It's made it very easy to produce nice-looking and well-organized documentation. And the tutorial mechanism has been very useful.
We've also developed a simple [chat app](https://github.com/liveviewnative/elixirconf_chat) which was used by the attendees of ElixirConf this year, and serves as a complete example of a LiveView Native app.
I'm very excited to see what people build with it.

View File

@ -0,0 +1,88 @@
```
title = "Live Activities (and Bad Apple)"
tags = ["swift"]
date = "2022-10-01 14:05:42 -0400"
short_desc = "Getting nerd sniped for fun and—well, just fun really."
slug = "live-activities"
```
I recently got [nerd sniped](https://xkcd.com/356/) by [this tweet](https://twitter.com/zhuowei/status/1573711389285388288) from Zhuowei Zhang about playing the Bad Apple video in the Dynamic Island on the iPhone 14 Pro. His original implementation used a webpage and the media session API, and this worked, but the system plays an animation when the artwork changes, so the framerate was limited to 2 FPS. Not ideal for watching a video. So, I wanted to see how much closer to watchable I could get.
This post isn't going to be a detailed guide or anything, just a collection of some mildly interesting things I learned.
<!-- excerpt-end -->
Before I started this, I was already aware that Live Activity updates had a 4KB limit on the size of the dynamic state. So, the first thing I worked on was encoding the video into a smaller format. Because this particular video is black and white, I encoded it as 1 bit-per-pixel. Each frame is also complete, so the widget doesn't need access to any previous frames to display the current one.
Since I wanted to display it in the Dynamic Island, the video is also scaled down to be very small, 60 &times; 45 pixels—one eighth the resolution of the original. This is a convenient size because each row of pixels can be encoded as a single UInt64. The entire frame comes out to 45 &times; 8 = 360 bytes, which is plenty small.[^1]
[^1]: Zhuowei [pointed out](https://notnow.dev/objects/bddcdb31-7ece-4ae1-86b1-7c5a2d23bec6) that by using a palette, you could even get a color frame of the same size into 2.8 kilobytes without getting into any fancy encoding techniques.
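Concretely, the packing might look something like this (a sketch of the format described above, not the project's actual code):

```swift
// Pack one 60 × 45 black-and-white frame at 1 bit per pixel:
// each 60-pixel row fits in a single UInt64, so a frame is 45 × 8 = 360 bytes.
func packFrame(_ pixels: [[Bool]]) -> [UInt64] {
    pixels.map { row in
        row.enumerated().reduce(UInt64(0)) { packed, pixel in
            pixel.element ? packed | (1 << UInt64(pixel.offset)) : packed
        }
    }
}
```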
The whole video is encoded when the app starts up, which takes about 8 seconds. That's faster than real time (the video is 3m39s), so it could be done during playback, but doing it ahead of time is fast enough that I didn't feel like putting in the work to optimize a shitpost.
The widget can then unpack the encoded frame into a bitmap that can be turned into an image.
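A minimal sketch of that unpacking step, assuming the 60-pixel-wide rows from above (again, not the project's exact code):

```swift
import CoreGraphics

// Expand a packed frame (one UInt64 per row) into an 8-bit grayscale CGImage.
func unpackFrame(_ rows: [UInt64], width: Int = 60) -> CGImage? {
    var bytes: [UInt8] = []
    bytes.reserveCapacity(rows.count * width)
    for row in rows {
        for x in 0..<width {
            bytes.append((row >> UInt64(x)) & 1 == 1 ? 255 : 0)
        }
    }
    guard let data = CFDataCreate(nil, bytes, bytes.count),
          let provider = CGDataProvider(data: data) else { return nil }
    return CGImage(
        width: width, height: rows.count,
        bitsPerComponent: 8, bitsPerPixel: 8, bytesPerRow: width,
        space: CGColorSpaceCreateDeviceGray(),
        bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue),
        provider: provider, decode: nil,
        shouldInterpolate: false, intent: .defaultIntent
    )
}
```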
Adding the Live Activity wasn't difficult—the API is wonderfully straightforward—but, alas, updating it was not so.
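For reference, starting the activity takes only a few lines of ActivityKit; the attributes type here is a hypothetical stand-in, and `firstFrame` is assumed to come from the encoder:

```swift
import ActivityKit

struct BadAppleAttributes: ActivityAttributes {
    struct ContentState: Codable, Hashable {
        var frame: Data // one packed 360-byte frame
    }
}

// Request the Live Activity with the first frame as its initial state
// (from a throwing context).
let activity = try Activity<BadAppleAttributes>.request(
    attributes: BadAppleAttributes(),
    contentState: BadAppleAttributes.ContentState(frame: firstFrame)
)
```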
While the app was in the foreground, ActivityKit would log a message whenever I asked it to update the activity. But, when the app went into the background, those messages were no longer logged—even though my code was still running and requesting updates. Interestingly, the app is considered to be in the foreground if you open Notification Center while in the app, and so the activity can be updated, which is how this demo came about:
<div>
<video controls style="max-width: 50%; margin: 0 auto; display: block;" title="The Bad Apple video, with sound, playing back first in an app and then in a Live Activity in the Notification Center.">
<source src="/2022/live-activities/notif-center.mp4" type="video/mp4">
</video>
</div>
I scratched my head at the issue of background updates for a while, and tried a couple things to no avail, until I attached Console.app to my phone and filtered for "activity". At which point, I saw a bunch of messages like these from `sessionkitd` (which is the system daemon that manages live activities):
<div class="article-content-wide">
<img src="/2022/live-activities/console.png" alt="com.apple.activitykit sessionkitd xpc Process is playing background media and forbidden to update activity: 984">
</div>
Apps playing background audio seem to be completely forbidden from updating Live Activities. The only possible reason for this that I can imagine is to prevent apps from making their own now-playing activities, rather than relying on the system one. I don't know why this is the case, but whatever, back to trying to find workarounds.
At this point, I downloaded the most recent iOS 16 IPSW for the iPhone 14 Pro. After extracting the dyld shared cache from the image (using [this](https://github.com/blacktop/ipsw) helpful tool) I started poking around to try and find the code responsible for deciding if an app is allowed to update its activities. Opening the dyld shared cache in Hopper and searching for "session" revealed several promising-sounding frameworks: SessionCore, SessionFoundation, and SessionKit.
SessionCore turned out to be the one containing that log message. But unfortunately, it's written in Swift and I am not good enough at reverse engineering to decipher what it's actually doing. I did, however, manage to find a couple tidbits just by looking through the strings in the binary:
1. An entitlement named `com.apple.private.sessionkit.backgroundAudioUpdater`
   It wasn't of much use to me, but if someone ever manages to jailbreak these devices, you could have some fun with it.
2. A log message reading "Process looks like a navigation app and can update activity"
This looked more promising because the phrasing "looks like" suggests to me that it's just using a heuristic, rather than determining what the app is for certain. I tried to trick it by adding entitlements for MapKit and location, tracking the user's location while in the background, and trying various audio session categories that seemed more map-app-like. But this effort was to no avail: `sessionkitd` still forbade me from updating the activity in the background.
At this point, I gave up on getting audio working and just settled for playing the video in the Dynamic Island. I had to scrap the previous implementation of detecting new frames because it used `AVPlayer`, which is incompatible with updating in the background. But, since I have an array of frames, playing it back myself is as simple as using a timer to emit a new frame every 1/30th of a second.
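Something along these lines, reusing the hypothetical `BadAppleAttributes` and `activity` from the earlier sketch:

```swift
// Emit one frame every 1/30th of a second and push it to the activity.
var frameIndex = 0
Timer.scheduledTimer(withTimeInterval: 1.0 / 30.0, repeats: true) { timer in
    guard frameIndex < frames.count else {
        timer.invalidate()
        return
    }
    let state = BadAppleAttributes.ContentState(frame: frames[frameIndex])
    frameIndex += 1
    Task {
        // update(using:) was the iOS 16.0 API at the time of writing.
        await activity.update(using: state)
    }
}
```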
With that, I was finally able to play back the video in the island:
<div>
<video controls style="max-width: 50%; margin: 0 auto; display: block;" title="The Bad Apple video playing back silently in the left section of the Dynamic Island.">
<source src="/2022/live-activities/island-silent.mp4" type="video/mp4">
</video>
</div>
You may notice that in both attempts the video appears somewhat blurry. This is because iOS animates any changes to the Live Activity view. As with all widgets, the SwiftUI view tree is serialized and then deserialized and displayed in a separate process, so you don't have direct control over the animation. There is a new [`ContentTransition`](https://developer.apple.com/documentation/swiftui/contenttransition) API that the Live Activity docs say should work, but unfortunately the identity transition "which indicates that content changes shouldn't animate" has no effect on the activity.
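For what it's worth, applying that transition is a one-liner in the widget's view (the view and property names here are mine):

```swift
// Per the docs, .identity indicates that content changes shouldn't
// animate, but in practice it had no effect on the Live Activity.
Image(decorative: frameImage, scale: 1)
    .contentTransition(.identity)
```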
Just having the video in the island was pretty satisfying to see. But, I still really wanted to see the video playing in the island with the audio.
Zhuowei suggested using the system Music app to play back the sound while my app controlled the video. This worked, though it means the app is no longer entirely self-contained. You can use the `MPMusicPlayerController.systemMusicPlayer` to control the system Music app, and playing back a particular track is simple enough:
```swift
import MediaPlayer

// Control the system Music app, rather than an in-process player.
let player = MPMusicPlayerController.systemMusicPlayer
// Queue up the track by filtering the library for its title.
let query = MPMediaQuery.songs()
query.addFilterPredicate(MPMediaPropertyPredicate(value: "badapple", forProperty: MPMediaItemPropertyTitle))
player.setQueue(with: query)
do {
    try await player.prepareToPlay()
    player.play()
} catch {
    // if the song doesn't exist, ignore it
}
```
Annoyingly, the only way of getting the track into the Music app on my phone was by disabling iCloud Music Library and syncing it from my Mac. Why iCloud needs to be disabled to let you manually sync files, I do not know—especially seeing as the manually-synced tracks remain on the device even after iCloud is turned back on.
And this, it turns out, is impossible to record, because apparently the Music app mutes itself during screen recordings (whether using the built-in Control Center method or attaching to QuickTime on a Mac). So, you'll just have to take my word for it.
This was a fun, if utterly pointless, endeavor. If you want to see the code or run it yourself, it's available [here](https://git.shadowfacts.net/shadowfacts/LiveApple).

37
site/show.html Normal file
View File

@ -0,0 +1,37 @@
{% extends "layout/default.html" %}
{% block title %}{{ show.metadata.title }}{% endblock %}
{% block content -%}
<h1 class="article-title">
{{ show.metadata.title }}
</h1>
<p>
Last updated: {{ show.last_updated()|pretty_date }}
</p>
<button onclick="document.querySelectorAll('details:not([open]) > summary').forEach(e => e.click())">Expand All</button>
<div class="tv-show">
{% for episode in show.episodes %}
<details>
<summary>
<h2>{{ episode.title }}</h2>
<span class="episode-watched">
Watched:
<time datetime="{{ episode.date|iso_date }}">
{{ episode.date|pretty_date }}
</time>
</span>
</summary>
<div class="article-content">
{{ episode.content|safe }}
</div>
</details>
{% endfor %}
</div>
{%- endblock %}

View File

@ -0,0 +1,86 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Hamburger Menu Demo</title>
<style>
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-container {
display: none;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
background-color: rgba(0, 0, 0, 0.3);
}
#sidebar-visible:checked + #sidebar-container {
display: block;
}
#sidebar-content {
background-color: #eee;
width: 25%;
height: 100vh;
}
</style>
</head>
<body>
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-container">
<div id="sidebar-content">
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
<p>Some sidebar content</p>
</div>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut at ipsum commodo, hendrerit sapien sit amet, sodales
neque. Aenean imperdiet consequat nulla, quis fringilla quam eleifend vulputate. Duis at felis leo. Etiam at
ligula massa. Nunc ut faucibus velit, sit amet vestibulum lectus. Aliquam sit amet est vitae risus interdum
laoreet. Etiam non commodo libero, id pellentesque sapien. Vivamus in augue tempor, luctus ligula et, dignissim
ex. Pellentesque eleifend, tellus sit amet rhoncus ornare, mi est hendrerit ex, ac blandit felis nisi nec ligula.
Ut at sem non diam iaculis dignissim non eu purus. Etiam placerat sapien ut sagittis porttitor.
Pellentesque mollis nulla magna, at dapibus enim tristique nec. Nam felis augue, suscipit ut dignissim elementum,
lacinia sed turpis. Vivamus justo sem, cursus nec lectus non, consectetur sodales nisi.
</p>
<p>
Quisque id purus in urna tempor vulputate. Fusce a turpis semper odio sollicitudin lobortis. Pellentesque ac
placerat ex, in tincidunt augue. Etiam mattis enim vel tempus porta. Donec maximus, massa at iaculis dapibus,
mauris eros volutpat enim, sed ullamcorper est sem ac mauris. Mauris blandit purus a auctor feugiat. Duis
scelerisque orci at nibh fermentum, ut lobortis libero vestibulum. Vivamus sed mi nec nibh condimentum dapibus.
Sed sed vulputate augue. Maecenas ut lobortis orci. Sed eleifend dapibus auctor. Pellentesque dignissim massa
sed mi facilisis malesuada eget ut leo. Proin mollis ac est malesuada hendrerit. Fusce pulvinar sagittis ultrices.
Cras fermentum sodales lectus quis iaculis.
</p>
<p>
Sed molestie sit amet eros vel placerat. Aliquam feugiat sagittis nisl eget pretium. Quisque ut ligula sit amet
quam tempus commodo. Phasellus vulputate quis turpis et faucibus. Morbi neque est, blandit mollis enim at,
lacinia egestas risus. Cras gravida interdum est vitae ultricies. Fusce consequat neque molestie, molestie elit
et, tincidunt massa.
</p>
</main>
</body>
</html>

View File

@ -0,0 +1,91 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Hamburger Menu Demo</title>
<style>
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-container {
display: none;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
background-color: rgba(0, 0, 0, 0.3);
}
#sidebar-visible:checked + #sidebar-container {
display: flex;
flex-direction: row;
}
#sidebar-content {
background-color: #eee;
width: 25%;
}
#sidebar-dismiss {
flex-grow: 1;
}
</style>
</head>
<body>
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-container">
<div id="sidebar-content">
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
<p>Some sidebar content</p>
</div>
<label for="sidebar-visible" id="sidebar-dismiss"></label>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut at ipsum commodo, hendrerit sapien sit amet, sodales
neque. Aenean imperdiet consequat nulla, quis fringilla quam eleifend vulputate. Duis at felis leo. Etiam at
ligula massa. Nunc ut faucibus velit, sit amet vestibulum lectus. Aliquam sit amet est vitae risus interdum
laoreet. Etiam non commodo libero, id pellentesque sapien. Vivamus in augue tempor, luctus ligula et, dignissim
ex. Pellentesque eleifend, tellus sit amet rhoncus ornare, mi est hendrerit ex, ac blandit felis nisi nec ligula.
Ut at sem non diam iaculis dignissim non eu purus. Etiam placerat sapien ut sagittis porttitor.
Pellentesque mollis nulla magna, at dapibus enim tristique nec. Nam felis augue, suscipit ut dignissim elementum,
lacinia sed turpis. Vivamus justo sem, cursus nec lectus non, consectetur sodales nisi.
</p>
<p>
Quisque id purus in urna tempor vulputate. Fusce a turpis semper odio sollicitudin lobortis. Pellentesque ac
placerat ex, in tincidunt augue. Etiam mattis enim vel tempus porta. Donec maximus, massa at iaculis dapibus,
mauris eros volutpat enim, sed ullamcorper est sem ac mauris. Mauris blandit purus a auctor feugiat. Duis
scelerisque orci at nibh fermentum, ut lobortis libero vestibulum. Vivamus sed mi nec nibh condimentum dapibus.
Sed sed vulputate augue. Maecenas ut lobortis orci. Sed eleifend dapibus auctor. Pellentesque dignissim massa
sed mi facilisis malesuada eget ut leo. Proin mollis ac est malesuada hendrerit. Fusce pulvinar sagittis ultrices.
Cras fermentum sodales lectus quis iaculis.
</p>
<p>
Sed molestie sit amet eros vel placerat. Aliquam feugiat sagittis nisl eget pretium. Quisque ut ligula sit amet
quam tempus commodo. Phasellus vulputate quis turpis et faucibus. Morbi neque est, blandit mollis enim at,
lacinia egestas risus. Cras gravida interdum est vitae ultricies. Fusce consequat neque molestie, molestie elit
et, tincidunt massa.
</p>
</main>
</body>
</html>

View File

@ -0,0 +1,131 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Hamburger Menu Demo</title>
<style>
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
main .sidebar-toggle {
position: absolute;
right: calc(100% + 1rem);
}
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-container {
visibility: hidden;
display: flex;
flex-direction: row;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
background-color: transparent;
transition:
visibility 0.35s ease-in-out,
background-color 0.35s ease-in-out;
}
#sidebar-visible:checked + #sidebar-container {
visibility: visible;
background-color: rgba(0, 0, 0, 0.3);
}
#sidebar-content {
background-color: #eee;
width: 25%;
position: relative;
left: -100%;
padding: 1rem;
transition: left 0.35s ease-in-out;
}
#sidebar-visible:checked + #sidebar-container > #sidebar-content {
left: 0;
}
#sidebar-content .sidebar-toggle {
position: absolute;
right: 1rem;
top: 1rem;
}
#sidebar-dismiss {
flex-grow: 1;
}
@media (max-width: 980px) {
#sidebar-content {
width: 40%;
}
main .sidebar-toggle {
position: unset;
}
}
@media (max-width: 480px) {
#sidebar-content {
width: 100%;
}
}
</style>
</head>
<body>
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-container">
<div id="sidebar-content">
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
<p>Some sidebar content</p>
</div>
<label for="sidebar-visible" id="sidebar-dismiss"></label>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut at ipsum commodo, hendrerit sapien sit amet, sodales
neque. Aenean imperdiet consequat nulla, quis fringilla quam eleifend vulputate. Duis at felis leo. Etiam at
ligula massa. Nunc ut faucibus velit, sit amet vestibulum lectus. Aliquam sit amet est vitae risus interdum
laoreet. Etiam non commodo libero, id pellentesque sapien. Vivamus in augue tempor, luctus ligula et, dignissim
ex. Pellentesque eleifend, tellus sit amet rhoncus ornare, mi est hendrerit ex, ac blandit felis nisi nec ligula.
Ut at sem non diam iaculis dignissim non eu purus. Etiam placerat sapien ut sagittis porttitor.
Pellentesque mollis nulla magna, at dapibus enim tristique nec. Nam felis augue, suscipit ut dignissim elementum,
lacinia sed turpis. Vivamus justo sem, cursus nec lectus non, consectetur sodales nisi.
</p>
<p>
Quisque id purus in urna tempor vulputate. Fusce a turpis semper odio sollicitudin lobortis. Pellentesque ac
placerat ex, in tincidunt augue. Etiam mattis enim vel tempus porta. Donec maximus, massa at iaculis dapibus,
mauris eros volutpat enim, sed ullamcorper est sem ac mauris. Mauris blandit purus a auctor feugiat. Duis
scelerisque orci at nibh fermentum, ut lobortis libero vestibulum. Vivamus sed mi nec nibh condimentum dapibus.
Sed sed vulputate augue. Maecenas ut lobortis orci. Sed eleifend dapibus auctor. Pellentesque dignissim massa
sed mi facilisis malesuada eget ut leo. Proin mollis ac est malesuada hendrerit. Fusce pulvinar sagittis ultrices.
Cras fermentum sodales lectus quis iaculis.
</p>
<p>
Sed molestie sit amet eros vel placerat. Aliquam feugiat sagittis nisl eget pretium. Quisque ut ligula sit amet
quam tempus commodo. Phasellus vulputate quis turpis et faucibus. Morbi neque est, blandit mollis enim at,
lacinia egestas risus. Cras gravida interdum est vitae ultricies. Fusce consequat neque molestie, molestie elit
et, tincidunt massa.
</p>
</main>
</body>
</html>

View File

@ -0,0 +1,77 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Hamburger Menu Demo</title>
<style>
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-content {
display: none;
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
z-index: 100;
}
#sidebar-visible:checked + #sidebar-content {
display: block;
}
</style>
</head>
<body>
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-content">
<p>Some sidebar content</p>
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut at ipsum commodo, hendrerit sapien sit amet, sodales
neque. Aenean imperdiet consequat nulla, quis fringilla quam eleifend vulputate. Duis at felis leo. Etiam at
ligula massa. Nunc ut faucibus velit, sit amet vestibulum lectus. Aliquam sit amet est vitae risus interdum
laoreet. Etiam non commodo libero, id pellentesque sapien. Vivamus in augue tempor, luctus ligula et, dignissim
ex. Pellentesque eleifend, tellus sit amet rhoncus ornare, mi est hendrerit ex, ac blandit felis nisi nec ligula.
Ut at sem non diam iaculis dignissim non eu purus. Etiam placerat sapien ut sagittis porttitor.
Pellentesque mollis nulla magna, at dapibus enim tristique nec. Nam felis augue, suscipit ut dignissim elementum,
lacinia sed turpis. Vivamus justo sem, cursus nec lectus non, consectetur sodales nisi.
</p>
<p>
Quisque id purus in urna tempor vulputate. Fusce a turpis semper odio sollicitudin lobortis. Pellentesque ac
placerat ex, in tincidunt augue. Etiam mattis enim vel tempus porta. Donec maximus, massa at iaculis dapibus,
mauris eros volutpat enim, sed ullamcorper est sem ac mauris. Mauris blandit purus a auctor feugiat. Duis
scelerisque orci at nibh fermentum, ut lobortis libero vestibulum. Vivamus sed mi nec nibh condimentum dapibus.
Sed sed vulputate augue. Maecenas ut lobortis orci. Sed eleifend dapibus auctor. Pellentesque dignissim massa
sed mi facilisis malesuada eget ut leo. Proin mollis ac est malesuada hendrerit. Fusce pulvinar sagittis ultrices.
Cras fermentum sodales lectus quis iaculis.
</p>
<p>
Sed molestie sit amet eros vel placerat. Aliquam feugiat sagittis nisl eget pretium. Quisque ut ligula sit amet
quam tempus commodo. Phasellus vulputate quis turpis et faucibus. Morbi neque est, blandit mollis enim at,
lacinia egestas risus. Cras gravida interdum est vitae ultricies. Fusce consequat neque molestie, molestie elit
et, tincidunt massa.
</p>
</main>
</body>
</html>

View File

@ -0,0 +1,103 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Hamburger Menu Demo</title>
<style>
body {
font-family: sans-serif;
}
main {
position: relative;
max-width: 980px;
margin: 0 auto;
}
#sidebar-visible {
display: none;
}
.sidebar-toggle {
cursor: pointer;
}
#sidebar-container {
visibility: hidden;
display: flex;
flex-direction: row;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
z-index: 100;
background-color: transparent;
transition:
visibility 0.35s ease-in-out,
background-color 0.35s ease-in-out;
}
#sidebar-visible:checked + #sidebar-container {
visibility: visible;
background-color: rgba(0, 0, 0, 0.3);
}
#sidebar-content {
background-color: #eee;
width: 25%;
position: relative;
left: -100%;
transition: left 0.35s ease-in-out;
}
#sidebar-visible:checked + #sidebar-container > #sidebar-content {
left: 0;
}
#sidebar-dismiss {
flex-grow: 1;
}
</style>
</head>
<body>
<input type="checkbox" id="sidebar-visible">
<div id="sidebar-container">
<div id="sidebar-content">
<label for="sidebar-visible" class="sidebar-toggle">Close</label>
<p>Some sidebar content</p>
</div>
<label for="sidebar-visible" id="sidebar-dismiss"></label>
</div>
<main>
<label for="sidebar-visible" class="sidebar-toggle">Open Sidebar</label>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut at ipsum commodo, hendrerit sapien sit amet, sodales
neque. Aenean imperdiet consequat nulla, quis fringilla quam eleifend vulputate. Duis at felis leo. Etiam at
ligula massa. Nunc ut faucibus velit, sit amet vestibulum lectus. Aliquam sit amet est vitae risus interdum
laoreet. Etiam non commodo libero, id pellentesque sapien. Vivamus in augue tempor, luctus ligula et, dignissim
ex. Pellentesque eleifend, tellus sit amet rhoncus ornare, mi est hendrerit ex, ac blandit felis nisi nec ligula.
Ut at sem non diam iaculis dignissim non eu purus. Etiam placerat sapien ut sagittis porttitor.
Pellentesque mollis nulla magna, at dapibus enim tristique nec. Nam felis augue, suscipit ut dignissim elementum,
lacinia sed turpis. Vivamus justo sem, cursus nec lectus non, consectetur sodales nisi.
</p>
<p>
Quisque id purus in urna tempor vulputate. Fusce a turpis semper odio sollicitudin lobortis. Pellentesque ac
placerat ex, in tincidunt augue. Etiam mattis enim vel tempus porta. Donec maximus, massa at iaculis dapibus,
mauris eros volutpat enim, sed ullamcorper est sem ac mauris. Mauris blandit purus a auctor feugiat. Duis
scelerisque orci at nibh fermentum, ut lobortis libero vestibulum. Vivamus sed mi nec nibh condimentum dapibus.
Sed sed vulputate augue. Maecenas ut lobortis orci. Sed eleifend dapibus auctor. Pellentesque dignissim massa
sed mi facilisis malesuada eget ut leo. Proin mollis ac est malesuada hendrerit. Fusce pulvinar sagittis ultrices.
Cras fermentum sodales lectus quis iaculis.
</p>
<p>
Sed molestie sit amet eros vel placerat. Aliquam feugiat sagittis nisl eget pretium. Quisque ut ligula sit amet
quam tempus commodo. Phasellus vulputate quis turpis et faucibus. Morbi neque est, blandit mollis enim at,
lacinia egestas risus. Cras gravida interdum est vitae ultricies. Fusce consequat neque molestie, molestie elit
et, tincidunt massa.
</p>
</main>
</body>
</html>

Some files were not shown because too many files have changed in this diff