commit 1c30172910 (branch: drafts)
Author: Nick Dumas

    merging main ffs

@@ -9,7 +9,7 @@ steps:
 - name: hugo
   image: code.ndumas.com/ndumas/hugo
   commands:
-  - hugo -d /drone/src/public/ --baseURL https://blog.ndumas.com
+  - hugo --enableGitInfo -d /drone/src/public/ --baseURL https://blog.ndumas.com
 - name: publish
   depends_on:
   - hugo
@@ -24,8 +24,10 @@ steps:
     source: /drone/src/public/
     target: /var/www/blog.ndumas.com/
     include: ["*"]
-  branches:
-  - main
+  when:
+    branch:
+      include:
+      - main
 ---
 kind: pipeline
 name: dev
@@ -38,7 +40,7 @@ steps:
 - name: hugo
   image: code.ndumas.com/ndumas/hugo
   commands:
-  - hugo -d /drone/src/public/ --baseURL https://dev.blog.ndumas.com
+  - hugo --enableGitInfo -d /drone/src/public/ --baseURL https://dev.blog.ndumas.com
 - name: publish
   depends_on:
   - hugo
@@ -53,8 +55,10 @@ steps:
     source: /drone/src/public/
     target: /var/www/dev.blog.ndumas.com/
     include: ["*"]
-  branches:
-  - dev
+  when:
+    branch:
+      include:
+      - dev
 ---
 kind: pipeline
 name: drafts
@@ -67,7 +71,7 @@ steps:
 - name: hugo
   image: code.ndumas.com/ndumas/hugo
   commands:
-  - hugo -DF -d /drone/src/public/ --baseURL https://drafts.blog.ndumas.com
+  - hugo --enableGitInfo -DF -d /drone/src/public/ --baseURL https://drafts.blog.ndumas.com
 - name: publish
   depends_on:
   - hugo
@@ -82,5 +86,7 @@ steps:
     source: /drone/src/public/
     target: /var/www/drafts.blog.ndumas.com/
     include: ["*"]
-  branches:
-  - dev
+  when:
+    branch:
+      include:
+      - main

.gitattributes

@ -0,0 +1,8 @@
content/posts/genesis-roadmap/SoilTexture_USDA.png filter=lfs diff=lfs merge=lfs -text
content/posts/mapping-aardwolf-with-graphviz/250-rooms.png filter=lfs diff=lfs merge=lfs -text
content/posts/series-and-navigation/prev-next-links-example.png filter=lfs diff=lfs merge=lfs -text
content/posts/series-and-navigation/series-insert-example.png filter=lfs diff=lfs merge=lfs -text
content/posts/adding-content-to-taxonomy-terms/series-display-screenshot.png filter=lfs diff=lfs merge=lfs -text
content/posts/gardening-with-quartz/drone-builds-screenshot.png filter=lfs diff=lfs merge=lfs -text
content/posts/mapping-aardwolf-with-graphviz/250-rooms.svg filter=lfs diff=lfs merge=lfs -text
content/about/resume.pdf filter=lfs diff=lfs merge=lfs -text

.gitignore

@@ -2,3 +2,4 @@
 public/
 *.lock
 */_gen/*
+.obsidian/

@ -0,0 +1,2 @@
FROM golang
RUN CGO_ENABLED=1 go install -tags extended github.com/gohugoio/hugo@latest

@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 682.66669 682.66669"
height="682.66669"
width="682.66669"
xml:space="preserve"
id="svg2"
version="1.1"><metadata
id="metadata8"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
id="clipPath20"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path18"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></clipPath><clipPath
id="clipPath26"
clipPathUnits="userSpaceOnUse"><path
id="path24"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath><clipPath
id="clipPath36"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path34"
d="M 3057.78,1754.95 H 1189.97 l 872.23,1610.1 h 1867.83 l -872.25,-1610.1" /></clipPath><clipPath
id="clipPath42"
clipPathUnits="userSpaceOnUse"><path
id="path40"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath></defs><g
transform="matrix(1.3333333,0,0,-1.3333333,0,682.66667)"
id="g10"><g
transform="scale(0.1)"
id="g12"><g
id="g14"><g
clip-path="url(#clipPath20)"
id="g16"><g
clip-path="url(#clipPath26)"
id="g22"><path
id="path28"
style="fill:none;stroke:#000000;stroke-width:320;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></g></g></g><g
id="g30"><g
clip-path="url(#clipPath36)"
id="g32"><g
clip-path="url(#clipPath42)"
id="g38"><path
id="path44"
style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none"
d="m 1139.97,1704.95 h 2840.06 v 1710.1 H 1139.97 Z" /></g></g></g></g></g></svg>


@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 682.66669 682.66669"
height="682.66669"
width="682.66669"
xml:space="preserve"
id="svg2"
version="1.1"><metadata
id="metadata8"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
id="clipPath20"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path18"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></clipPath><clipPath
id="clipPath26"
clipPathUnits="userSpaceOnUse"><path
id="path24"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath><clipPath
id="clipPath36"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path34"
d="M 3057.78,1754.95 H 1189.97 l 872.23,1610.1 h 1867.83 l -872.25,-1610.1" /></clipPath><clipPath
id="clipPath42"
clipPathUnits="userSpaceOnUse"><path
id="path40"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath></defs><g
transform="matrix(1.3333333,0,0,-1.3333333,0,682.66667)"
id="g10"><g
transform="scale(0.1)"
id="g12"><g
id="g14"><g
clip-path="url(#clipPath20)"
id="g16"><g
clip-path="url(#clipPath26)"
id="g22"><path
id="path28"
style="fill:none;stroke:#979797;stroke-width:320;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></g></g></g><g
id="g30"><g
clip-path="url(#clipPath36)"
id="g32"><g
clip-path="url(#clipPath42)"
id="g38"><path
id="path44"
style="fill:#a5a5a5;fill-opacity:1;fill-rule:nonzero;stroke:none"
d="m 1139.97,1704.95 h 2840.06 v 1710.1 H 1139.97 Z" /></g></g></g></g></g></svg>


@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 682.66669 682.66669"
height="682.66669"
width="682.66669"
xml:space="preserve"
id="svg2"
version="1.1"><metadata
id="metadata8"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
id="clipPath20"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path18"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></clipPath><clipPath
id="clipPath26"
clipPathUnits="userSpaceOnUse"><path
id="path24"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath><clipPath
id="clipPath36"
clipPathUnits="userSpaceOnUse"><path
style="clip-rule:evenodd"
id="path34"
d="M 3057.78,1754.95 H 1189.97 l 872.23,1610.1 h 1867.83 l -872.25,-1610.1" /></clipPath><clipPath
id="clipPath42"
clipPathUnits="userSpaceOnUse"><path
id="path40"
d="M 0,0 H 5120 V 5120 H 0 Z" /></clipPath></defs><g
transform="matrix(1.3333333,0,0,-1.3333333,0,682.66667)"
id="g10"><g
transform="scale(0.1)"
id="g12"><g
id="g14"><g
clip-path="url(#clipPath20)"
id="g16"><g
clip-path="url(#clipPath26)"
id="g22"><path
id="path28"
style="fill:none;stroke:#ffffff;stroke-width:320;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
d="M 2560,0 C 3973.85,0 5120,1146.15 5120,2560 5120,3973.85 3973.85,5120 2560,5120 1146.15,5120 0,3973.85 0,2560 0,1146.15 1146.15,0 2560,0 Z" /></g></g></g><g
id="g30"><g
clip-path="url(#clipPath36)"
id="g32"><g
clip-path="url(#clipPath42)"
id="g38"><path
id="path44"
style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
d="m 1139.97,1704.95 h 2840.06 v 1710.1 H 1139.97 Z" /></g></g></g></g></g></svg>


@@ -3,7 +3,7 @@
 # https://blowfish.page/docs/getting-started/
 # theme = "blowfish" # UNCOMMENT THIS LINE
-# baseURL = "https://your_domain.com/"
+baseURL = "https://blog.ndumas.com/"
 defaultContentLanguage = "en"
 # pluralizeListTitles = "true" # hugo function useful for non-english languages, find out more in https://gohugo.io/getting-started/configuration/#pluralizelisttitles
@@ -65,3 +65,6 @@ enableEmoji = true
 name = 'fragmentrefs'
 type = 'fragments'
 weight = 10
+[permalinks]
+posts = "/:year/:month/:filename"

@@ -1,7 +1,7 @@
 languageCode = "en"
 languageName = "English"
 weight = 1
-title = "Blowfish"
+title = "Form and Function"
 [params]
 displayName = "EN"
@@ -11,14 +11,14 @@ title = "Blowfish"
 # logo = "img/logo.png"
 # secondaryLogo = "img/secondary-logo.png"
 # description = "My awesome website"
-# copyright = "Copy, _right?_ :thinking_face:"
-# [author]
-# name = "Your name here"
+copyright = "CC-BY-NC 2024 Nick Dumas"
+[author]
+name = "Nick Dumas"
 # image = "img/blowfish_logo.png"
 # headline = "I'm only human"
 # bio = "A little bit about you"
-# links = [
+links = [
 # { email = "mailto:hello@your_domain.com" },
 # { link = "https://link-to-some-website.com/" },
 # { amazon = "https://www.amazon.com/hz/wishlist/ls/wishlist-id" },
@@ -32,7 +32,7 @@ title = "Blowfish"
 # { facebook = "https://facebook.com/username" },
 # { flickr = "https://www.flickr.com/photos/username/" },
 # { foursquare = "https://foursquare.com/username" },
-# { github = "https://github.com/username" },
+{ github = "https://github.com/therealfakemoot" },
 # { gitlab = "https://gitlab.com/username" },
 # { google = "https://www.google.com/" },
 # { hashnode = "https://username.hashnode.dev" },
@@ -41,8 +41,9 @@ title = "Blowfish"
 # { keybase = "https://keybase.io/username" },
 # { kickstarter = "https://www.kickstarter.com/profile/username" },
 # { lastfm = "https://lastfm.com/user/username" },
-# { linkedin = "https://linkedin.com/in/username" },
-# { mastodon = "https://mastodon.instance/@username" },
+{ linkedin = "https://www.linkedin.com/in/nicholas-dumas-92b58736/" },
+{ mastodon = "https://fosstodon.org/@nickdumas" },
+{ bandcamp_white_circle = "https://signalnoise.bandcamp.com" },
 # { medium = "https://medium.com/username" },
 # { microsoft = "https://www.microsoft.com/" },
 # { orcid = "https://orcid.org/userid" },
@@ -66,4 +67,4 @@ title = "Blowfish"
 # { youtube = "https://youtube.com/username" },
 # { ko-fi = "https://ko-fi.com/username" },
 # { codeberg = "https://codeberg.org/username"},
-# ]
+]

@@ -10,20 +10,27 @@
 # overridden by providing a weight value. The menu will then be
 # ordered by weight from lowest to highest.
-#[[main]]
-# name = "Blog"
-# pageRef = "posts"
-# weight = 10
+[[main]]
+name = "Blog"
+pre = "edit"
+pageRef = "posts"
+weight = 10
+[[main]]
+name = "About Me"
+pre = "circle-info"
+pageRef = "/about"
+weight = 10
 #[[main]]
 # name = "Parent"
 # weight = 20
-#[[main]]
-# name = "example sub-menu 1"
-# parent = "Parent"
-# pageRef = "posts"
-# weight = 20
+[[main]]
+name = "Projects"
+parent = "About Me"
+pageRef = "projects"
+weight = 20
 #[[main]]
 # name = "example sub-menu 2"
@@ -58,12 +65,23 @@
 # the copyright notice. Configure as per the main menu above.
-# [[footer]]
-# name = "Tags"
-# pageRef = "tags"
-# weight = 10
-# [[footer]]
-# name = "Categories"
-# pageRef = "categories"
-# weight = 20
+[[footer]]
+name = "Tags"
+pre = "tag"
+pageRef = "tags"
+weight = 10
+[[footer]]
+pre = "eye"
+name = "Live"
+url = "https://analytics.ndumas.com/blog.ndumas.com"
+parent = "Analytics"
+weight = 50
+[[footer]]
+pre = "eye"
+name = "Dev"
+url = "https://analytics.ndumas.com/dev.ndumas.com"
+parent = "Analytics"
+weight = 50

@@ -1,3 +1,6 @@
 [hugoVersion]
 extended = false
 min = "0.87.0"
+[[imports]]
+path = "github.com/nunocoracao/blowfish/v2"

@@ -5,12 +5,12 @@
 # Refer to the theme docs for more details about each of these parameters.
 # https://blowfish.page/docs/configuration/#theme-parameters
-colorScheme = "blowfish"
+colorScheme = "forest"
 defaultAppearance = "light" # valid options: light or dark
-autoSwitchAppearance = true
+autoSwitchAppearance = false
 enableSearch = true
-enableCodeCopy = false
+enableCodeCopy = true
 # mainSections = ["section1", "section2"]
 # robots = ""
@@ -22,7 +22,7 @@ disableTextInHeader = false
 # defaultFeaturedImage = "IMAGE.jpg" # used as default for featured images in all articles
 # highlightCurrentMenuArea = true
-# smartTOC = true
+smartTOC = true
 # smartTOCHideUnfocusedChildren = true
 [header]
@@ -38,9 +38,9 @@ disableTextInHeader = false
 [homepage]
 layout = "profile" # valid options: page, profile, hero, card, background, custom
 #homepageImage = "IMAGE.jpg" # used in: hero, and card
-showRecent = false
-showRecentItems = 5
-showMoreLink = false
+showRecent = true
+showRecentItems = 7
+showMoreLink = true
 showMoreLinkDest = "/posts"
 cardView = false
 cardViewScreenWidth = false
@@ -51,14 +51,14 @@ disableTextInHeader = false
 showViews = false
 showLikes = false
 showDateOnlyInArticle = false
-showDateUpdated = false
+showDateUpdated = true
 showAuthor = true
 # showAuthorBottom = false
 showHero = false
 # heroStyle = "basic" # valid options: basic, big, background, thumbAndBackground
 layoutBackgroundBlur = true # only used when heroStyle equals background or thumbAndBackground
 layoutBackgroundHeaderSpace = true # only used when heroStyle equals background
-showBreadcrumbs = false
+showBreadcrumbs = true
 showDraftLabel = true
 showEdit = false
 # editURL = "https://github.com/username/repo/"
@@ -68,22 +68,22 @@ disableTextInHeader = false
 showPagination = true
 invertPagination = false
 showReadingTime = true
-showTableOfContents = false
-# showRelatedContent = false
-# relatedContentLimit = 3
+showTableOfContents = true
+showRelatedContent = true
+relatedContentLimit = 3
 showTaxonomies = false
 showAuthorsBadges = false
 showWordCount = true
 # sharingLinks = [ "linkedin", "twitter", "reddit", "pinterest", "facebook", "email", "whatsapp", "telegram"]
-showZenMode = false
+showZenMode = true
 [list]
 showHero = false
 # heroStyle = "background" # valid options: basic, big, background, thumbAndBackground
 layoutBackgroundBlur = true # only used when heroStyle equals background or thumbAndBackground
 layoutBackgroundHeaderSpace = true # only used when heroStyle equals background
-showBreadcrumbs = false
-showSummary = false
+showBreadcrumbs = true
+showSummary = true
 showViews = false
 showLikes = false
 showTableOfContents = false
@@ -101,7 +101,7 @@ disableTextInHeader = false
 showTermCount = true
 showHero = false
 # heroStyle = "background" # valid options: basic, big, background, thumbAndBackground
-showBreadcrumbs = false
+showBreadcrumbs = true
 showViews = false
 showLikes = false
 showTableOfContents = false
@@ -110,7 +110,7 @@ disableTextInHeader = false
 [term]
 showHero = false
 # heroStyle = "background" # valid options: basic, big, background, thumbAndBackground
-showBreadcrumbs = false
+showBreadcrumbs = true
 showViews = false
 showLikes = false
 showTableOfContents = true

@ -0,0 +1,22 @@
---
title: "About Me"
summary: "A brief bio and my credentials"
showDate: false
showReadingTime: false
---
## Who am I?
Hi, my name's Nick, he/him, I'm a software developer, infrastructure engineer, and artist. I've been working with software for about 20 years now in languages like Javascript, PHP, Python and Go, and I've used those in service of a variety of disciplines like web development, generative art, and system administration.
I do a little drawing now and then but my big artistic focuses are music and tabletop gaming. I make synthesizer beats with [bespoke synth](https://www.bespokesynth.com/) and have a weekly game session where we try out new game systems and regularly rotate through who's running what.
## My Credentials
In no particular order, I've worked on:
- Python set-top box and browser frontends for OTT ( over-the-top ) media delivery networks
- On-prem Kubernetes clusters ingesting tens of thousands of data broker events per hour consistently 24/7
- Building and maintaining forums in both Symfony and in-house PHP frameworks
- a bunch of Markov chain implementations, with a focus on mimicking specific styles on-demand
-
[Here](/about/resume.pdf)'s a copy of my resume that provides a timeline and some concrete details.

content/about/resume.pdf (binary, stored with Git LFS; contents not shown)

@ -0,0 +1,125 @@
---
draft: false
title: "Using Taxonomy Terms in Hugo: Adding Content"
aliases: ["Using Taxonomy Terms in Hugo: Adding Content"]
series: ["blogging-with-quartz"]
series_order: 5
date: "2023-04-09"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: ""
showFullContent: false
tags:
- hugo
- quartz
- webdev
---
## What am I Doing?
As I've been writing, I've collected posts about specific projects into series through Hugo's [taxonomies](https://gohugo.io/content-management/taxonomies/). The example I've been working on is my [Blogging with Quartz](https://blog.ndumas.com/series/blogging-with-quartz/) series.
## Why does it work?
Taxonomies are great; you can even assign multiple taxonomies to a single page when that's relevant ( for a movie database or something, I suppose ).
## Why doesn't it work?
The base implementation in Quartz is pretty bare, though. The term listing shows the name and the members of the taxonomy, which is great but only goes so far. I'd love to be able to write an overview of each series and have the option of embedding media.
## What are the options?
Hugo's Taxonomies feature is prepared for this, but Quartz's templates were not.
There were two tricky things about this.
### Where does it go?
I chased a few weeks worth of red herrings here, so I'll cut to the chase.
Taxonomies contain terms. Taxonomies are content. This means my task was pretty simple:
```
content/
notes/
series/
blogging-with-quartz/
_index.md
```
Creating an `<taxonomyType>/<taxonomyItem>/_index.md` is the first step. Here's what I've got so far in `content/series/blogging-with-quartz/_index.md`:
```
+++
title = "Blogging with Quartz: a trigonal crystal in a round hole"
+++
this is where i talk about building my blog with quartz
```
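As an aside, Hugo can scaffold that file for you. This is just a sketch assuming the default archetype and the directory layout above, not necessarily what I ran:
```bash
# Creates content/series/blogging-with-quartz/_index.md from the default archetype.
hugo new series/blogging-with-quartz/_index.md
```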
The other half of this setup is modifying the term template at `layouts/_default/term.html`. I had to add the `{{ .Content}}` line ( that's really all it took ) so it would render out the body content of my `_index.md` as above.
```html
<!DOCTYPE html>
<html lang="{{ .Lang }}">
{{ partial "head.html" . }}
<body>
{{partial "search.html" .}}
<div class="singlePage">
<!-- Begin actual content -->
{{partial "header.html" .}}
<article>
<h1>{{ i18n "tag" }}: {{ .Title }}</h1>
{{ .Content }}
{{with .Params.summary}}
<p>{{.}}</p>
{{end}}
{{partial "page-list.html" .Paginator.Pages}}
{{ template "_internal/pagination.html" . }}
</article>
{{partial "contact.html" .}}
</div>
</body>
</html>
```
### Where does it come from?
There was a really confusing issue I had with the title section of the term page. The title was appearing correctly, even pulling from the `_index.md`, but it was being prefixed with `Tag: `.
It took me some digging to find the cause. I started with grepping for `Tag`, and found it in the `i18n/en.toml`. This told me that a template was calling `i18n`, and there we had it in `layouts/_default/term.html`
```html
<h1>{{ i18n "tag" }}: {{ .Title }}</h1>
```
In practice, I don't think
The final template looks like this.
```html
<!DOCTYPE html>
<html lang="{{ .Lang }}">
{{ partial "head.html" . }}
<body>
{{partial "search.html" .}}
<div class="singlePage">
<!-- Begin actual content -->
{{partial "header.html" .}}
<article>
<h1>{{ i18n "interactive_graph" }}: {{ .Title }}</h1>
{{ .Content }}
{{with .Params.summary}}
<p>{{.}}</p>
{{end}}
{{partial "page-list.html" .Paginator.Pages}}
{{ template "_internal/pagination.html" . }}
</article>
{{partial "contact.html" .}}
</div>
</body>
</html>
```
## Now what?
We've got a nice looking series page now:
{{< figure
src="series-display-screenshot.png"
alt="Screenshot of a website with four links to blog posts connected as a series"
caption=""
>}}
The next steps are to start filling out my series pages and writing about my projects. This actually clears out the outstanding list of projects I had for the blog, so I don't have any big structural stuff to do.

@ -0,0 +1,130 @@
---
draft: false
title: "Automating Caddy on my DigitalOcean Droplet"
date: "2023-01-05"
author: "Nick Dumas"
authorTwitter: "" #do not include @
cover: ""
tags: ["webdev", "devops"]
keywords: ["", ""]
summary: "Automation ambitions fall flat"
showFullContent: false
---
## Defining units of work
I've got a few different websites that I want to run: this blog, my portfolio, and my about page which acts as a hub for my other sites, my Bandcamp, and whatever else I end up wanting to show off.
To keep things maintainable and reproducible, I decided to stop trying to maintain a monolithic all-in-one configuration file. The monolithic approach made it way harder to keep changes atomic; multiple iterations on my blog started impacting my prank websites, and it became harder and harder to return my sites to a working state.
## Proof of concept
The first test case was my blog because I knew the Hugo build was fine, I just needed to get Caddy serving again.
A static site is pretty straightforward with Caddy:
```
blog.ndumas.com {
	encode gzip
	file_server
	root * /var/www/blog.ndumas.com
}
```
And telling Caddy to load it:
```bash
curl "http://localhost:2019/load" \
-H "Content-Type: text/caddyfile" \
--data-binary @blog.ndumas.com.caddy
```
This all works perfectly; Caddy's more than happy to load this, but it does warn that the file hasn't been formatted with `caddy fmt`:
```
[{"file":"Caddyfile","line":2,"message":"input is not formatted with 'caddy fmt'"}]
```
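That warning is easy enough to quiet; as far as I know, `caddy fmt` can rewrite the file in place:
```bash
# Reformat the Caddyfile in place so the admin API stops complaining.
caddy fmt --overwrite blog.ndumas.com.caddy
```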
## The loop
Here's where things went sideways. Now that I have two unit files, I'm ready to work on the tooling that will dynamically load my config. For now, I'm just chunking it all out in `bash`. I've got no particular fondness for `bash`, but it's always a bit of a matter of pride to see whether or not I *can*.
```bash
# load-caddyfile
#! /bin/bash
function loadConf() {
curl localhost:2019/load \
-X POST \
-H "Content-Type: text/caddyfile" \
--data-binary @"$1"
}
loadConf "$1"
```
```bash
# load-caddyfiles
#! /bin/bash
source load-caddyfile
# sudo caddy stop
# sudo caddy start
for f in "$1/*.caddy"; do
echo -e "Loading $(basename $f)"
loadConf "$f"
echo
done
```
After implementing the loop my barelylegaltrout.biz site started throwing a 525 while blog.ndumas.com continued working perfectly. This was a real head scratcher, and I had to let the problem sit for a day before I came back to it.
After some boring troubleshooting legwork, I realized I misunderstood how the `/load` endpoint works. This endpoint completely replaces the current config with the provided payload. In order to do partial updates, I'd need to use the `PATCH` calls, and look who's back?
## can't escape JSON
The `PATCH` API *does* let you do partial updates, but it requires your payloads be JSON which does make sense. Because my current set of requirements explicitly excludes any JSON ( for now ), I'm going to have to ditch my dreams of modular code.
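For illustration, a partial update through the admin API looks roughly like this. The JSON config path here ( the server name `srv0` and the route index ) is an assumption and depends on how your Caddyfile was adapted:
```bash
# Replace a single route in the running config instead of reloading everything.
# "srv0" and the route index are assumptions about the adapted config structure.
curl -X PATCH "http://localhost:2019/config/apps/http/servers/srv0/routes/0" \
	-H "Content-Type: application/json" \
	--data-binary @route.json
```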
Not all my code has gone to waste, though. Now that I know `POST`ing to `/load` overwrites the whole configuration, I don't need to worry about stopping/restarting the caddy process to get a clean slate. `load-caddyfile` will let me keep iterating as I make changes.
## Proxies
In addition to the static sites, I'm running a few applications to make life a little easier. I'll showcase my Gitea/Gitlab and Asciinema configs. At the moment, my setup for these is really janky: I've got a `tmux` session on my droplet where I've manually invoked `docker-compose up`. I'll leave cleaning that up and making systemd units or something proper out of them for a future project.
Reverse proxying with Caddy is blessedly simple:
```
cast.ndumas.com {
encode gzip
reverse_proxy localhost:10083
}
```
```
code.ndumas.com {
encode gzip
reverse_proxy localhost:3069
}
```
With that, my Gitea/Gitlab is up and running, and my Asciinema instance is as well:
[![asciicast](https://cast.ndumas.com/a/28.svg)](https://cast.ndumas.com/a/28)
## Back to Square One
After finally making an honest attempt to learn how to work with Caddy 2 and its configurations and admin API, I want to take a swing at making a systemd unit file for Caddy to make this a proper setup.
## Finally
Here's what's currently up and running:
- [My blog](https://blog.ndumas.com)
- [Asciinema](https://cast.ndumas.com)
- [Gitea](https://code.ndumas.com)
I've had loads of toy projects over the years ( stay tuned for butts.ndumas.com ) which may come back, but for now I'm hoping these are going to help me focus on creative, stimulating projects in the future.
The punchline is that I still haven't really automated Caddy; good thing you can count on `tmux`
[![asciicast](https://cast.ndumas.com/a/aWYCFj69CjOg94kgjbGg4n2Uk.svg)](https://cast.ndumas.com/a/aWYCFj69CjOg94kgjbGg4n2Uk)
The final code can be found [here](https://code.ndumas.com/ndumas/caddyfile). Nothing fancy, but troublesome enough that it's worth remembering the problems I had.

@ -0,0 +1,494 @@
---
draft: false
title: "Beautiful Builds with Bazel"
aliases: ["Beautiful Builds with Bazel"]
series: ["building-with-bazel"]
series_order: 1
date: "2023-08-25"
author: "Nick Dumas"
cover: ""
summary: "bzlmod makes bazel extremely appealing and isn't hard to grasp for anyone already familiar with go modules. My frustration with make for complex builds led me to bazel."
showFullContent: false
tags:
- bazel
- golang
- devops
---
## What am I Doing?
I write programs to solve problems. Most of the time these are pretty personal and only get used once or twice, never see the light of day again, and that's fine.
Lately, though, I've been working on tooling for my Obsidian notes and I want to make my tools as accessible as possible. This involves a couple steps that are particularly important, tedious, and error prone when done manually:
- trying to cross compile my binaries for a relatively long list of cpu/arch combinations
- build a docker image
- push that docker image to OCI image repositories
- run tests
- run benchmarks
- cache builds effectively
I've started with a Makefile I stole from some gist I didn't save a link to. This [makefile](https://code.ndumas.com/ndumas/wikilinks-parser/src/tag/v0.0.5/Makefile) is kinda hefty so I'm gonna focus on compiling go binaries and preparing OCI images that contain those binaries.
This makefile's extremely opinionated, hyper-targeted at Go builds. It assumes that your binaries live in `cmd/binaryName/`.
```Makefile
# Parameters
PKG = code.ndumas.com/ndumas/obsidian-markdown
NAME = parse-wikilinks
DOC = README.md LICENSE
DISTDIR ?= $(WD)/dist
CMDS := $(shell find "$(CMDDIR)/" -mindepth 1 -maxdepth 1 -type d | sed 's/ /\\ /g' | xargs -n1 basename)
INSTALL_TARGETS := $(addprefix install-,$(CMDS))
VERSION ?= $(shell git -C "$(MD)" describe --tags --dirty=-dev)
COMMIT_ID := $(shell git -C "$(MD)" rev-parse HEAD | head -c8)
LDFLAGS = -X $(PKG).Version=$(VERSION) -X $(PKG).Build=$(COMMIT_ID)
GOCMD = go
GOINSTALL = $(GOCMD) install -a -tags "$(BUILD_TAGS)" -ldflags "$(LDFLAGS)"
GOBUILD = gox -osarch="!darwin/386" -rebuild -gocmd="$(GOCMD)" -arch="$(ARCHES)" -os="$(OSES)" -output="$(OUTTPL)" -tags "$(BUILD_TAGS)" -ldflags "$(LDFLAGS)"
GZCMD = tar -czf
SHACMD = sha256sum
ZIPCMD = zip
build: $(CMDS)
$(CMDS): setup-dirs dep
$(GOBUILD) "$(CMDPKG)/$@" | tee "$(RPTDIR)/build-$@.out"
install: $(INSTALL_TARGETS)
$(INSTALL_TARGETS):
$(GOINSTALL) "$(CMDPKG)/$(subst install-,,$@)"
dist: clean build
for docfile in $(DOC); do \
for dir in "$(DISTDIR)"/*; do \
cp "$(PKGDIR)/$$docfile" "$$dir/"; \
done; \
done
cd "$(DISTDIR)"; for dir in ./*linux*; do $(GZCMD) "$(basename "$$dir").tar.gz" "$$dir"; done
cd "$(DISTDIR)"; for dir in ./*windows*; do $(ZIPCMD) "$(basename "$$dir").zip" "$$dir"; done
cd "$(DISTDIR)"; for dir in ./*darwin*; do $(GZCMD) "$(basename "$$dir").tar.gz" "$$dir"; done
cd "$(DISTDIR)"; find . -maxdepth 1 -type f -printf "$(SHACMD) %P | tee \"./%P.sha\"\n" | sh
$(info "Built v$(VERSION), build $(COMMIT_ID)")
```
Because this isn't a makefile tutorial, I'm going to just hit the high notes and explain why this isn't working. Given the parameters at the top, it looks in `cmd/` for directories and passes them to `go build` with `-ldflags` thrown in.
Here we have the machinery behind `make bump`, github link below. `bump` is a tool that'll automatically create semantic versioning tags in a git repo based on existing tags. You can `bump {patch,minor,major}` and it'll create the next tag in the versioning sequence for you.
```Makefile
setup-bump:
go install github.com/guilhem/bump@latest
bump-major: setup-bump
bump major
bump-minor: setup-bump
bump minor
bump-patch: setup-bump
bump patch
```
## Why does it work?
Automation is a great thing. This makefile inspired me to start actually using semantic versioning diligently. It didn't hurt that I was working on a lot of Drone pipelines at the time and was starting to get incredibly frustrated debugging `:latest` images and never being certain what code was running.
Working with bash is never...pleasant, but it definitely gets the job done. I'm no stranger to shell scripts and the minor modifications needed to get `bump` integrated and other miscellany I've legitimately fully forgotten by now ( document your code for your own sake ) posed no real burden.
This makefile helped me rapidly iterate on code and release it in a way that was easily consumable, including docker images pushed to my self-hosted registry on Gitea. The pipeline that handles this blog post is using a docker image tagged by the makefile components described above, in fact.
## Why doesn't it work?
The real kink in the hose ended up being gox. Gox worked great until I tried to generate alpine builds. It was possible, but I'd have to start changing the makefile pretty significantly, write bash helper functions, and more. I decided that wasn't worth the maintenance overhead pretty quickly and started looking into alternatives.
The other problem is that the makefile isn't "smart": the solutions for cross-compilation ended up being clunky to compose with Docker builds.
## What are the options?
The only real solution is a smarter build system. I had to choose between hand-rolling something with a bunch of switch statements in bash, or I could look into more modern toolkits. I looked into three:
- meson
- bazel
- scons
## The contenders
Bazel looked like it had the most to offer:
- hermetic builds
- reproducible builds
- aggressive, fine-grained caching
- extensible
All of these fit the bill for what I needed. In particular, it has pretty decent go support through [rules_go](https://github.com/bazelbuild/rules_go) and [gazelle](https://github.com/bazelbuild/bazel-gazelle), which we'll look at in more depth later.
There's not a lot to say here, I knew nothing about any of the three candidates and when I started I wasn't certain I'd stick with bazel all the way. Sometimes you just have to try stuff and see how it feels.
### Caution
bazel seems to be going through an ecosystem shift from the WORKSPACE paradigm to bzlmod. Documentation does exist, but it might not be in the README yet. I've tested the code here and it works in this narrow case. Caveat emptor.
## Getting Going with Gazelle
With that, here is how a modern bzlmod enabled go repo is born.
### Building Go code
The first step is, in no particular order, init your git repository and init your go module. The former is helpful for keeping track of when you broke something and the latter is required for gazelle to do its job.
- `go mod init`
- `git init`
Write your go code. The simplest hello world will work for demonstration purposes.
Create your `MODULE.bazel` file.
``` {title="MODULE.bazel"}
module(
name = "obsidian-markdown", # set this manually
repo_name = "code.ndumas.com_ndumas_obsidian-markdown", # this is the name of your go module, with /'s replaces with _'s
)
bazel_dep(name = "gazelle", version = "0.32.0")
bazel_dep(name = "rules_go", version = "0.41.0")
go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps")
go_deps.from_file(go_mod = "//:go.mod")
```
`module()` is how you declare a top-level bazel project. Everything is namespaced under this module.
`bazel_dep` tells bazel to retrieve modules from the [bazel registry](https://registry.bazel.build/).
`use_extension` imports functions from bazel modules; here we're importing `go_deps` because it'll read out `go.mod` file and help bazel automatically calculate direct and transitive dependencies.
and `BUILD.bazel`
``` {title="BUILD.bazel"}
load("@gazelle//:def.bzl", "gazelle")
gazelle(name = "gazelle")
gazelle(
name = "gazelle-update-repos",
args = [
"-from_file=go.mod",
"-to_macro=deps.bzl%go_dependencies",
"-prune",
],
command = "update-repos",
)
```
This is straight from the gazelle README. You `load()` the gazelle module and declare two build targets: `gazelle` and `gazelle-update-repos`. After the rest of the setup, these targets are what will do the work of actually generating build/test targets for all your code.
Next, `.bazelrc`
``` {title=".bazelrc"}
common --experimental_enable_bzlmod
# Disable lockfiles until it works properly.
# https://github.com/bazelbuild/bazel/issues/19068
common --lockfile_mode=off
###############################
# Directory structure #
###############################
# Artifacts are typically placed in a directory called "dist"
# Be aware that this setup will still create a bazel-out symlink in
# your project directory, which you must exclude from version control and your
# editor's search path.
build --symlink_prefix=dist/
###############################
# Output #
###############################
# A more useful default output mode for bazel query, which
# prints "ng_module rule //foo:bar" instead of just "//foo:bar".
query --output=label_kind
# By default, failing tests don't print any output, it's logged to a
# file instead.
test --test_output=errors
```
Only the first line is required; the rest are just conveniences. I do **strongly** recommend the `query` setting though, extremely nice for debugging.
Finally, a `.gitignore` to mask out generated artifacts.
``` {title=".gitignore"}
dist/*
reports/*
bazel-*
*.bazel.lock
```
Run `bazel run //:gazelle`. This will auto-generate a lot of scaffolding, and probably emit a `buildozer` command that will modify something. This is the build system ( specifically gazelle ) automatically detecting dependencies that are declared in `go.mod` but not in your bazel code.
```bash
$ bazel run //:gazelle
WARNING: /home/ndumas/work/gomud/MODULE.bazel:8:24: The module extension go_deps defined in @gazelle//:extensions.bzl reported incorrect imports of repositorie
s via use_repo():
Not imported, but reported as direct dependencies by the extension (may cause the build to fail):
com_github_therealfakemoot_go_telnet
** You can use the following buildozer command(s) to fix these issues:
buildozer 'use_repo_add @gazelle//:extensions.bzl go_deps com_github_therealfakemoot_go_telnet' //MODULE.bazel:all
INFO: Analyzed target //:gazelle (0 packages loaded, 0 targets configured).
INFO: Found 1 target...
Target //:gazelle up-to-date:
dist/bin/gazelle-runner.bash
dist/bin/gazelle
INFO: Elapsed time: 0.473s, Critical Path: 0.01s
INFO: 1 process: 1 internal.
INFO: Build completed successfully, 1 total action
INFO: Running command line: dist/bin/gazelle
$ git st
## dev
?? .bazelrc
?? .gitignore
?? BUILD
?? MODULE.bazel
?? cmd/BUILD.bazel
?? protocol/BUILD.bazel
$
```
Running the `buildozer` command and then `git diff` shows its work:
```bash
$ buildozer 'use_repo_add @gazelle//:extensions.bzl go_deps com_github_therealfakemoot_go_telnet' //MODULE.bazel:all
fixed /home/ndumas/work/gomud/MODULE.bazel
$ git st
## dev
M MODULE.bazel
$ git diff
diff --git a/MODULE.bazel b/MODULE.bazel
index b482f31..8e82690 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -7,3 +7,4 @@ bazel_dep(name = "gazelle", version = "0.32.0")
go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps")
go_deps.from_file(go_mod = "//:go.mod")
+use_repo(go_deps, "com_github_therealfakemoot_go_telnet")
$
```
This diff shows how bazel references external dependencies. gazelle's `go_deps` tool acts as a provider for these lookups and offers information bazel needs to verify its build graphs. Yours may look different depending on what you've imported, if anything.
Examining the produced `BUILD.bazel` file should yield something like this for a `main` package.
``` {title="cmd/echo/BUILD.bazel"}
load("@rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "echo_lib",
srcs = ["server.go"],
importpath = "code.ndumas.com/ndumas/gomud/cmd/echo",
visibility = ["//visibility:private"],
)
go_binary(
name = "echo",
embed = [":echo_lib"],
visibility = ["//visibility:public"],
)
```
If the package is importable, you'll see something like this:
``` {title="protocol/BUILD.bazel"}
load("@rules_go//go:def.bzl", "go_library")
go_library(
name = "protocol",
srcs = ["telnet.go"],
importpath = "code.ndumas.com/ndumas/gomud/protocol",
visibility = ["//visibility:public"],
)
```
These are examples of `rules_go` build targets. These do a bunch of magic to invoke Go toolchains and in theory let bazel cache builds at a pretty granular level. I'm hoping this is true, I've got a few pipelines that are starting to run way longer than I like.
### OCI Images
For ease of use, I like to build docker images containing my packages. This is particularly important for Drone pipelines.
We're gonna amend our `MODULE.bazel` to add some new tools.
``` {title="MODULE.bazel"}
bazel_dep(name = "rules_oci", version = "1.3.1") # gives us ways to interact with OCI images and repositories
bazel_dep(name = "rules_pkg", version = "0.9.1") # exposes a way to tar our app, which is necessary for packing with rules_oci
oci = use_extension("@rules_oci//oci:extensions.bzl", "oci")
oci.pull(
name = "distroless_base",
image = "gcr.io/distroless/base",
tag = "latest", # This is temporary. For reproducible builds, you'll want to use digest hashes.
)
use_repo(oci, "distroless_base")
```
`pull()` does more or less what it says: it creates a target that represents an OCI image pulled from a registry, and another `use_repo()` call tells bazel that we're *using* our image.
And add this to the `BUILD.bazel` file for the binary you want built into an OCI image
``` {title="cmd/echo/BUILD.bazel"}
load("@rules_pkg//:pkg.bzl", "pkg_tar")
pkg_tar(
name = "tar",
srcs = [":echo"],
)
load("@rules_oci//oci:defs.bzl", "oci_image")
oci_image(
name = "image",
base = "@distroless_base",
entrypoint = ["/echo"],
tars = [":tar"],
)
```
`oci_image` requires that whatever you package into the image it creates be contained in a tar file, which seems pretty reasonable. `rules_pkg` handles that for us.
Run `bazel build //cmd/echo:image` and you'll see another `buildozer` command and a lot of errors. This is to be expected, bazel wants builds to be reproducible and because we haven't specified a version or a hash it can't do that. It helpfully emits the `buildozer` command that'll set the proper digest hash and platforms bazel needs to resolve its builds.
```
bazel build //cmd/echo:image
WARNING: fetching from https://gcr.io/v2/distroless/base/manifests/latest without an integrity hash. The result will not be cached.
WARNING: for reproducible builds, a digest is recommended.
Either set 'reproducible = False' to silence this warning,
or run the following command to change oci.pull to use a digest:
(make sure you use a recent buildozer release with MODULE.bazel support)
buildozer 'set digest "sha256:73deaaf6a207c1a33850257ba74e0f196bc418636cada9943a03d7abea980d6d"' 'remove tag' 'remove platforms' 'add platforms "linux/amd64" "
linux/arm64" "linux/arm" "linux/s390x" "linux/ppc64le"' MODULE.bazel:distroless_base
WARNING: fetching from https://gcr.io/v2/distroless/base/manifests/latest without an integrity hash. The result will not be cached.
INFO: Repository rules_oci~1.3.1~oci~distroless_base_single instantiated at:
callstack not available
Repository rule oci_pull defined at:
/home/ndumas/.cache/bazel/_bazel_ndumas/482ba52ed14b5c036eb1d379e90911a8/external/rules_oci~1.3.1/oci/private/pull.bzl:437:27: in <toplevel>
ERROR: An error occurred during the fetch of repository 'rules_oci~1.3.1~oci~distroless_base_single':
Traceback (most recent call last):
File "/home/ndumas/.cache/bazel/_bazel_ndumas/482ba52ed14b5c036eb1d379e90911a8/external/rules_oci~1.3.1/oci/private/pull.bzl", line 373, column 17, in
_oci_pull_impl
fail("{}/{} is a multi-architecture image, so attribute 'platforms' is required.".format(rctx.attr.registry, rctx.attr.repository))
Error in fail: gcr.io/distroless/base is a multi-architecture image, so attribute 'platforms' is required.
ERROR: <builtin>: fetching oci_pull rule //:rules_oci~1.3.1~oci~distroless_base_single: Traceback (most recent call last):
File "/home/ndumas/.cache/bazel/_bazel_ndumas/482ba52ed14b5c036eb1d379e90911a8/external/rules_oci~1.3.1/oci/private/pull.bzl", line 373, column 17, in
_oci_pull_impl
fail("{}/{} is a multi-architecture image, so attribute 'platforms' is required.".format(rctx.attr.registry, rctx.attr.repository))
Error in fail: gcr.io/distroless/base is a multi-architecture image, so attribute 'platforms' is required.
ERROR: /home/ndumas/.cache/bazel/_bazel_ndumas/482ba52ed14b5c036eb1d379e90911a8/external/rules_oci~1.3.1~oci~distroless_base/BUILD.bazel:1:6: @rules_oci~1.3.1~
oci~distroless_base//:distroless_base depends on @rules_oci~1.3.1~oci~distroless_base_single//:distroless_base_single in repository @rules_oci~1.3.1~oci~distro
less_base_single which failed to fetch. no such package '@rules_oci~1.3.1~oci~distroless_base_single//': gcr.io/distroless/base is a multi-architecture image,
so attribute 'platforms' is required.
ERROR: Analysis of target '//cmd/echo:image' failed; build aborted:
INFO: Elapsed time: 2.434s
INFO: 0 processes.
FAILED: Build did NOT complete successfully (27 packages loaded, 341 targets configured)
```
A diff after running should show something like this:
```
git diff
diff --git a/MODULE.bazel b/MODULE.bazel
index 4d9ba08..682985b 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -15,8 +15,14 @@ use_repo(go_deps, "com_github_therealfakemoot_go_telnet")
oci = use_extension("@rules_oci//oci:extensions.bzl", "oci")
oci.pull(
name = "distroless_base",
+ digest = "sha256:73deaaf6a207c1a33850257ba74e0f196bc418636cada9943a03d7abea980d6d",
image = "gcr.io/distroless/base",
- tag = "latest",
+ platforms = [
+ "linux/amd64",
+ "linux/arm",
+ "linux/arm64",
+ "linux/ppc64le",
+ "linux/s390x",
+ ],
)
-
use_repo(oci, "distroless_base")
```
And then re-running `bazel build //cmd/echo:image` should complete successfully:
```
bazel build //cmd/echo:image
INFO: Analyzed target //cmd/echo:image (22 packages loaded, 9284 targets configured).
INFO: Found 1 target...
Target //cmd/echo:image up-to-date:
dist/bin/cmd/echo/image
INFO: Elapsed time: 5.799s, Critical Path: 0.85s
INFO: 17 processes: 12 internal, 2 linux-sandbox, 3 local.
INFO: Build completed successfully, 17 total actions
```
Pushing our image to a repository ends up being relatively simple after all the legwork. The diff below shows the full changes but in summary
- change the `load()` call for `rules_oci`. It's variadic and takes an arbitrary number of arguments indicating names to import. Add `oci_push` to the list.
- Use the imported `oci_push` rule to set tags and the destination registry
```diff
diff --git a/cmd/echo/BUILD.bazel b/cmd/echo/BUILD.bazel
index 4f52043..44d8a6c 100644
--- a/cmd/echo/BUILD.bazel
+++ b/cmd/echo/BUILD.bazel
@@ -20,7 +20,7 @@ pkg_tar(
srcs = [":echo"],
)
-load("@rules_oci//oci:defs.bzl", "oci_image")
+load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push")
oci_image(
name = "image",
@@ -28,3 +28,10 @@ oci_image(
entrypoint = ["/echo"],
tars = [":tar"],
)
+
+oci_push(
+ name = "registry",
+ image = ":image",
+ repository = "code.ndumas.com/ndumas/gomud",
+ remote_tags = ["latest"],
+)
```
Running `bazel run //cmd/echo:registry` will push your image, as long as you'd otherwise be able to use `docker push` or similar. You will need to inject authentication details into your build pipelines, etc.
```bash
$ bazel run //cmd/echo:registry
INFO: Analyzed target //cmd/echo:registry (0 packages loaded, 0 targets configured).
INFO: Found 1 target...
Target //cmd/echo:registry up-to-date:
dist/bin/cmd/echo/push_registry.sh
INFO: Elapsed time: 0.330s, Critical Path: 0.01s
INFO: 1 process: 1 internal.
INFO: Build completed successfully, 1 total action
INFO: Running command line: dist/bin/cmd/echo/push_registry.sh
2023/08/19 13:03:24 pushed blob: sha256:b02a7525f878e61fc1ef8a7405a2cc17f866e8de222c1c98fd6681aff6e509db
2023/08/19 13:03:24 pushed blob: sha256:f5a45b52c7f9934ccad7dce04c930af615270af739e172b7ff46c7b34689578c
2023/08/19 13:03:24 pushed blob: sha256:a7ca0d9ba68fdce7e15bc0952d3e898e970548ca24d57698725836c039086639
2023/08/19 13:03:24 pushed blob: sha256:fcb6f6d2c9986d9cd6a2ea3cc2936e5fc613e09f1af9042329011e43057f3265
2023/08/19 13:03:24 pushed blob: sha256:fe5ca62666f04366c8e7f605aa82997d71320183e99962fa76b3209fdfbb8b58
2023/08/19 13:03:24 pushed blob: sha256:e8c73c638ae9ec5ad70c49df7e484040d889cca6b4a9af056579c3d058ea93f0
2023/08/19 13:03:24 pushed blob: sha256:4aa0ea1413d37a58615488592a0b827ea4b2e48fa5a77cf707d0e35f025e613f
2023/08/19 13:03:24 pushed blob: sha256:1e3d9b7d145208fa8fa3ee1c9612d0adaac7255f1bbc9ddea7e461e0b317805c
2023/08/19 13:03:24 pushed blob: sha256:7c881f9ab25e0d86562a123b5fb56aebf8aa0ddd7d48ef602faf8d1e7cf43d8c
2023/08/19 13:03:24 pushed blob: sha256:5627a970d25e752d971a501ec7e35d0d6fdcd4a3ce9e958715a686853024794a
2023/08/19 13:03:24 pushed blob: sha256:08553ba93cfea7ad45b59911d8ed0a025489e7c3623920dfda331b9a49f1e8aa
2023/08/19 13:03:24 pushed blob: sha256:96266735468f361ae6828901a80fc15a7f75e26640351df9e0f0f9824f36cf92
2023/08/19 13:03:24 pushed blob: sha256:2758d0c31c8ca76c3379e7b1be20adc4144e9230873bb2c5bdb41f3691fa75bc
2023/08/19 13:03:24 pushed blob: sha256:fce64026d8c539f2a8cd7d81f173f94cffab1311a15d5578e451f66404b5a1eb
2023/08/19 13:03:24 code.ndumas.com/ndumas/gomud@sha256:eaf1ff753e1dca1a9dc20b635ff5276de5633824232d8bdd59555757c3ab024e: digest: sha256:eaf1ff753e1dca1a9dc20b
635ff5276de5633824232d8bdd59555757c3ab024e size: 2275
2023/08/19 13:03:25 code.ndumas.com/ndumas/gomud:latest: digest: sha256:eaf1ff753e1dca1a9dc20b635ff5276de5633824232d8bdd59555757c3ab024e size: 2275
$
```
And with that, you've got an OCI image pushed to your repository of choice. Note that bazel relies on the environment to provide an OCI toolchain and the authorization. I've got my drone credentials in environment variables, but your setup may vary.
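For a concrete example of what "able to use `docker push`" means here: as far as I understand, `oci_push` goes through the standard Docker credential helpers, so a one-time login against the registry is enough on a workstation ( the username below is a placeholder ):
```bash
# Log in once so the credential store has an entry for the registry,
# then the bazel push target can authenticate with it.
docker login code.ndumas.com -u some-username
bazel run //cmd/echo:registry
```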
## Success Story???
The next step forward is to take a step backwards: integrate bazel into a Makefile. `make` is actually pretty nice as a task-runner; now that bazel can handle the top-to-bottom process of builds, the makefile doesn't need much, if any, logic in it. All it'll have to do is serve as fancy aliases for bazel invocations.
I also haven't actually set up cross-compilation. Work for another day.
### Useful Tips
#### Supported go compilation targets
I haven't used this one yet, but it's handy for manually cross-compiling.
```
bazel query 'kind(platform, @rules_go//go/toolchain:all)'
```
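To actually use one of the platforms that query lists, a build invocation along these lines should work; I haven't verified this exact command, and `//cmd/echo` is just the binary from the earlier example:
```bash
# Cross-compile the echo binary for linux/arm64 using a rules_go platform.
bazel build --platforms=@rules_go//go/toolchain:linux_arm64 //cmd/echo
```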

@ -0,0 +1,9 @@
---
title: "Bf in Go"
date: "2020-01-27"
draft: true
toc: false
images:
tags:
- untagged
---

@ -0,0 +1,94 @@
---
draft: false
title: "Data Interfaces in Go"
aliases: ["Data Interfaces in Go"]
series: []
author: "Nick Dumas"
cover: ""
summary: "Playing with interfaces"
showFullContent: false
tags:
- genesis
- golang
- procedural-generation
date: "2019-02-06"
---
# interfaces
I'm a fan of Go's interfaces. They're really simple and don't require a lot of legwork.
```go
type Mover interface {
	Move(x, y int) (int, int)
}

type Dog struct {
	Name string
}

// Dog satisfies Mover because it implements Move with the right signature.
func (d Dog) Move(x, y int) (int, int) {
	return x, y
}
```
Dog is now a Mover! No need for keywords like `implements`. The compiler just checks at the various boundaries in your app, like struct definitions and function signatures.
```go
type Map struct {
	Actors []Mover
}

// something accepts any Mover; the concrete type doesn't matter.
func something(m Mover, x, y int) bool {
	// do something
	return true
}
```
# Functionality vs Data
This is where things get tricky. Interfaces describe *functionality*. What if you want the compiler to enforce the existence of specific members of a struct? I encountered this problem in a project of mine recently and I'll use it as a case study for a few possible solutions.
## Concrete Types
If your only expectation is that the compiler enforce the existence of specific struct members, specifying a concrete type works nicely.
```golang
type Issue struct {
	Key     string
	Title   string
	Created time.Time
	Updated time.Time
	Body    string
	Attrs   map[string][]string
}

type IssueService interface {
	Get() []Issue
}
```
There's a few benefits to this. Because Go will automatically zero out all members of a struct on initialization, one only has to fill in what you explicitly want or need to provide. For example, the `Issue` type may represent a Jira ticket, or a Gitlab ticket, or possibly something as simple as lines in a TODO.txt file in a project's root directory.
In the context of this project, the user provides their own functions which "process" these issues. By virtue of being responsible for both the production and consumption of issues, the user/caller doesn't have to worry about mysteriously unpopulated data causing issues.
Where this falls apart, though, is when you want to allow for flexible/arbitrary implementations of functionality while still enforcing presence of "data" members.
## Composition
I think the correct solution involves breaking your struct apart: you have the `Data` and the `Thinger` that needs the data. Instead of making `Issue` itself have methods, `Issue` can have a member that satisfies a given interface. This seems to offer the best of both worlds. You don't lose type safety, while allowing consumers of your API to plug-and-play their own concrete implementations of functionality while still respecting your requirements.
```go
type Issue struct {
	Key     string
	Title   string
	Created time.Time
	Updated time.Time
	Body    string
	Checker IssueChecker
	Attrs   map[string][]string
}

type IssueChecker interface {
	Check(Issue) bool
}
```

@ -0,0 +1,73 @@
---
draft: false
title: "How to find that one volume you're pretty sure you didn't lose"
date: "2024-06-25"
series: []
author: "Nick Dumas"
cover: ""
tags: ["bash", "docker"]
summary: "Docker volumes can be opaque, so I wrote a small bash script to help you troubleshoot."
showFullContent: false
---
## What I expect you to know
This article is only relevant if you know about and use Docker volumes and have some fluency in bash. I'll explain the code as I go, if it helps.
## The Problem
Over the lifetime of a Docker host machine, it's likely that orphaned volumes ( and other detritus ) will accumulate over time. You might also find yourself fumbling a configuration and orphaning a volume yourself.
However we got here, we have a bunch of volumes and we need to know if any of them are important. In a perfect world, they'll have decent names. We don't live in a perfect world.
## Make a list
Luckily, we have tools at our disposal to handle this. My thought process almost always starts with "Can I turn a list of the things I care about into a newline separated list?" If I can do that, I can start automating my troubleshooting.
Let's start with `docker volume ls`. This is how we list volumes, but the default output isn't quite what I'm looking for:
```
docker volume ls
DRIVER VOLUME NAME
local d35fce052fbce42b94b2f9b2957be0f77090fa006b1a192030eff07db3675af2
local grafana-storage
local plausible_db-data
local plausible_event-data
```
This is human readable, and we could even do some slicing with `cut` or `awk`, but Docker gives us a flag that will take us exactly where we need to go: `--format`. Generally, Docker uses Go's `text/template` library to back this feature, and more specifically, individual flags (usually) [document](https://docs.docker.com/reference/cli/docker/volume/ls/#format) the template verbs available. Here, we want `Name`.
```
docker volume ls --format "{{.Name}}"
d35fce052fbce42b94b2f9b2957be0f77090fa006b1a192030eff07db3675af2
grafana-storage
plausible_db-data
plausible_event-data
```
And now we have a newline separated list of volume names.
## Process of elimination
The next part is fairly straightforward. We loop over this list and ask Docker to create a temporary container based on alpine, with a single volume mounted at `/test/`.
```
#! /bin/bash
# Newline separated list of volume names
volumes=$(docker volume ls --format="{{.Name}}")
for volume in $volumes; do
# Help the user keep track of which volume they're exploring
echo "Mounting $volume in a temporary image."
docker run --rm -v $volume:/test/ -it alpine /bin/sh
done
```
Running this script should do something like this:
```
./cycle-volumes.sh
Mounting d35fce052fbce42b94b2f9b2957be0f77090fa006b1a192030eff07db3675af2 in a temporary image.
/ # ls /test/
clickhouse-server.err.log clickhouse-server.log.1.gz clickhouse-server.log.4.gz clickhouse-server.log.7.gz
clickhouse-server.log clickhouse-server.log.2.gz clickhouse-server.log.5.gz clickhouse-server.log.8.gz
clickhouse-server.log.0.gz clickhouse-server.log.3.gz clickhouse-server.log.6.gz
/ #
Mounting grafana-storage in a temporary image.
/ # ls /test
alerting csv file-collections grafana.db plugins png
/ # exit
Mounting plausible_db-data in a temporary image.
/ # exit
Mounting plausible_event-data in a temporary image.
/ # exit
```
You can use bash to explore the volume and identify its contents, make note of which ones are which, and proceed accordingly.
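Once you've worked out which volumes actually matter, cleaning up a confirmed orphan is a one-liner. To be clear, this is destructive; the name below is just the anonymous volume from the example output above:
```
docker volume rm d35fce052fbce42b94b2f9b2957be0f77090fa006b1a192030eff07db3675af2
```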

@ -0,0 +1,198 @@
---
draft: false
title: "Copying HTML files by hand is for suckers"
date: "2023-02-02"
series: ["blogging-with-quartz"]
series_order: 1
author: "Nick Dumas"
authorTwitter: ""
cover: ""
tags: ["drone", "gitea", "obsidian", "devops"]
keywords: ["drone", "gitea", "obsidian", "devops"]
summary: "How I built a drone instance and pipeline to publish my blog"
showFullContent: false
---
### Attribution
Credit to Jim Sheldon in the Harness slack server who pointed me [here](https://blog.ruanbekker.com/blog/2021/03/09/cicd-with-droneci-and-gitea-using-docker-compose/) which provided much of the starting skeleton of the project.
## The Old way
I use [hugo](https://gohugo.io/) to build my blog, and I love it. Static sites are the way to go for most content, and keeping them in git provides strong confidence that I'll never lose my work. I really like working in Markdown, and hosting is cheap and easy. Unfortunately, my current setup is extremely manual; I run `hugo` myself and copy the files into `/var/www`.
For a long time, this has been a really uncomfortable process and is part of why I find myself so uninterested in writing with any frequency. When the new year rolled around, I decided it was time to do better.
I want every push to my blog repository to generate a new hugo build and publish my content somewhere. The tools I've chosen are [gitea](/posts/gitea-lfs-and-syncing-obsidian-vaults) for managed git services, [drone](https://www.drone.io/) for continuous integration/deployment, and hugo to build the site.
## Hello Drone
Standing up a working Drone instance involves a few moving pieces:
1) Configure an `oauth2` application in your hosted git service with which to authenticate your Drone instance
2) You need the `drone` server itself, which hosts the web UI and database, and responds to webhooks
3) The `drone-runner` is a separate entity that communicates with `drone` and actually executes pipelines. There's a few flavors of `drone-runner` and I've selected the [docker runner](https://docs.drone.io/runner/docker/overview/).
Step 1 is accomplished [manually](https://docs.drone.io/server/provider/gitea/), or with the gitea admin API. Using `docker-compose`, I was able to assemble the following configuration files to satisfy points 2 and 3.
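If you'd rather script step 1 than click through the web UI, something along these lines should work against the Gitea API; treat the endpoint and payload as assumptions to verify against your Gitea version's API docs rather than gospel:
```bash
# Assumed endpoint and payload; double-check against your Gitea version's API docs.
# $GITEA_TOKEN is a personal access token for the user that will own the application.
curl -X POST "https://code.ndumas.com/api/v1/user/applications/oauth2" \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "drone", "redirect_uris": ["https://drone.ndumas.com/login"]}'
```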
### docker-compose
```yaml
version: '3.6'
services:
drone:
container_name: drone
image: drone/drone:${DRONE_VERSION:-1.6.4}
restart: unless-stopped
environment:
# https://docs.drone.io/server/provider/gitea/
- DRONE_DATABASE_DRIVER=sqlite3
- DRONE_DATABASE_DATASOURCE=/data/database.sqlite
- DRONE_GITEA_SERVER=https://code.ndumas.com
- DRONE_GIT_ALWAYS_AUTH=false
- DRONE_RPC_SECRET=${DRONE_RPC_SECRET}
- DRONE_SERVER_PROTO=https
- DRONE_SERVER_HOST=drone.ndumas.com
- DRONE_TLS_AUTOCERT=false
- DRONE_USER_CREATE=${DRONE_USER_CREATE}
- DRONE_GITEA_CLIENT_ID=${DRONE_GITEA_CLIENT_ID}
- DRONE_GITEA_CLIENT_SECRET=${DRONE_GITEA_CLIENT_SECRET}
ports:
- "3001:80"
- "3002:443"
networks:
- cicd_net
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./drone:/data:z
drone-runner:
container_name: drone-runner
image: drone/drone-runner-docker:${DRONE_RUNNER_VERSION:-1}
restart: unless-stopped
depends_on:
- drone
environment:
# https://docs.drone.io/runner/docker/installation/linux/
# https://docs.drone.io/server/metrics/
- DRONE_RPC_PROTO=https
- DRONE_RPC_HOST=drone.ndumas.com
- DRONE_RPC_SECRET=${DRONE_RPC_SECRET}
- DRONE_RUNNER_NAME="${HOSTNAME}-runner"
- DRONE_RUNNER_CAPACITY=2
- DRONE_RUNNER_NETWORKS=cicd_net
- DRONE_DEBUG=false
- DRONE_TRACE=false
ports:
- "3000:3000"
networks:
- cicd_net
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
cicd_net:
name: cicd_net
```
All of the `docker-compose` files were ripped straight from documentation so there's very little surprising going on. The most common pitfall seems to be setting `DRONE_SERVER_HOST` to a URL instead of a hostname.
For me, the biggest hurdle I had to vault was SELinux. Because this is a fresh Fedora install, SELinux hasn't been relaxed in any way.
When dealing with SELinux, your friends are `ausearch` and `audit2{why,allow}`. In my case, I needed to grant `system_u:system_r:container_t` on `/var/run/docker.sock` so `drone` and `drone-runner` can access the host Docker service.
That wasn't the end of my SELinux woes, though. Initially, my Drone instance was crashing with "cannot open database file" errors. The fix was the `:z` on the following line, which tells Docker to automatically apply the SELinux labels necessary to make the directory mountable.
```yaml
- ./drone:/data:z
```
Why didn't this work for `docker.sock`? I really couldn't say, I did try it. With all the SELinux policies configured, I had a Drone instance that was able to see my Gitea repositories.
### caddy config
```
drone.ndumas.com {
encode gzip
reverse_proxy localhost:3001
}
```
The caddy configuration is a very simple reverse-proxy. Caddy has built-in LetsEncrypt support, so it makes a nice last hop for internet traffic. `sudo caddy start` will run caddy and detach, and with that Drone has been exposed to the internet under a friendly subdomain.
### startup script
```bash
#!/usr/bin/env bash
export HOSTNAME=$(hostname)
export DRONE_VERSION=2.16.0
export DRONE_RUNNER_VERSION=1.8.3
export DRONE_ADMIN_USER="admin"
export DRONE_RPC_SECRET="$(echo ${HOSTNAME} | openssl dgst -md5 -hex|cut -d' ' -f2)"
export DRONE_USER_CREATE="username:${DRONE_ADMIN_USER},machine:false,admin:true,token:${DRONE_RPC_SECRET}"
# These are set in ~/.bash_profile
# export DRONE_GITEA_CLIENT_ID=""
# export DRONE_GITEA_CLIENT_SECRET=""
docker-compose -f docker-compose/drone.yml up -d
caddy start --config caddy/drone --adapter caddyfile
```
The startup script, `drone.sh`, injects some environment variables. Most of these are boring, but `DRONE_RPC_SECRET` and `DRONE_USER_CREATE` are the two most important. The script is set up to make these deterministic; it will create an admin user whose access token is the `md5` of your host machine's hostname.
This really saved my bacon when I realized I didn't know how to access the admin user for my drone instance when I needed it. Diving into your Drone instance's database is technically on the table, but I wouldn't advise it.
## It's pipeline time
Once I had drone up and running, getting my blog publishing pipeline going was a relatively straightforward process: write a pipeline step, commit, push, check Drone for a green build. After a couple days of iterating, the complete result looks like this:
```yaml
kind: pipeline
name: default
steps:
- name: submodules
image: alpine/git
commands:
- git submodule update --init --recursive
- name: build
image: alpine:3
commands:
- apk add hugo
- hugo
- name: publish
image: drillster/drone-rsync
settings:
key:
from_secret: blog_sync_key
user: blog
delete: true
recursive: true
hosts: ["blog.ndumas.com"]
source: ./public/
target: /var/www/blog.ndumas.com
include: ["*"]
```
The steps are pretty simple:
1) Clone the repository ( this is actually handled by Drone itself ) and populate submodules, a vehicle for my Hugo theme
2) Building the site with Hugo is as simple as running `hugo`. Over time, I'm going to add more flags to the invocation, things like `--build{Drafts,Future,Expired}=false`, `--minify`, and so on; see the sketch just after this list.
3) Deployment of the static files to the destination server. This did require pulling in a pre-made Drone plugin, but I did vet the source code to make sure it wasn't trying anything funny. This could be relatively easily reproduced on a raw Alpine image if desired.
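Here's roughly what I expect the build step to grow into; the extra flags are just the ones mentioned in step 2, not what the pipeline currently runs:
```yaml
- name: build
  image: alpine:3
  commands:
    - apk add hugo
    - hugo --buildDrafts=false --buildFuture=false --buildExpired=false --minify
```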
## Green checkmarks
At this point, I've got a fully automated publishing pipeline. As soon as a commit gets pushed to my blog repository, Drone jumps into action and runs a fresh Hugo build. The process is far from perfect, though.
You might've noticed a lack of screenshots or other media in my posts. At the moment, I'm authoring my blog posts in [Obsidian](https://obsidian.md), my preferred note-taking application, because it gives me quick access to...well, my notes. The catch is that Obsidian and Hugo use different conventions for linking between documents and referencing attachments/images.
In the long term, what I want to do is probably write a script and pipeline which can
1) convert Obsidian-style links and frontmatter blocks to their Hugo equivalents, so I can more easily cross-link between posts while drafting
2) Find embedded media ( images, etc ) and pull them into the blog repository, commit and push to trigger the blog publish pipeline.
## Unsolved Mysteries
For some reason, `audit2allow` was emitting invalid output as the result of something in my audit log. I never traced it down. Whatever was causing this wasn't related to my `drone` setup since I got everything running without fixing it.
```
[root@drone x]# cat /var/log/audit/audit.log|audit2allow -a -M volumefix
compilation failed:
volumefix.te:24:ERROR 'syntax error' at token 'mlsconstrain' on line 24:
mlsconstrain sock_file { write setattr } ((h1 dom h2 -Fail-) or (t1 != mcs_constrained_type -Fail-) ); Constraint DENIED
# mlsconstrain sock_file { ioctl read getattr } ((h1 dom h2 -Fail-) or (t1 != mcs_constrained_type -Fail-) ); Constraint DENIED
/usr/bin/checkmodule: error(s) encountered while parsing configuration
```

@ -0,0 +1,40 @@
---
draft: false
title: "Filtering Hugo pages by Type"
aliases: ["Filtering Hugo pages by Type"]
series: ["blogging-with-quartz"]
series_order: 4
date: "2023-04-08"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "More complex Hugo sites sometimes require creating markdown files you don't want displayed in specific listings."
showFullContent: false
tags:
- hugo
- quartz
- webdev
---
## What am I Doing?
As part of my effort to beautify my series features, I'm trying to set up a landing page where I can add arbitrary markdown content. You can see an example of one of these pages [here](/series/blogging-with-quartz).
Being able to embed some graphics and write short summaries of each series would be nice, so I've been experimenting with adding `_index.md` and `index.md` files at various places.
## Why doesn't it work?
The problem here is that the query in my `recent.html` partial was a little too vague and caught these new pages:
``` {title="recent.html"}
{{$notes := .Site.RegularPages.ByLastmod.Reverse}}
```
## What are the options?
Hugo has a bunch of different ways of grabbing groups of Pages. There's page bundles, taxonomies, and more.
## I picked X
Each page has a [Content Type](https://gohugo.io/content-management/types/) assigned to it. This ends up being the simplest option for filtering for now.
``` {title="recentNotes.html"}
{{$notes := where site.RegularPages "Type" "notes"}}
```
I think the actual smart thing I did was factor the "notes" query into a dedicated partial: `recentNotes.html`.
In a perfect world I'd parameterize this partial, but I'm not really sure that's even possible in Hugo. Partials explicitly accept up to one argument, no more.
Maybe this is what shortcodes are for.
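One pattern I've seen floated is packing parameters into a `dict`, since the single argument can be any value. This is just a sketch of the idea, not something I've wired into `recentNotes.html`:
``` {title="parameterized partial sketch"}
{{/* caller passes a dict as the partial's single context argument */}}
{{ partial "recentNotes.html" (dict "type" "notes") }}

{{/* inside the partial, read the key back out */}}
{{ $notes := where site.RegularPages "Type" .type }}
```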

@ -0,0 +1,71 @@
---
draft: false
title: "Ashes to Ashes, Buffs to Buffs"
aliases: ["Ashes to Ashes, Buffs to Buffs"]
series: ["aardwolf-adventures"]
series_order: 1
date: "2023-03-02"
author: "Nick Dumas"
cover: ""
tags: ["games", "aardwolf"]
keywords: ["games", "aardwolf"]
summary: "MUDding, reflection"
showFullContent: false
---
## Broad Strokes
This year I picked up [Aardwolf](https://www.aardwolf.com/), a free MUD ( Multi User Dungeon ), a now ancient form of gaming that takes place over Telnet.
The game is structured around a couple of different gameplay loops, the most central of which are the `remort` and `tier` systems, which respectively allow you to reset your level and begin acquiring more skills at earlier levels.
Equipment gets carried over between iterations of a character, so on a macro-scale the game is about collecting Quest Points to purchase gear and Wishes, special permanent character upgrades.
Gameplay on the micro-scale consists of traveling between Areas, which may or may not have some amount of public/secret goals/quests. Some goals are as simple as killing a number of target creatures, some can take days or weeks to complete as you gather components and complete tasks.
After a few weeks of play, I hit level 200, and then 201 as I Superhero'd. I've done a bunch of goals, a bunch of campaigns and quests, and I wanted to try to consolidate and share what I'm learning.
## Plugins
I learned really quickly that effective play ~~requires~~ is greatly aided by the use of plugins. Full usage examples are beyond the scope of this post, but they're planned.
### mapper
Above and beyond the most important. The world of Aardwolf is massive, spanning multiple continents and hundreds of discrete zones with their own complex internal, often non-Euclidian geometries. For the most part, it runs in the background quietly tracking which rooms connect to each other and most critically, what creatures have been seen where.
### snd
The core micro-gameplay loop consists of campaigns and quests, tasks that ask you to kill a certain number of specific targets throughout the world. snd ( search and destroy ) leverages the mapper's room and mob data to assist you.
snd scans the output of `cp check` and similar to extract the rough location of each target. You can then use snd commands to leverage the mapper to auto-navigate to your destination area and then find your target.
### dinv
Inventory management is a huge deal for me, so learning about dinv was a godsend. dinv will build a database of items held or carried and use that database to do things like:
- automatically find and use a portal
- use the mapper to navigate to a vendor which sells the highest level healing potion you can use and purchase a specific quantity
- analyze your equipment and design an equipment set that optimizes your prioritized stats ( presets are available for all base classes )
### adb
The docs indicate that most of its utility is dedicated to helping deal with Enchanting. Very cool, but not something I'm specced into, so I've put a pin in exploring this one.
## My Journey
I rolled a male Psion Navigator because I've always loved the flavor of these characters, and Navigator was the only one that sounded like it had interesting abilities. Psion ended up being a really convenient starter class. Very early on, you start getting spells that allow you to fly, breathe under water, ignore food and water, and more.
This probably saved my run; if I had to manage buying and eating food for very long I don't think I'd have kept at it because I hadn't learned about dinv yet. I try to avoid consumables when I'm allowed, so the hassle would've turned me away most likely.
Another huge thing was joining the [Boot](https://aardwolfboot.com/) clan. Immensely friendly people, and having a clan means having a morgue: your corpse returns to a safe location when you die, saving you a LOT of trouble.
Guild knowledge went a long way to help me learn the fundamentals:
1) Portals are your friend
    1) don't forget to `mapper cexit`
2) never forget your detects
3) lots of doors exist and are accessible, simply not visible on first look
4) learning the hunt trick (or using snd )
After graduating the Academy, I really just ground out those levels. Campaign leveling is tedious and slow, but it paid off. I scored around 8000 total Quest Points before remort. Extremely satisfied with that. I bought a Bag of Aardwolf, which is doing wonders for my carry capacity.
I didn't spend any time at Hero or Superhero this run, I did not feel good fighting those high level mobs, so I decided to take it around one more time and work on my Global Quests so I can get promoted. Already knocked one out today, so it's not far off.

@ -0,0 +1,23 @@
---
draft: false
aliases: ["First Post"]
series: []
author: "Nick Dumas"
cover: ""
summary: ""
showFullContent: false
title: "First Post"
date: "2018-02-10"
tags: [ "Code", "Site Updates"]
---
# Introduction
I've been programming with a passion for the last 15 years. I started with Python 2.2 or so and stuck with that for a good while. I also spent a few years doing web development work and most recently added Golang to my kit.
# This Site
This site is going to be a portfolio/showroom for my projects. I'll try to find clever ways to interact with my tools through the browser to take advantage of the optimizations and growing array of high power tools being baked into browsers like Chrome and Firefox.
A section of this site will also be used for my streaming content and tools. I've finished the first prototype of my [idle screen](http://idle.ndumas.com) which will continue to get polished.
## Tech
Shoutout to [jcmdln](https://github.com/jcmdln) for the CSS framework, `yttrium`. I'll try to do it justice in making a site that doesn't look a mess.

Binary file not shown.

@ -0,0 +1,270 @@
---
draft: false
title: "Gardening with Quartz"
aliases: ["blogging-with-quartz"]
date: "2023-03-04"
series: ["blogging-with-quartz"]
series_order: 2
author: "Nick Dumas"
authorTwitter: ""
cover: ""
tags: ["drone", "hugo", "devops", "obsidian", "quartz"]
keywords: ["drone", "hugo", "devops", "obsidian", "quartz"]
summary: "When you want a container built right, you have to do it yourself."
showFullContent: false
---
## Authoring blog posts in Obsidian
I'm using Gitea, Drone, and Hugo to watch for commits to my Obsidian vault, extract blog posts, and publish them to one of my servers. I run my stuff on Digital Ocean droplets, and I use Caddy for serving static sites.
## Why does it work?
It's cheap, fast, and simple. Self-hosting means I have more control over what gets published. This could all be accomplished with Github Actions, but I'd have to have separate vaults/repositories for public vs private content or I'd have to just make all my notes public.
## Why doesn't it work?
My original selection of pipeline images and commands was inefficient, incurring unnecessary network traffic and relying on third party package mirrors that suddenly started performing very badly.
Another important detail is media: the directory structure for my Obsidian vault and my site are very different.
I want to write blog posts with screenshots, media files, and more. Obsidian lets you drag and drop attachments, or link them manually with links in the form `![[path/to/attachment.png]]`.
Finally, Hugo is a great static site generator, but there are better options when you're looking to publish content authored in Obsidian. In particular, the graph view is something that I'd love to bring into my blog. Luckily, [Quartz](https://github.com/jackyzha0/quartz) is built directly on top of Hugo and comes with a theme and some helper utilities.
## What are the options?
### The Requirements
- [ ] attachment links must be transformed from `![[attachments/whatever.png]]` to `![[notes/post-name/whatever.png]]`
- [ ] the site must be built with Quartz instead of Hugo
### Transforming links
The first choice is whether I "fix" this during authoring or during the publishing step. For the former, my options look something like this:
1) manually typing the final URL into the note
2) creating a complicated template system for generating Hugo shortcodes. in my head, this would use a prompter to let me select what attachment i want to insert, ask for resizing parameters, etc, and then generate a Hugo shortcode or an `<img>` tag.
Neither of these is satisfactory to me. I'd love to just drag and drop a piece of media into my note inside Obsidian and simply not have to think about it any further.
This leaves implementing something during the publishing pipeline. Now that I've got my [drone pipeline](notes/drone-and-hugo/) working, it's the perfect place to do transformations. This path presents a variety of possibilities falling on a spectrum somewhere between a bash script invoking sed and a custom ( Golang ) program that parses frontmatter, markdown, and applies pre-configured transformations.
### Quartz
The Quartz repo has a few built-in options for turning your notes into a website: a Dockerfile, a Makefile, and instructions on how to build everything from scratch. All of these are great, and I played with them all at different times to figure out which was a good fit.
## Pipelines: More than meets the eye
Unsurprisingly, I opted to extend my existing Drone pipeline with a transformer. This part of the pipeline has been in the back of my mind since the beginning, more or less, but it was much more important to get things stable first.
The pipeline I'm finally satisfied with looks like this, with checked boxes indicating what I had implemented at the start of this phase of the project.
- [x] Create a temporary shared directory, `/tmp/blog`
- [x] Clone the vault repository
- [x] do a `submodule` update and use `git-lfs` to pull down attachments
- [ ] clone my forked Quartz repository into `/tmp/blog`
- [x] Copy posts from `$VAULT/Resources/blog/post-name.md` to `/tmp/blog/content/notes/post-name/index.md`
- [ ] Scan all `index.md` files in `/tmp/blog/content/` for links that look like `![[attachments/whatever.png]]`, find `whatever.png` and copy it into the `/tmp/blog/content/notes/post-name/` directory for that `index.md`.
- [ ] Scan all `index.md` files in `/tmp/blog/content/` for links that look like `![[attachments/whatever.png]]` and edit them to `![[notes/post-name/whatever.png]]`
- [ ] Run the Quartz build command
- [x] Copy the static site to destination web server
## Hours and hours of debugging pipelines later
### Drone Volumes
The linchpin of this whole operation is having a temporary workspace that all these tools can operate on in sequence. To that end, I used Drone's [Temporary Volumes](https://docs.drone.io/pipeline/docker/syntax/volumes/temporary/) to mount `/tmp/blog` in all the relevant pipeline steps.
Creating a temporary volume looks like this. I really couldn't tell you what `temp: {}` is about; it certainly looks strange, but I never had the spare cycles to investigate.
```yaml {title=".drone.yml"}
volumes:
- name: blog
temp: {}
```
Once you've created the volume, a pipeline step can mount it to a desired path. See below for an example of using your created volume.
### Quartz
Forking Quartz was easy, I'd done so late last year during another attempt to get this blog off the ground.
After a merge to get my fork up to date with upstream, I was able to slot this into the pipeline with the following.
```yaml {title=".drone.yml"}
- name: clone-quartz
image: alpine/git
volumes:
- name: blog
path: /tmp/blog
commands:
- git clone -b hugo https://github.com/therealfakemoot/quartz.git /tmp/blog
```
This sets the stage for building the site, and for a step I implemented previously:
![[Resources/attachments/copy-posts-checkbox-screenshot.png]]
I opted to stop committing content to a blog repository and cloning the static site skeleton into the pipeline for a few reasons:
1) I already have reproducibility by virtue of building things with docker and having sources of truth in git.
2) It was an unnecessary layer of complexity
3) It was an unnecessary inversion of control flow
Configuring Quartz had its rocky moments. I've had to wrestle with frontmatter a lot; confusing TOML and YAML syntaxes can break your build or break certain features like the local graph.
### Gathering Media
This step ended up being pretty fun to work on. I took the opportunity to write this in Go because I knew I could make it fast and correct.
The process is simple:
1) Walk a target directory and find an `index.md` file
2) When you find an `index.md` file, scan it for links of the form `[[attachments/whatever.png]]`
3) Find `whatever.png` in the vault's attachments directory and copy it adjacent to its respective `index.md` file.
`walkFunc` is what handles step 1. You call `err := filepath.Walk(target, walkFunc(attachments))` and it will call your `walkFunc` for every filesystem object the OS returns.
This piece of code checks if we've found a blog post and then chucks it to `scanReader`.
```go {title="main.go"}
func walkFunc(matchChan matches) filepath.WalkFunc {
return func(path string, info fs.FileInfo, err error) error {
if err != nil {
return nil
}
if info.IsDir() {
return nil
}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		// release the file handle once this walk step finishes
		defer f.Close()
		if strings.HasSuffix(path, "index.md") {
			scanReader(f, path, matchChan)
		}
return nil
}
}
```
`scanReader` iterates line-by-line and uses a regular expression to grab the necessary details from matching links.
```go {title="main.go"}
type Attachment struct {
Filename string
Note string
}
type matches chan Attachment
func scanReader(r io.Reader, path string, matchChan matches) {
log.Printf("scanning markdown file: %s", path)
pat := regexp.MustCompile(`\[\[(Resources\/attachments\/.*?)\]\]`)
s := bufio.NewScanner(r)
for s.Scan() {
tok := s.Text()
matches := pat.FindAllStringSubmatch(tok, -1)
if len(matches) > 0 {
log.Printf("media found in %s: %#+v\n", path, matches)
for _, match := range matches {
dirs := strings.Split(path, "/")
noteFilename := dirs[len(dirs)-2]
log.Println("noteFilename:", noteFilename)
matchChan <- Attachment{Filename: match[1], Note: noteFilename}
}
}
}
}
```
Finally, `moveAttachment` receives a struct containing context ( the location of the `index.md` file and the name of the attachment to copy ) and performs a copy.
```go {title="main.go"}
func moveAttachment(att Attachment, dest string) error {
	destPath := filepath.Join(dest, strings.Split(att.Note, ".")[0])
log.Println("moving files into:", destPath)
_, err := copy(att.Filename, filepath.Join(destPath, filepath.Base(att.Filename)))
return err
}
func copy(src, dst string) (int64, error) {
sourceFileStat, err := os.Stat(src)
if err != nil {
return 0, err
}
if !sourceFileStat.Mode().IsRegular() {
return 0, fmt.Errorf("%s is not a regular file", src)
}
source, err := os.Open(src)
if err != nil {
return 0, err
}
defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return 0, err
}
defer destination.Close()
nBytes, err := io.Copy(destination, source)
return nBytes, err
}
```
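For completeness, here's a minimal sketch of how these pieces might be wired together; the real `main` in the repository may differ, and this assumes the definitions above plus the `flag` and `log` packages:
```go {title="main.go"}
// Sketch only: wire walkFunc, scanReader, and moveAttachment together.
func main() {
	target := flag.String("target", ".", "directory to scan for index.md files")
	flag.Parse()

	matchChan := make(matches)
	done := make(chan struct{})

	// Consume attachments as they're found and copy them next to their notes.
	go func() {
		for att := range matchChan {
			if err := moveAttachment(att, *target); err != nil {
				log.Printf("copying %s failed: %v", att.Filename, err)
			}
		}
		close(done)
	}()

	if err := filepath.Walk(*target, walkFunc(matchChan)); err != nil {
		log.Fatal(err)
	}
	close(matchChan)
	<-done
}
```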
This ended up being the most straightforward part of the process by far. I packed this in a `Dockerfile`, using build stages to improve caching.
```docker {title="Dockerfile"}
FROM golang:latest as BUILD
WORKDIR /gather-media
COPY go.mod ./
# COPY go.sum ./
RUN go mod download
COPY *.go ./
RUN go build -o /bin/gather-media
```
Integration into the pipeline is here:
```yaml {title=".drone.yml"}
- name: gather-media
image: code.ndumas.com/ndumas/gather-media:latest
volumes:
- name: blog
path: /tmp/blog
commands:
- gather-media -target /tmp/blog/content/notes
```
Full code can be found [here](https://code.ndumas.com/ndumas/gather-media/src/branch/main/main.go).
### Transforming Links
Link transformation ended up being pretty trivial, but it took way way longer than any of the other steps because of an embarrassing typo in a `find` invocation. Another Docker image, another appearance of the blog volume.
The typo in my `find` was using `contents/` instead of `content/`. My code worked perfectly, but the pipeline wasn't finding any files to run it against.
```yaml {title=".drone.yml"}
- name: sanitize-links
image: code.ndumas.com/ndumas/sanitize-links:latest
volumes:
- name: blog
path: /tmp/blog
commands:
- find /tmp/blog/content/ -type f -name 'index.md' -exec sanitize-links {} \;
```
`sanitize-links` is a bog-standard `sed` invocation. My original implementation tried to loop inside the bash script, but I realized I could refactor this into effectively a `map()` call and simplify things a whole bunch.
The pipeline calls `find`, which produces a list of filenames. Each filename is individually fed as an argument to `sanitize-links`. Clean and simple.
```bash {title="sanitize-links"}
#! /bin/sh
echo "scanning $1 for attachments"
noteName=$(echo $1|awk -F'/' '{print $(NF-1)}')
sed -i "s#Resources/attachments#notes/$noteName#w /tmp/changes.txt" $1
cat /tmp/changes.txt
```
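To make that concrete with this post's own attachment, the rewrite turns the vault-style link into a note-relative one:
```
![[Resources/attachments/drone-builds-screenshot.png]]
  becomes
![[notes/gardening-with-quartz/drone-builds-screenshot.png]]
```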
## Lots of Moving Pieces
If you're reading this post and seeing images embedded, everything is working. I'm pretty happy with how it all came out. Each piece is small and maintainable. Part of me worries that there's too many pieces, though. `gather-media` is written in Go, I could extend it to handle some or all of the other steps.
{{< figure
src="drone-builds-screenshot.png"
alt="Screenshot showing a series of green and red bars indicating a a set of mostly successful builds"
caption="This is mostly just a flex"
>}}
## For the future
Things I'd like to keep working on
- [ ] include shortcodes for images, code snippets, and the like
- [ ] customize the CSS a little bit
- [ ] customize the layout slightly
## Unsolved Mysteries
- What does `temp: {}` do? Why is it necessary?

@ -0,0 +1,57 @@
---
draft: false
title: "Handling flags in Genesis"
aliases: ["Handling flags in Genesis"]
series: ["genesis-development"]
series_order: 2
author: "Nick Dumas"
cover: ""
summary: "Using Cobra to accept a huge amount of inputs"
tags:
- genesis
- golang
- procedural-generation
date: "2018-04-08"
---
# Genesis
Genesis is a project I've spent a great deal of time thinking about and working on for a while with little progress. I'm recycling my old Github blog [post](/blog/genesis-roadmap/) because it still highlights the overall design plan. I've since altered the project to use Golang instead of CPython. The change is inspired by a desire/need for improved performance; in my view, Golang is the perfect tool to accomplish this goal and is the natural next step in my progression as a developer.
# Config files, CLI flags, and repeatability
With the decision to switch to Golang some necessary design choices had to be made. Due to the interactive and 'multi-phase' design of Genesis, it naturally lends itself to a single binary with an abundance of subcommands, such as `genesis render`, `genesis generate terrain` and so on.
After some research, an extremely appealing option for building the command-line interface came up: spf13's [cobra](https://github.com/spf13/cobra). This library is used by a lot of pretty big projects, including Hugo ( used to build the site you're reading right now ).
Due to the complex nature involved in each step of the world generation process, and considering one of the design goals is *repeatability*, I required a powerful yet flexible and reliable option for consuming and referencing configuration data. A user should be able to use interactive mode to iteratively discover parameters that produce results they desire and be able to save those values. Once again, spf13 comes to the rescue with [viper](https://github.com/spf13/viper). `viper` allows you to pull config values from quite a few different sources ranging from local files to environment variables to remote stores such as `etcd`.
The most complex requirement is a composition of the previous two; once a user has found a set of parameters that approximate what they're looking for, they need to be able to interactively ( via command-line or other user interfaces yet to be designed and developed ) modify or override parameters to allow fine-tuning of each phase of the generation process. Fortunately, the author of these libraries had the foresight to anticipate exactly this need.
## BindPFlags
This composition is then exposed via the `BindPFlags` [method](https://github.com/spf13/cobra#bind-flags-with-config). Given the correct arrangement of `cobra` flags, `viper` can now source 'config' values from the aforementioned sources _and_ command-line flags, with flags taking priority over all values except explicit `Set()` calls written directly into the Golang source code.
Thus, I had my solution. `viper` will read any configuration files that are present, and when prompted to present the value for a parameter (a pretend example would be something like `mountain-tallness`), it would check config files, environment variables, and then command-line flags, providing the value given _last_ in the sequence of options.
Unfortunately, I was stymied by a number of different issues, not least of which was somewhat unspecified documentation in the README for `viper`. I opened a [Github issue](https://github.com/spf13/viper/issues/375) on this in August of 2017 and for a variety of personal reasons lost track of this issue and failed to check for updates. Fortunately, [Tyler Butters](https://github.com/tbutts) responded to it relatively quickly and even though I didn't come back to the issue until April of 2018, I responded to further questions on his [pull request](https://github.com/spf13/viper/pull/396) almost instantly.
I'm going to break down my misunderstandings and what might be considered shortcomings in the libraries and documentation before wrapping up with my solutions at the end of the post.
My first misunderstanding was not quite realizing that once `viper` has consumed flags from a given command, those values are then within the `viper` data store, available to all commands and other components of the application. In short, `PersistentFlags` are not necessary once `viper` has been bound. This being true is a huge boon to the design of my parameters and commands; so long as my parameter names remain unique across the project, I can bind once in each command's `init()` and never have to touch the `cobra` value APIs, using `cobra` for nothing more than dealing with POSIX flags. The rest of the problems I had require a little more elaboration.
### Naming Confusion
The next issue, I would argue, is a design...oversight in `viper`. `viper`'s `BindPFlags` is troublingly named; in the context of `cobra`, `PFlags` can be misconstrued as `PersistentFlags`, which are values that propagate downward from a given command to all its children. This could be useful for setting parameters such as an output directory, a desired file format for renders/output files, and so on. `PersistentFlag` values would allow you to avoid repeating yourself when creating deeply nested command hierarchies.
What `BindPFlags` _actually_ means is "bind" to [PFlags](https://github.com/ogier/pflag), a juiced up, POSIX compliant replacement for the Golang standard library's `flag` toolkit. Realizing this took me quite a while. I can't be _too_ upset though because `BindPFlags` accepts a [*pflag.Flagset](https://godoc.org/github.com/ogier/pflag#FlagSet), so it might be assumed that this would be obvious. Either way, it really disrupted my understanding of the process and left me believing that `BindPFlags` was able and willing to look for `PersistentFlag` values.
In [this commit](https://github.com/therealfakemoot/genesis/blob/da7e9c39e8e443df7d2de23ab1172ce5b3a100ff/cmd/root.go#L49-L63) you can see where I set up my flags; originally these were `PersistentFlags` because I wanted these values to propagate downwards through subcommands. Thanks to the use of `viper` as the application's source-of-truth, `PersistentFlags` aren't strictly necessary.
### Order of Operations
The last issue is more firmly in the realm of 'my own fault'; `cobra` offers a range of initialization and pre/post command hooks that allow you to perform setup/teardown of resources and configurations during the lifecycle of a command being executed.
My failing here is rather specific. `cobra` by default recommends using the `init()` function of each command file to perform your [flag setup](https://github.com/therealfakemoot/genesis/blob/da7e9c39e8e443df7d2de23ab1172ce5b3a100ff/cmd/root.go#L49-L63). On line 62, you can see my invocation of `BindPFlags`. The code I inserted to test whether `viper` was successfully pulling these values was also included in the same `init()` method. After some discussion with Tyler B, I had to re-read every single line of code and eventually realize that when `init()` is called `cobra` hasn't actually parsed any command line values!
In addition to the change from `PersistentFlag` to `Flag` values, I moved my debug code _inside_ of the `cobra` [command hooks](https://github.com/therealfakemoot/genesis/blob/da7e9c39e8e443df7d2de23ab1172ce5b3a100ff/cmd/root.go#L21-L25) and found that configuration file values were being read correctly (as they always had been) *and* when an identically named command-line flag was passed, `viper` presented the overriding value correctly.
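Here's a compressed sketch of the working pattern, using the pretend `mountain-tallness` parameter from earlier; it's illustrative rather than a copy of the actual `genesis` commands:
```go
// Flags are registered and bound in init(), but viper values are only read
// inside command hooks, after cobra has parsed the command line.
var rootCmd = &cobra.Command{
	Use: "genesis",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		// Config-file and env values are visible here, and a passed
		// --mountain-tallness flag overrides them.
		fmt.Println("mountain-tallness:", viper.GetInt("mountain-tallness"))
	},
	Run: func(cmd *cobra.Command, args []string) {},
}

func init() {
	rootCmd.Flags().Int("mountain-tallness", 1000, "maximum terrain height")
	viper.BindPFlags(rootCmd.Flags())
}
```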
# Summary
This series of misunderstandings and errors in logic roadblocked my work on `genesis` for far longer than I'm proud to admit; efficient, effective, and sane configuration/parameterization is a key non-negotiable feature of this project. Any attempts to move forward with hacked-in or 'magic number' style parameters would be brittle and have to be dismantled (presumably painfully) at some point in the future. Thanks to Tyler, I was able to break through my improper grasp of the tools I was using and reach a point where I can approach implementing the more 'tangible' portions of the project such as generating terrain maps, accomplishing everything from rendering them to even starting to reason out something like a graphical interface.

Binary file not shown.

@ -0,0 +1,54 @@
---
draft: false
title: "Genesis Roadmap"
aliases: ["Genesis Roadmap"]
series: ["genesis-development"]
series_order: 1
author: "Nick Dumas"
cover: ""
summary: "Planning out a creative asset creation toolkit"
showFullContent: false
tags:
- genesis
- procedural-generation
- python
date: "2013-06-03"
---
Recently, I was working on an idea for a MUD; the general idea was to build a game with detailed 'inhabited' areas such as cities and dungeons as well as expansive wilderness regions for exploring and claiming by players. Naturally, I realized that doing this by hand would be unbearably tedious. A semi-realistic MUD world could contain 4,000,000 rooms; manually creating "Meadowy Plains #1421" would be error-prone and would drain the creative ability of a human. Thus, [Genesis](https://github.com/therealfakemoot/genesis-retired) was conceived. Note: this repository is now retired; the Python version of this project will not be pursued any longer.
# Moving Beyond the MUD
Planning a world generator for a MUD was an excellent idea, but very limited in scope. In particular, I realized how little I wanted to indelibly restrict myself to [Evennia](http://evennia.com/) as the framework in which the creation process happened. Evennia offers a great deal of power and flexibility, but the restriction of MUD concepts got me thinking that I should generalise the project even further.
In the end, I want Genesis to be a completely standalone world-design toolkit. The target audience includes tabletop gamers who need a physical setting for their campaigns, as well as authors who can create characters and stories, but have trouble with the tedium of drawing coastlines and mountain ranges out by hand.
# The Vision
As of the time of writing, implementation of Phase 1 is only partially completed. What follows is my overall goals for what each phase can accomplish.
## Phase 1: Heightmap Generation
Using a [simplex](http://en.wikipedia.org/wiki/Simplex) function, I populate an array with values representing the height of the world's terrain at a given coordinate. This is pretty simple; it's a pure function and when run with PyPy it absolutely screams. There's little to be said about this step because it produces the least interesting output. If so desired, however, a user could take the topological map generated and do whatever they please without following any further phases.
## Phase 2: Water Placement
The water placement phase is the simplest phase, yet has the most potentially drastic consequences in further phases. Given a heightmap from Phase 1, the user will be able to select a sea-level and at that point all areas of the map with a height below sea level will be considered underwater. This step can be reapplied to smaller subsections of the map allowing for the creation of mountain lakes and other bodies of water which are not at sea-level.
## Phase 3: Biome Assignment
Biome assignment is a rather complex problem. Full weather simulations are way beyond what one needs for an interesting map. To this end, I've found what I believe to be two excellent candidates for biome classification systems.
{{< figure src="" alt="Original image seems to be lost. Sorry" caption="Two-axis Biome Chart">}}
This graph uses two axes to describe biomes: rainfall and temperature. This is an exceedingly easy metric to use. Proximity to water is a simple way to determine a region's average rainfall. Temperature is also an easy calculation, given a planet's axial tilt and the latitude and longitude of a location.
{{< figure src="SoilTexture_USDA.png" caption="Three-axis Biome Chart">}}
This graph is slightly more detailed, factoring in a location's elevation into the determination of its biome. As this phase is still unimplemented, options remain open.
## Phase 4: Feature Generation
In the Milestones and Issues, I use the term 'feature' as a kind of catch-all; this phase is the most complex because it involves procedural generation of landmarks, cities, dungeons, villages, huts, and other details that make a piece of land anything more than an uninteresting piece of dirt. This phase will have the most direct interaction by a user, primarily in the form of reviewing generated features and approving or rejecting them for inclusion in the world during Phase 5. In this Phase, the user will determine what types of features they desire (large above ground stone structures, small villages, underground dungeons, and so on).
## Phase 5: Feature Placement
Phase 5 takes the objects generated during Phase 4 and allows the user the option of manually placing features, allowing Genesis to determine on its own where to place them, or some combination of both. Although it wouldn't make much sense to have multiple identical cities in the same world, this phase will allow duplication of features allowing for easy placement of templates which can be customised at some future point.
# In Practice
The Genesis Github repository currently has a working demo of Phase 1. CPython is exceedingly slow at generating large ranges of simplex values and as such, the demo will crash or stall when given exceedingly large inputs. This is currently being worked on as per #8.

@ -0,0 +1,130 @@
---
draft: false
title: "Gitea, git-lfs, and syncing Obsidian Vaults"
aliases: ["Gitea, git-lfs, and syncing Obsidian Vaults"]
date: "2023-01-31"
author: "Nick Dumas"
cover: ""
tags: ["obsidian", "git", "gitea"]
keywords: ["obsidian", "git", "gitea"]
summary: "A brief overview of how I stood up a gitea instance for the purpose of backing up and syncing my Obsidian vault."
showFullContent: false
---
## What am I Doing?
I take notes on a broad spectrum of topics ranging from tabletop roleplaying games to recipes to the last wishes of my loved ones. Because of how valuable these notes are, I need to accomplish two things:
1) Back up my notes so that no single catastrophe can wipe them out
2) Make my notes accessible on multiple devices like my phone and various work laptops
For writing and organizing my notes, I use an application called [Obsidian](https://obsidian.md), an Electron Markdown reader and editor with an emphasis on plaintext, local-only files to represent your notes. This has a lot of interesting implications which are well beyond the scope of this post, but this is the one that's germane: your notes are a textbook use-case for version control.
Markdown files are plain-text, human-readable content that every modern Version Control System is supremely optimized for handling. In this arena, there's a lot of options ( mercurial, bzr, git, svn, fossil, and more ) but I'm partial to git.
## Life with git
```bash
nick@DESKTOP-D6H8V4O MINGW64 ~/Desktop/general-notes (main)
$ git log $(!!)
git log $(git rev-list --max-parents=0 HEAD)
commit 18de1f967d7d9c667ec42f0cb41ede868d6bdd31
Author: unknown <>
Date: Tue May 31 09:44:49 2022 -0400
adding gitignore
```
I've kept my vault under git for all but the first 2 months of my vault's lifetime and I cannot count the number of times it's saved me from a mistake or a bug.
A few times a day, I'll commit changes to my notes, plugins, or snippets and push them up. This is a manual process, but by reviewing all my changes as they're committed I kill a few birds with one stone:
1) I get a crude form of spaced repetition by forcing myself to review notes as they change
2) I verify that templates and other code/plugins are working correctly and if they aren't, I can revert to a known-good copy trivially
3) reorganizations become much easier ( see point 2, reverting to known-good copies )
For convenience, I chose to start off with Github as my provider. I set up a private repository because my notes contain sensitive information of various flavors and had no problems with it, except for attachments. This works great, Github is a fast reliable provider and meets all the requirements I laid out above.
## The catch
There is no free lunch. On Github, free repositories have restrictions:
1) github will warn you if you commit files larger than 50mb and ask you to consider removing them or using git-lfs
2) github will not permit any files larger than 100mb to be committed
3) You're allowed a limited number of private repositories, depending on the type and tier of your account.
My vault does not exclusively consist of plaintext files, though; there's PDFs, PNGs, PSDs, and more hanging out, taking up space and refusing to diff efficiently. I've got a lot of PDFs of TTRPG content, screenshots of important parts of software I care about for work or my personal life, and a lot of backup copies of configuration files.
In theory, this is sustainable. None of my attachments currently exceed 100mb; the median size is well under 1mb.
```bash
$ pwd
~/evac/obsidian-vaults/bonk/Resources/attachments
$ ls -lah|awk '{print $5}'|sort -hr|head -n5
62M
36M
8.4M
3.1M
2.9M
```
I'm not satisfied with theoretical sustainability, though. For something this important and sensitive, I'd like to have total confidence that my system will work as expected for the foreseeable future.
## What are the options?
1) Github has its own [lfs service](https://docs.github.com/en/repositories/working-with-files/managing-large-files/about-git-large-file-storage) with the free tier capped at 2gb of storage.
2) Pay for a higher tier of Github's LFS
3) Managed Gitlab (or similar) instance
4) Host my own
Options 1 and 2 are the lowest effort solution and rely the most on third parties. I've opted not to go with this because Github may change its private repository or git-lfs policies at any time.
Option 3 is better; a managed git hosting service splits the difference nicely. Using Gitlab would give me built-in CI/CD.
I've opted out of this mostly for price and partly because I know for a fact that I can implement option 4.
## Option 4
I chose to use what I'm already familiar with: [Gitea](https://gitea.io/en-us/). Gitea is a fork of Gogs, a hosted git service written in Go. It's lightweight and its simplest implementation runs off an sqlite database so I don't even need a PostgreSQL service running.
I've been using gogs and gitea for years and they've been extremely reliable and performant. It also integrates tightly with [Drone](https://www.drone.io/), a CI/CD system which will help me automate my blog, publish my notes, and more I haven't had the energy to plan.
## docker-compose and gitea
For my first implementation, I'm going to host gitea using docker-compose. This will give me a simple, reproducible setup that I can move between providers if necessary.
Hosting will be done on my DigitalOcean droplet running a comically old version of Fedora for now. This droplet is really old and up until now I've had very poor reproducibility on my setups. I'm working on fixing that with [caddy](/notes/automating-caddy-on-my-droplet), and using gitea for code management is next.
Below you'll see the `docker-compose.yaml` for my gitea instance. This is ripped directly from the gitea documentation so there's very little to comment on. The `ports` field is arbitrary and needs to be adjusted based on your hosting situation.
```yaml
version: "3"
networks:
gitea:
external: false
services:
server:
image: gitea/gitea:1.18.0
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- ./gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3069:3000"
- "222:22"
```
Starting it up is similarly uninteresting; I use detached mode for "production" work because I'm not super interested in watching all the logs. If something breaks, I can start it back up again without detaching and see whatever error output is getting kicked up.
```bash
$ docker-compose up -d
Starting gitea ... done
$
```
Once this is done, you've got a gitea instance waiting to be configured with an admin user and a few other bootstrap settings. Navigate to the URL you chose for your gitea instance while following the docs and you're ready to create a repository for your vault.
The web UI will guide you from there.
## Success Story???
This solution is only a week or two old so it has not been put under a lot of load yet, but gitea has a good reputation and supports a lot of very high profile projects, and DigitalOcean has been an extremely reliable provider for years.
Migrating my attachments into git-lfs was trivial, but it did rewrite every commit, which is something to be mindful of if you're collaborating between people or devices.
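For anyone following along, the migration itself looked roughly like this; the patterns are assumptions based on what lives in my vault, so adjust them to your own attachments:
```bash
# Track the heavy formats going forward, then rewrite history so existing
# copies move into LFS. This is the step that rewrites every commit.
git lfs track "*.png" "*.pdf" "*.psd"
git lfs migrate import --include="*.png,*.pdf,*.psd"
git push --force origin main
```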
I don't intend to get more aggressive with adding large media attachments to my vault, I *prefer* plaintext when it's an option. Backing up my notes was only one item on a list of reasons I stood gitea up, in the coming weeks I'm going to work on using Drone to automate blog posts and use that as a springboard into more automation.

@ -0,0 +1,127 @@
---
draft: false
title: "Quantization in Go: Rehsaping floats"
aliases: ["Quantization in Go: Rehsaping floats"]
series: ["genesis-development"]
series_order: 3
author: "Nick Dumas"
cover: ""
summary: "Implementing float interpolation for fun and profit"
showFullContent: false
tags:
- genesis
- golang
- procedural-generation
date: "2018-04-22"
---
# The Goal
Before going too deep into the implementation details of Genesis, I'll touch on the high level aspect of quantization. Quantization is a technique used to map arbitrary inputs into a well defined output space. This is, practically speaking, a hash function. When the term 'quantization' is used, however, it's typically numeric in nature. Quantization is typically used in audio/image processing to compress inputs for storage or transmission.
# Quantizing OpenSimplex
My use case is a little less straightforward. The OpenSimplex implementation I'm using as a default noisemap generator produces values in the [interval](https://en.wikipedia.org/wiki/Interval_(mathematics)#Including_or_excluding_endpoints) [-1,1]. The 3d simplex function produces continuous values suitable for a noisemap, but the values are by nature infinitesimally small and diverge from their neighbors in similarly small quantities. Here's an example of a small sampling of 25 points:
```
[1.9052595476929043e-65 0.23584641815494023 -0.15725758120580122 -0.16181229773462788 -0.2109552918614408 -0.24547524871149487 0.4641016420951697 0.08090614886731387 -0.3720484238283594 -0.5035758520116665 -0.14958647968356706 -0.22653721682847863 0.4359742698469777 -0.6589156578369094 -1.1984697154842467e-66 0.2524271844660192 -0.3132366454912306 -0.38147748611610527 5.131908781688952e-66 0.3814774861161053 0.07543249830197025 0.513284589875744 -1.4965506447200717e-65 0.031883015701786095 0.392504694554317]
```
As you can see, there are some reasonably comprehensible values, like `-0.50357585`, `-0.222`, `0.075432`, but there are also values like `-1.1984697154842467e-66` and `1.9052595476929043e-65`. Mathematically, these values end up being continuous and suitable for generating a noisemap, but for a human being doing development work and examining raw data, it's almost impossible to have any intuitive grasp of the numbers I'm seeing. Furthermore, when I pass these values to a visualization tool or serialize them to a storage format, I want them to be meaningful and contextually "sane". The noisemap values describe the absolute height of terrain at a given (X,Y) coordinate pair. If we assume that terrain height is measured in meters, a world whose total height ranges between -1 meter and 1 meter isn't very sensible. A good visualization tool can accommodate this data, but it's not good enough for my purposes.
To that end, I'm working on implementing a quantization function to scale the [-1,1] float values to arbitrary user defined output spaces. For example, a user might desire a world with very deep oceans, but relatively short mountain features. They should be able to request from the map generator a range of [-7500, 1000], and Quantize() should evenly distribute inputs between those desired outputs.
In this way, I'll kill two birds with one stone. The first bird has been fudging coefficients in the noise generation algorithm, and at the "edges" of the `Eval3` function, to modify the "scale" of the output. This has been an extremely troublesome process because I do not have enough higher maths education to fully grasp the entirety of the simplex noise function, and because there are so many coefficients and "magic numbers" involved in the process that mapping each of their effects on each other and the output simultaneously is a very daunting task. The second bird is a longer-term goal of Genesis involving detailed customizability of terrain output.
By virtue of having an effective quantization function, a user will be able to customize terrain to their liking in a well-defined manner, uniformly scaling the map however they desire.
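To pin down what "evenly distribute" means, the target behavior is just a linear rescale of the [-1,1] interval onto the requested range. This is only a statement of the goal, not the implementation discussed below:
```go
// scale maps x in [-1,1] linearly onto [min,max]:
// x = -1 lands on min, x = 1 lands on max.
func scale(x, min, max float64) float64 {
	return min + ((x+1)/2)*(max-min)
}
```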
# The problem
Unfortunately, my hand-rolled implementation of Quantize is not yet fully functional. Open source/free documentation on quantization of floating point values to integer domains is very sparse. I was only able to find one StackOverflow post where someone posted their MATLAB implementation, which was marginally useful but largely incomprehensible, as I currently do not know or own MATLAB.
With some trial and error, however, I was able to get very close to a working *and* correct implementation:
```
Output Domain: {Min:-5 Max:5 Step:1}
[1.9052595476929043e-65 0.23584641815494023 -0.15725758120580122 -0.16181229773462788 -0.2109552918614408 -0.24547524871149487 0.4641016420951697 0.08090614886731387 -0.3720484238283594 -0.5035758520116665 -0.14958647968356706 -0.22653721682847863 0.4359742698469777 -0.6589156578369094 -1.1984697154842467e-66 0.2524271844660192 -0.3132366454912306 -0.38147748611610527 5.131908781688952e-66 0.3814774861161053 0.07543249830197025 0.513284589875744 -1.4965506447200717e-65 0.031883015701786095 0.392504694554317]
[-1 1 -2 -2 -3 -3 3 0 -4 -6 -2 -3 3 -7 -1 1 -4 -4 -1 2 0 5 -1 0 2]
```
Unfortunately, my current implementation is outputting values outside the provided domain. It almost looks like the output is twice what it should be, but I know that doing things incorrectly and just dividing by 2 afterwards isn't sufficient. I've got relatively few leads at the moment, but I'm not giving up.
# The Code
```go {title="main.go"}
package main
import (
"fmt"
noise "github.com/therealfakemoot/genesis/noise"
// "math"
// "math/rand"
)
// Domain describes the integer space to which float values must be mapped.
type Domain struct {
Min float64
Max float64
Step float64
}
// func quantize(delta float64, i float64) float64 {
// return delta * math.Floor((i/delta)+.5)
// }
func quantize(steps float64, x float64) int {
if x >= 0.5 {
return int(x*steps + 0)
}
return int(x*(steps-1) - 1)
}
// Quantize normalizes a given set of arbitrary inputs into the provided output Domain.
func Quantize(d Domain, fs []float64) []int {
var ret []int
var steps []float64
for i := d.Min; i <= d.Max; i += d.Step {
steps = append(steps, i)
}
stepFloat := float64(len(steps))
// quantaSize := (d.Max - d.Min) / (math.Pow(2.0, stepFloat) - 1.0)
for _, f := range fs {
ret = append(ret, quantize(stepFloat, f))
}
fmt.Printf("Steps: %v\n", steps)
// fmt.Printf("Quanta size: %f\n", quantaSize)
return ret
}
func main() {
d := Domain{
Min: -5.0,
Max: 5.0,
Step: 1.0,
}
n := noise.NewWithSeed(8675309)
var fs []float64
for x := 0.0; x < 10.0; x++ {
for y := 0.0; y < 10.0; y++ {
fs = append(fs, n.Eval3(x, y, 0))
}
}
// for i := 0; i < 20; i++ {
// fs = append(fs, rand.Float64())
// }
v := Quantize(d, fs)
fmt.Printf("%v\n", fs)
fmt.Printf("%v\n", v)
}
```

@ -0,0 +1,160 @@
---
draft: false
title: "Putting Lipgloss on a Snake: Prettier Help Output for Cobra"
aliases: ["Putting Lipgloss on a Snake: Prettier Help Output for Cobra" ]
series: []
date: "2023-05-08"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "Using lipgloss to abstract away the specifics of nice terminal output."
showFullContent: false
tags:
- golang
- cli
---
## What am I Doing?
[Cobra](https://github.com/spf13/cobra) is a library for building command line applications in Go. It's pretty fully featured with sub-commands, automatic completion generators for zsh and bash, and more.
I'm building a helper application to work with my blog publishing pipeline and to keep things modular I'm trying to break it down into smaller utilities.
## Why does it work?
Cobra is a well established library and feature-rich. It's got a great helper utility (`cobra-cli`) to set up a skeleton you can use to jump in.
One thing that Cobra automatically handles in particular is help output. Veering away from the tradition of `docopt`, you build a series of objects, set callbacks and configuration values, and Cobra works backwards from that to generate a nice help output listing flags and all the usual stuff.
Cobra lets you override a few members on the `Command` structs to further customize the outputs: `Short` and `Long`.
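For context, here's a minimal sketch of what those overrides look like; the command name and descriptions below are placeholders of my own, not the actual tool:
```go {title="cmd/root.go"}
package cmd

import "github.com/spf13/cobra"

// rootCmd is a hypothetical root command; Short shows up in command listings,
// and Long is printed verbatim at the top of the command's help output.
var rootCmd = &cobra.Command{
	Use:   "blogtool",
	Short: "Helpers for my blog publishing pipeline",
	Long: `blogtool bundles the small utilities I use when publishing posts.
Everything you put in Long is echoed back verbatim by the default help output.`,
}
```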
## Readable Line Length
What tripped me up as I prototyped the first few commands was the fact that Cobra will simply print whatever strings you provide to `Short` and `Long` verbatim.
I don't appreciate this because most of my displays are pretty wide. Paragraphs get smeared across the entire screen and become really challenging to read.
## What are the options?
The naïve solution is to just manually terminate my lines at 80 or 120 or some other count. This is "simple" and portable, but extremely tedious.
The other option, as always, is "delegate". I know that there's a few different toolkits out there for terminal interaction, but this time I knew what I wanted to use.
## Charming serpents...
[charm](https://charm.sh/) is a constellation of golang libraries for building terminal applications. My target was [lipgloss](https://github.com/charmbracelet/lipgloss) which handles the bulk of aligning, positioning, and styling terminal output for the rest of Charm's suite.
lipgloss is nice to work with, it has a pretty simple API that seems to mostly stay out of your way. Cobra, on the other hand, is an extremely opinionated library with lots of (mostly optional) wiring.
The lipgloss documentation has plenty of good examples so for brevity I'm going to jump right into where things got interesting.
## is easier than taming them
The parts of Cobra we care about here are related to the help and usage outputs. These are handled publicly by `Command.Set{Help,Usage}Func`, and privately by a handful of unexported functions that take `*cobra.Command` and shove that into a template.
### Setting our helpers
Telling Cobra to use our custom Usage and Help functions is pretty straightforward:
```go {title="cmd/root.go"}
func init() {
rootCmd.SetUsageFunc(demo.CharmUsage)
rootCmd.SetHelpFunc(demo.CharmHelp)
}
```
One helpful feature of Cobra is that child commands inherit properties from their parent, including Usage and Help funcs. This means you only have to set this up once on your root command, and your entire application will be able to leverage this.
## Help and Usage
Below, we have the definitions for each function. As you can see, I've managed to cleverly abstract away the hard work and real knowledge by yet another layer; each simply calls out to `tmpl()` and `pretty()`, both of which will be explained further.
Because `tmpl()` is unexported, I had to dig into the Cobra source and copy it out, but that's coming up. For now, it's enough to say that it takes a writer, a template string, and a `cobra.Command` and executes the template.
The only particularly clever part of this code is leveraging `UsageTemplate()` and `HelpTemplate()`. My original implementation copied those templates verbatim as well. If all you need to do is wrap the standard output, you can get the built-in template this way.
```go {title="gloss.go"}
package demo
import (
"bytes"
"fmt"
"io"
"github.com/charmbracelet/lipgloss"
"github.com/spf13/cobra"
)
func CharmUsage(c *cobra.Command) error {
var b bytes.Buffer
err := tmpl(&b, c.UsageTemplate(), c)
if err != nil {
c.PrintErrln(err)
return err
}
pretty(c.ErrOrStderr(), b.String())
return nil
}
func CharmHelp(c *cobra.Command, a []string) {
var b bytes.Buffer
// The help should be sent to stdout
// See https://github.com/spf13/cobra/issues/1002
err := tmpl(&b, c.HelpTemplate(), c)
if err != nil {
c.PrintErrln(err)
}
	pretty(c.OutOrStdout(), b.String())
}
```
### pretty()
Below is the implementation of `pretty()`, a very straightforward function: take a string and write out the `Render()`'d version.
```go {title="gloss.go"}
var BaseStyle = lipgloss.NewStyle().Bold(true).BorderStyle(lipgloss.RoundedBorder()).Width(60).PaddingLeft(1).PaddingRight(1).PaddingBottom(2)
func pretty(w io.Writer, s string) {
fmt.Fprintf(w, "%s\n", BaseStyle.Render(s))
}
```
### tmpl() and friends
cobra implements a set of template helper functions, plus `tmpl(w io.Writer, text string, data interface{}) error`, which simply executes the given template against a writer.
```go {title="template.go"}
package demo
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"text/template"
"unicode"
)
var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent,
"rpad": rpad,
"gt": Gt,
"eq": Eq,
}
// i'm eliding the bodies of these functions for brevity
func Gt(a interface{}, b interface{}) bool {}
func Eq(a interface{}, b interface{}) bool {}
func trimRightSpace(s string) string {}
func appendIfNotPresent(s, stringToAppend string) string {}
func rpad(s string, padding int) string {}
// tmpl executes the given template text on data, writing the result to w.
func tmpl(w io.Writer, text string, data interface{}) error {
t := template.New("top")
t.Funcs(templateFuncs)
template.Must(t.Parse(text))
return t.Execute(w, data)
}
```
## How does it look?
Not bad:
[![asciicast](https://asciinema.org/a/Hyk4epQZiPjjzLTO1MvmuMzaZ.svg)](https://asciinema.org/a/Hyk4epQZiPjjzLTO1MvmuMzaZ)
You can find the full code [here](https://github.com/therealfakemoot/lipgloss-cobra-demo).
## Success Story???
I'm not sure if this is even particularly useful yet. There are edge cases where adding a border causes things to break, and probably more. Still, I'm pretty satisfied with having learned more about how cobra is wired up.

@ -0,0 +1,52 @@
---
draft: false
title: "Making Noise: Simplex Noise and Quantization"
aliases: ["Making Noise: Simplex Noise and Quantization"]
series: ["genesis-development"]
series_order: 4
author: "Nick Dumas"
cover: ""
summary: "Pseudorandom noise and making it fit your domain."
showFullContent: false
date: "2019-02-28"
images:
tags:
- genesis
- golang
- procedural-generation
---
# The Conceit
I've written about Genesis [before](/posts/genesis-roadmap/), but it's got a lot of complexity attached to it, and the roadmap I originally laid out has shifted a bit. For this post I'm focusing solely on Phase 1, the generation of geography. This is obviously a fundamental starting point, and it has roadblocked my progress on Genesis for quite some time ( somewhere on the order of 8 years or so ).
My [original implementation](https://github.com/therealfakemoot/genesis_retired) was written in Python; this was...serviceable, but not ideal. Specifically, visualizing the terrain I generated was impossible at the time. Matplotlib would literally lock up my entire OS if I tried to render a contour plot of any map exceeding 500 units on a side. I had to power cycle my desktop computer many times during testing.
Eventually, I jumped from Python to Go, which was a pretty intuitive transition. I never abandoned Genesis, spiritually, but until this point I was never able to find technology that felt like it was adequate. Between Go's natural performance characteristics and the rise of tools like D3.js, I saw an opportunity to start clean.
It took a year and some change to make real progress.
# Making Noise
Noise generation, in the context of "procedural" world or map generation, describes the process of creating random numbers or data in a way that produces useful and sensible representations of geography. To generate values that are sensible, though, there are some specific considerations to keep in mind. Your typical (P)RNG generates values with relatively little correlation to each other. This is fine for cryptography, rolling dice, and so on, but it's not so great for generating maps. When assigning a "height" value to point (X, Y), you may get 39; for (X+1, Y), you might get -21, and so on.
This is problematic because adjacent points on the map plane can vary wildly, leading to inconsistent or impossible geography: sharp peaks directly adjacent to impossibly deep valleys with no transition or gradation between them. This is where noise functions come in. Noise functions have the property of being "continuous", which means that when given inputs that are close to each other, the outputs change smoothly. A noise function, given (X, Y) as inputs might produce 89; when given (X+1, Y) it might produce 91. (X, Y+1) could yield 87. All these values are close together, and as the inputs vary, the outputs vary *smoothly*.
There seem to be two major candidates for noise generation in amateur projects: [Perlin noise](https://en.wikipedia.org/wiki/Perlin_noise) and [Simplex noise](https://en.wikipedia.org/wiki/Simplex_noise). Perlin noise was popular for a long while, but eventually deemed slow and prone to generating artifacts in the output that disrupted the "natural" feel of its content. Simplex noise is derived from the idea of extruding triangles into higher and higher dimensional space, but beyond that I haven't got a single clue how it works under the hood. I do know that it accepts integers ( in my use case, coordinates on the X,Y plane ) and spits out a floating point value in the range of `[-1,1]`.
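To make that continuity concrete, here's a tiny sketch using the same noise package from my earlier experiments; the exact numbers will differ from run to run of the generator's parameters, but neighboring samples land close together:
```go
package main

import (
	"fmt"

	noise "github.com/therealfakemoot/genesis/noise"
)

func main() {
	n := noise.NewWithSeed(8675309)
	// Sample three neighboring points on the plane. Unlike a plain PRNG,
	// the outputs vary smoothly as the inputs change.
	fmt.Println(n.Eval3(10, 10, 0))
	fmt.Println(n.Eval3(11, 10, 0))
	fmt.Println(n.Eval3(10, 11, 0))
}
```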
# Quantization
This is something I've written about [before](/golang-quantize/), but shockingly, I was entirely wrong about the approach to a solution. At best, I overcomplicated it. Quantization is, using technical terms, transforming inputs in one interval to outputs in another. Specifically, my noise generation algorithm returns floating point values in the range `[-1, 1]`. Conceptually, this is fine; the values produced for adjacent points in the x,y plane are reasonably similar.
Practically speaking, it's pretty bad. When troubleshooting the noise generation and map rendering, trying to compare `1.253e-64` and `1.254e-64` is problematic; these values aren't super meaningful to a human. When expressed in long-form notation, it's almost impossible to properly track the values in your head. Furthermore, the rendering tools I experimented with would have a lot of trouble dealing with infinitesimally small floating point values, from a configuration perspective if not a mathematical one.
In order to make this noise data comprehensible to humans, you can quantize it using, roughly speaking, three sets of parameters:
1) The value being quantized
2) The maximum and minimum input values
3) The maximum and minimum output values
Given these parameters, the function is `(v - input.Min) * (output.Max - output.Min) / (input.Max - input.Min) + output.Min`. I won't go into explaining the math, because I didn't create it and probably don't understand it fully. But the important thing is that it works; it's pure math with no conditionals, no processing. As long as you provide these five parameters, it will work for all positive and negative inputs.
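Here's a minimal sketch of that formula in Go; the `Range` type and the function name are mine, purely for illustration:
```go
package main

import "fmt"

// Range describes an interval by its endpoints.
type Range struct {
	Min, Max float64
}

// Rescale maps v from the input interval onto the output interval using the
// formula above.
func Rescale(v float64, in, out Range) float64 {
	return (v-in.Min)*(out.Max-out.Min)/(in.Max-in.Min) + out.Min
}

func main() {
	in := Range{Min: -1, Max: 1}
	out := Range{Min: -7500, Max: 1000} // deep oceans, short mountains

	fmt.Println(Rescale(-1, in, out)) // -7500
	fmt.Println(Rescale(0, in, out))  // -3250
	fmt.Println(Rescale(1, in, out))  // 1000
}
```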
Now, with the ability to scale my simplex noise into ranges that are useful for humans to look at, I was ready to start generating visualizations of the "maps" produced by this noise function. At long last, I was going to see the worlds I had been creating.
# Until Next Time
This is where I'll close off this post and continue with the [solutions](/posts/unfolding-the-map/).

Binary file not shown.

Binary file not shown.

@ -0,0 +1,165 @@
---
draft: false
title: "Mapping Aardwolf with Graphviz and Golang"
aliases: ["Mapping Aardwolf with Graphviz"]
series: ["aardwolf-adventures"]
series_order: 2
date: "2023-04-06"
author: "Nick Dumas"
cover: ""
keywords: [""]
summary: "Maxing out your CPU for fun and profit with dense graphs, or how I'm attempting to follow through on my plan to work on projects with more visual outputs"
showFullContent: false
tags:
- graphviz
- graph
- aardwolf
- golang
---
## Textual Cartography
Aardwolf has a fairly active developer community, people who write and maintain plugins and try to map the game world and its contents.
I saw one user, Danj, talking about their work on mapping software and my interest was piqued.
The MUSHclient [bundle](https://github.com/fiendish/aardwolfclientpackage/wiki/) provided by Fiendish has a mapper that tracks your movement through **rooms** and **exits**. This data is leveraged by a bunch of plugins in a variety of ways, none of which are super relevant to this post.
In practice, I know that I can't possibly compete with existing solutions like the [Gaardian Roominator](http://rooms.gaardian.com/index.php) and the beta SVG version that I don't have a link to at the moment. That doesn't stop me from wanting to get my hands on the data and see if I can do anything cool with it.
## The Data
The mapper's map data is stored in an sqlite database, and the schema is pretty straightforward. There are a few tables we care about: [Areas](#areas), [Rooms](#rooms), and [Exits](#exits).
These tables look like they have a lot of columns, but most of them end up being irrelevant in the context of trying to create a graph representing the rooms and exits connecting them.
The `exits` table is just a join table on `rooms`, so in theory it should be pretty trivial to assemble a list of vertices ( rooms ) and edges ( exits ) and pump them into graphviz, right?
### Areas
```sql
sqlite> .schema areas
CREATE TABLE areas(
uid TEXT NOT NULL,
name TEXT,
texture TEXT,
color TEXT,
flags TEXT NOT NULL DEFAULT '',
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`deleted_at` datetime,
PRIMARY KEY(uid));
CREATE INDEX `idx_areas_deleted_at` ON `areas`(`deleted_at`);
```
### Rooms
```sql
sqlite> .schema rooms
CREATE TABLE rooms(
uid TEXT NOT NULL,
name TEXT,
area TEXT,
building TEXT,
terrain TEXT,
info TEXT,
notes TEXT,
x INTEGER,
y INTEGER,
z INTEGER,
norecall INTEGER,
noportal INTEGER,
ignore_exits_mismatch INTEGER NOT NULL DEFAULT 0,
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`deleted_at` datetime,
`flags` text,
PRIMARY KEY(uid));
CREATE INDEX rooms_area_index ON rooms (area);
CREATE INDEX `idx_rooms_deleted_at` ON `rooms`(`deleted_at`);
```
It wasn't until writing this post and cleaning up that `CREATE TABLE` statement to be readable that I noticed rooms also have integer IDs. That may be useful for solving the problems I'll describe shortly.
### Exits
```sql
sqlite> .schema exits
CREATE TABLE exits(
dir TEXT NOT NULL,
fromuid TEXT NOT NULL,
touid TEXT NOT NULL,
level STRING NOT NULL DEFAULT '0',
PRIMARY KEY(fromuid, dir));
CREATE INDEX exits_touid_index ON exits (touid);
```
## Almost Right
Getting the edges and vertices into graphviz ended up being pretty trivial. The part that took me the longest was learning how to do database stuff in Go. So far I'd managed to interact with flat files and HTTP requests for getting my data, but I knew that wouldn't last forever.
### A relational tangent
In brief, the Go database workflow has some steps in common:
1) import `database/sql`
2) import your database driver
3) open the database or establish a connection to the server
4) Make a query
5) Scan() into a value
6) use the value
There's some variance with points 5 and 6 on whether you want exactly one or some other number of results ( `Query` vs `QueryRow` ).
To demonstrate, here's a pared down sample of what I'm using in my `aardmapper`.
```go {title="main.go"}
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

type Area struct {
	Uid, Name, Flags, Color, Texture sql.NullString
}

func main() {
	// Most error handling is elided for brevity. Do not ignore errors.
	fn := "map.db" // placeholder path; point this at the mapper's sqlite file
	db, _ := sql.Open("sqlite3", fn)

	// Illustrative query: grab a single area from the table shown above.
	row := db.QueryRow("SELECT uid, name, flags, color, texture FROM areas LIMIT 1")

	a := Area{}
	if err := row.Scan(&a.Uid, &a.Name, &a.Flags, &a.Color, &a.Texture); err != nil {
		if err == sql.ErrNoRows {
			log.Fatalf("no area found: %v", err)
		}
		log.Fatalf("scanning area: %v", err)
	}

	// do stuff with your queried Area
	fmt.Printf("%+v\n", a)
}
```
## The graph must grow
Once I was able to query rooms and exits from the database, I was on the fast track. The graphviz API is relatively straightforward when you're using Go:
```go {title="mapper.go"}
gv := graphviz.New()
g := gv.Graph()
for _, room := range rooms { // creation of rooms elided
origin, _ := g.CreateNode("RoomID_AAAAA")
dest, _ := g.CreateNode("RoomID_AAAAB")
edge, _ := g.CreateEdge("connecting AAAAA to AAAAB", origin, dest)
}
// Once again, error handling has been elided for brevity. **Do not ignore errors**.
```
This ended up working great. The rooms and exits matched up to vertices and edges the way I expected.
The only problem was that rendering the entire thing on my DigitalOcean droplet will apparently take more than 24 hours. I had to terminate the process at around 16 hours because I got impatient.
## The lay of the land
This first, naive implementation mostly does the trick. It works really well for smaller quantities of rooms. Below you can see a PNG and SVG rendering of 250 rooms, and the code used to generate it.
```go
if err := gv.RenderFilename(g, graphviz.SVG, "/var/www/renders.ndumas.com/aardmaps/name.ext"); err != nil {
log.Fatal(err)
}
```
{{< figure src="250-rooms.svg" title="250 Rooms (SVG)" alt="a disorderly grid of squares representing rooms connected to each other in a video game" caption="SVG scales a lot better" >}}
{{< figure src="250-rooms.png" title="250 Rooms (PNG)" alt="a disorderly grid of squares representing rooms connected to each other in a video game" caption="Raster images can be simpler and more performant to render" >}}
## What's next?
The current iteration of rendering is really crude:
- The rooms are displayed using their numeric IDs, not human friendly names.
- Rooms are grouped by area, creating subgraphs to describe them will help interpreting the map and probably help rendering.
- The current iteration is very slow
I've also been contemplating the idea of rendering each area one at a time, and then manipulating the resulting SVG to display connections that cross between areas. This would almost certainly be infinitely faster than trying to render 30,000 vertices and 80,000 edges simultaneously.
All my code can be found [here](https://code.ndumas.com/ndumas/aardmapper). It's still early in prototyping so I don't have any stable builds or tags yet.

@ -0,0 +1,28 @@
./automating-caddy-on-my-droplet.md
./making-noise.md
./first-post.md
./integrating-cobra-and-lipgloss.md
./notes-as-tools.md
./mapping-aardwolf-with-graphviz.md
./golang-quantize.md
./prom-primer.md
./beautiful-builds-with-bazel.md
./filtering-hugo-pages-by-type.md
./non-mechanical-ttrpg-fundamentals.md
./one-dimensional-automata-and-you.md
./path-of-market.md
./standing-up-gogs.md
./series-and-navigation.md
./gitea-lfs-and-syncing-obsidian-vaults.md
./genesis-flags.md
./the-joy-of-versioning.md
./validating-yaml-frontmatter-with-jsonschema.md
./genesis-roadmap.md
./bf-in-go.md
./first-aardwolf-remort.md
./pagerduty-synthetic-retrigger-loop.md
./stamping-builds-with-bazel.md
./drone-and-hugo.md
./data-interfaces.md
./selinux-and-nginx.md
./pragmatic-projections-primer.md

@ -0,0 +1,61 @@
---
draft: false
title: "Non-mechanical Tabletop Gaming Fundamentals"
aliases: ["Non-mechanical Tabletop Gaming Fundamentals"]
series: []
date: "2024-04-24"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "A non-exhaustive list of tips and rules that elevate my tabletop gaming."
showFullContent: false
tags:
- dnd
---
## Introduction
### What I Assume you Know
Almost nothing. These tips are not about balancing encounters or homebrew classes. Running tabletop games is first and foremost a *social* experience and that's what I'm going to focus on.
### The Short List
- Stay hydrated.
- Embrace failure.
- Start small.
- Slow is smooth, smooth is fast.
- If you take proper notes, work is never wasted. Only deferred.
- If you haven't rehearsed your disaster recovery procedure, you do not have a disaster recovery procedure.
## Stay hydrated
Talking and thinking is thirsty work. Your body **needs** water to function. Do not deprive your body of what it needs to function.
Consider scheduled breaks for you and your players to:
- make sure they have water
- to stretch their legs
- to focus their eyes on something other than a screen or their character sheet for a few minutes
> Making sure the body is cared for can help keep the mind sharp and spirits up.
## Embrace failure
Failure stings, but it's important to acknowledge the stakes of a tabletop game: effectively none. If you make an unlikeable NPC, it's unlikely that your marriage will fall apart or you'll lose your job. It's important to take these moments and use them to learn something.
> Seek feedback early and often. Receive it *gracefully*. Act on it *promptly*.
## Start small
The problem here boils down to a relatively universal phenomenon in creative/artisan fields: the gap between capacity and taste.
As adults, even teenagers, we've been around long enough to have developed a sense of what we think is "good" and what isn't. We know what music we like sounds like, we know what kind of books we like to read. And so we begin a project to create music or a book or a drawing and find ourselves caught between the fact that we know what we like and the realization that we have no idea how it's made. This dissonance can be a dealbreaker for a lot of people.
> Be kind to yourself. Remember that you are learning, and it's okay to do simple things before you do hard things.
## Slow is smooth, smooth is fast
Thoughtless haste does nobody any good. Sitting in the hotseat can be extremely stressful, being responsible for representing the motives and actions of an entire universe is a lot of pressure. Rushing through your responses will *only make this harder*.
You *can* tell your players "I need a few minutes to prepare a response" if they surprise you with a question or plan of action. Didn't expect them to wander into a random cave? That's fine, take five minutes to draw a simple floor plan and figure out what's inside. Decided to adopt an NPC? Take five minutes to figure out their motives and connections to the rest of the story if applicable.
> You and your players are on the same team. Don't be afraid to ask them for help or time.
## Notes
Note-taking is a deeply personal thing so I won't be too prescriptive here. I'm just going to list some questions I think you should ask while considering solutions:
- Is there another person who can tell me "you're not allowed to access these anymore"? Are my notes contingent on spending money?
- How hard is it for me to make backups of my notes? To move my notes from one place, tool, service to another?
> If you take proper notes, your work is never wasted, only deferred.
### Backups
Part of the "proper notes" package is proper backups. This article is not technical so I won't get into the weeds. You want your backups to be stored on a different device and at a different location from the source data.
> If you have not rehearsed your disaster recovery plan, you do not have a disaster recovery plan.

@ -0,0 +1,68 @@
---
draft: true
title: "Notes as tools: A mental model"
aliases: ["Notes as tools: A mental model"]
series: []
date: "2024-04-24"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "Note-taking can present an overwhelming abundance of possibility. Explicitly modelling your notes as tools can grant clarity when creating and organizing your knowledge."
showFullContent: false
tags:
- obsidian
- note-taking
---
## Outline
- What I assume you know
- Why take notes
- Tools and you
- Tools with purpose
- Tools without
- Conclusion
## What I assume you know
Nothing in particular, this article isn't even specifically about Obsidian.
## Why take notes?
It's a question worth really thinking about. *Why* are you taking notes? Do you enjoy writing? Does it help you remember? Is it part of a grade? Your answer might be one or more or none of these. Regardless of the answer, what I'm here to argue is that your notes should not be an end in and of themselves.
## Tools and you
My central thesis is that **notes are tools**. Tools are objects or processes that people create or alter in order to make some part of their life easier or better. Some very obvious examples of tools are things like knives, guns, and cars. Tools can be more complex, though. Factories are tools, eyeglasses and smartphones are tools.
Tools are not constrained to physical space either. Mnemonic devices we use to memorize how to tie our shoes or our bank passwords are tools as well, existing completely in mental space and active cognitive processes. Beyond the scope of this post but worth mentioning, social constructs are also tools; etiquette, language, art and games are all tools built out of agreements between human beings.
Tools are all around you. It is worth putting some thought into precisely what those tools are useful for, to what degree they benefit you and at what cost. Knives are used for separating objects into smaller objects, which is great, but under the wrong circumstances that object might not be something you want made into smaller pieces so that's something you have to take into account while using, organizing, and storing your knife.
Not all tools are anywhere near as dangerous as knives, but I want to center the idea that understanding the knife's purpose, cutting, informs how you should store it: blade enclosed, handle accessible.
## Tools with purpose
So, your notes are tools. Now what?
Unfortunately, this hasn't armed us with a magic bullet. Physical analogies start to fall apart here; in physical space, hammering a nail has consistent characteristics. A pound of steel weighs the same in China as it does in Haiti, the coefficient of friction for pine planks doesn't change based on who's holding the hammer. Notes, or at least the ways we can transform, share, and apply them are not subject to the kind of restrictions that cars or screwdrivers might be.
In practice, what I try to focus on is **names**.
> A note with a clear purpose will be easy to name.
Think carefully about how you're planning on using this note. Where will you be sitting or standing? What time of day will it be? What are the first words that come to mind when you think about the problem this note solves?
Do you have at least one clear answer for these questions? If not, your note might be doing too much.
## Tools without
Most homes have a place where tools without a clear purpose live. It's usually a drawer in your kitchen
## Conclusion
## What is a tool?
You are, of course, familiar with tools like hammers and knives. You might have even heard of things like wheels, cars, and guns. These are all fairly self-evident examples of tools: **things that humans create or alter to make some part of their life easier or better**.
It's important to understand, however, that the category of "tools" is not restricted to things you can hold in your hand. A low-hanging example of this is math, a *mental* tool humans use to process and understand how quantities of things change in response to different stimuli.
## Tools and You
You work in concert with an endless ocean of tools every day and probably don't think about most of them. This is not a personal failure; a well-designed tool simply requires little to no effort to use. Eyeglasses, cars, phones, books, forks and spoons all augment our natural abilities and let us interact with our environment in ways that benefit us, and most of them are largely invisible in our day-to-day life.
This deep integration can leave people at a bit of a loss when starting from scratch in a context where tools aren't readily available or familiar. This is often the case when delving into a new skill, hobby, or profession.
## Naming Things
One of the most common problems you're likely to encounter while working on your notes is naming them. It is also, I'd argue, the hardest. The most interesting part about names, though, is that they can be an extremely potent "canary in the coal mine" when note-taking. A note that is hard to name is a note whose purpose you do not fully understand.

@ -0,0 +1,44 @@
---
draft: true
title: "1D Automata and You"
aliases: ["1D Automata and You"]
series: ["automata-and-art"]
date: "2024-04-11"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: ""
showFullContent: false
tags:
- golang
- automata
- procedural-generation
---
## What I assume you know
## What am I Doing?
## Why does it work?
## Why doesn't it work?
## What are the options?
## I picked X
## I implemented X
## Success Story???
## Raw notes
### Modular Cartesian Coordinates
Given $\text{width} = 10$, $\text{height} = 1$, 0-indexed:

| | $X_\text{original}$ | $Y_\text{original}$ | $X_\text{wrapped}$ | $Y_\text{wrapped}$ |
|---|---|---|---|---|
| Top Left | 0 | 0 | 0 | 0 |
| Top | -1 | 1 | 9 | 0 |
| Top Right | 1 | 1 | 1 | 0 |
| Left | -1 | 0 | 9 | 0 |
| Identity | 0 | 0 | 0 | 0 |
| Right | 1 | 0 | 1 | 0 |
| Bottom Left | -1 | -1 | 9 | 0 |
| Bottom | 0 | -1 | 0 | 0 |
| Bottom Right | 1 | -1 | 1 | 0 |
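For my own reference, a minimal sketch of the wrapping arithmetic the table above illustrates; Go's `%` operator keeps the sign of the dividend, so negative offsets need the add-then-mod trick:
```go
package main

import "fmt"

// wrap maps any integer, including negatives, into [0, size).
func wrap(v, size int) int {
	return ((v % size) + size) % size
}

func main() {
	width, height := 10, 1
	// The "Top" neighbor of (0, 0) with offset (-1, 1) wraps to (9, 0),
	// matching the table above.
	fmt.Println(wrap(0-1, width), wrap(0+1, height))
}
```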

@ -0,0 +1,51 @@
---
draft: false
title: "Never forget is_alert_recovery"
aliases: ["Never forget is_alert_recovery"]
series: []
date: "2023-03-05"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "Making sure PagerDuty leaves you alone"
showFullContent: false
tags:
- pagerduty
- datadog
- devops
---
## Synthetics and You
In the context of monitoring, a synthetic test is one that simulates an actual user. This is a useful and important part of the visibility triad: logs, **metrics**, and traces. Synthetics let you take (approximate) measurements of what a real user might experience, which can help maintain SLAs or act as health checks for the connection between your origins and CDNs.
## Hands Off Metrics
The system we have is working great. The synthetics are provisioned into Datadog with a very clever system that pulls from a YAML file, sparing us from having to hard code every single monitor.
Alerts are handled via PagerDuty which is a pretty good enterprise paging solution.
Together, these monitor internal (micro)services as well as performing synthetic testing by loading a headless browser instance to navigate the site. This gives us great visibility into what's healthy and what's not after deployments or maintenance.
## This alert will retrigger every 10 minutes
Around 0230 Sunday morning, I got an alert. A synthetic targeting one of our key user-facing pages had triggered. First step is to open the incident in PagerDuty.
Nothing looked out of the ordinary, and when I followed the links the monitor showed the page returned a 300 error for about 3 minutes and then resumed with the 200s. I thought nothing of it, and went to sleep after a long and productive weekend.
I woke up to find out the incident had been paging all night. What happened?
I loaded the monitor and it had been green for hours. Not a great sign.
## is_alert_recovery
After a bit of investigation and an assist from a good friend, we traced down the root cause.
Your Datadog monitors have a field you can use to define a string that will be used as the message for pages. Confusingly, this same string is also where you configure where the message gets sent.
You'd think an enterprise application would let you send different messages to different destinations. Oh well.
The monitor message was the culprit here. It turns out that there's a very important variable: `is_alert_recovery`. If you don't use this, Datadog will not send PagerDuty the "stop triggering this incident" signal, even when the monitor resolves.
```
{{#is_alert_recovery}} Customer facing page failed to return an HTTP 200 response within 5 seconds. @pagerduty-Orgname-teamname @teams-Orgname-teamname
{{/is_alert_recovery}}
```
This was a real pain in the ass. The monitor was re-triggering **every ten minutes**. Luckily I have a good team to work with, and I was familiar with the monitors since I created them. The solution? Manually resolve the incident. Fixed. It didn't retrigger.
## A good night's sleep
I didn't read the documentation when creating my monitor, or check for best-practices. This one's fully on me. Hopefully I'll remember next time.

@ -0,0 +1,108 @@
---
draft: false
title: "Path of Market: Part 1"
aliases: ["Path of Market: Part 1"]
series: ["path-of-market"]
series_order: 1
author: "Nick Dumas"
cover: ""
summary: "Tracking market trends in Path of Exile with Prometheus"
showFullContent: false
date: "2019-07-08"
tags:
- golang
- prometheus
- path-of-exile
---
Path of Exile is an ARPG similar to Diablo: procedurally generated maps, kill monsters to get loot so you can kill monsters faster. It's pretty fun and offers a really flexible build system that allows for a lot of creativity in how you achieve your goals. Of particular interest is the API exposed by the development team.
# Stashes
Each character has a set of "stashes". These are storage boxes which can be flagged as public. Public boxes are exposed via the [api](https://www.pathofexile.com/developer/docs/api-resource-public-stash-tabs) endpoint. This API is interesting in how it handles paging; each request gives you an arbitrary number of stashes, and a GUID indicating the last stash provided by the API. Subsequent requests to the API can include the aforementioned GUID in the `id` url parameter to request the next batch of stashes. This is a sorta crude stream, in practice. Maybe one day they'll reimplement it as a websocket API or something fun like that.
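To make the paging mechanics concrete, here's a rough sketch of the polling loop; the endpoint URL and the `next_change_id` field come from the linked docs, but treat the specifics as illustrative rather than the exact code I run:
```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// Envelope is the top-level shape of each response: a cursor plus a batch of
// stashes, kept raw here because we only care about the paging behavior.
type Envelope struct {
	NextChangeID string            `json:"next_change_id"`
	Stashes      []json.RawMessage `json:"stashes"`
}

func main() {
	id := "" // an empty id starts at the beginning of the stream
	for i := 0; i < 3; i++ {
		resp, err := http.Get("https://www.pathofexile.com/api/public-stash-tabs?id=" + id)
		if err != nil {
			log.Fatal(err)
		}

		var e Envelope
		if err := json.NewDecoder(resp.Body).Decode(&e); err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()

		fmt.Printf("got %d stashes, next id %s\n", len(e.Stashes), e.NextChangeID)
		id = e.NextChangeID // feed the cursor back in for the next batch
	}
}
```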
# The Market
This API is what powers the market for the Path of Exile community. There's [quite](https://poe.watch/prices?league=Legion) [a](https://poe.trade/) few sites and tools leveraging this API, including the [official market site](https://www.pathofexile.com/trade/search/Legion). These market sites are very handy because they offer complex search functionality and various levels of "live" alerting when new items become available.
What I found fascinating, though, is the ability to monitor trends, more than finding individual items. As a former EVE player, I was used to relatively advanced market features like price histories, buy/sell orders, advanced graphing options etc and it's something I've missed everywhere I've gone since. After some investigation, I found that Prometheus and Grafana could offer a powerful base to build upon. Prometheus is a tool for storing time-based "metrics", and Grafana is a visualizer that can connect to Prometheus and other data sources and provide graphs, charts, tables, and all sorts of tools for seeing your data. Below is an example of a chart showing memory usage on a Kubernetes pod.
{{< figure src="" alt="Original image lost." caption="Grafana memory usage chart">}}
# First Steps
Obviously, the first step is talking to the official Path of Exile API and getting these stashes into a format that I can work with programmatically. The JSON payload was moderately complex, but with the help of some [tooling](https://mholt.github.io/json-to-go/) and unit testing I was able to build out some Go structs that contained all the metadata available.
A particularly fun challenge was this one, describing how "gem" items could be slotted into an item. This was a challenge because the API can return either a string *or* a boolean for a specific set of fields. This is, in my opinion, not a particularly "well behaved" API, but you don't always get the luxury of working with ones that are. This unmarshaling solution accounts for the inconsistency and populates the relevant fields accordingly.
```go
type SocketAttr struct {
Type string
Abyss bool
}
func (sa *SocketAttr) UnmarshalJSON(data []byte) error {
var val interface{}
err := json.Unmarshal(data, &val)
if err != nil {
return err
}
switch val.(type) {
case string:
sa.Type = val.(string)
case bool:
sa.Abyss = val.(bool)
}
return nil
}
type SocketColour struct {
Colour string
Abyss bool
}
func (sc *SocketColour) UnmarshalJSON(data []byte) error {
var val interface{}
err := json.Unmarshal(data, &val)
if err != nil {
return err
}
switch val.(type) {
case string:
sc.Colour = val.(string)
case bool:
sc.Abyss = val.(bool)
}
return nil
}
```
With that done, I had passing tests that parsed a variety of sample items I had manually extracted from the API. Next was turning these into a "stream" that I could process. Channels seemed like a natural fit for this task; the API doesn't guarantee any particular number of results per request and only asks that you pass along the last ID you were given when requesting the next batch.
The full code is [here](https://github.com/therealfakemoot/pom/blob/master/poe/client.go), but I'll highlight the parts that are interesting, and not standard issue HTTP client fare.
```go
d := json.NewDecoder(resp.Body)
err = d.Decode(&e)
if err != nil {
sa.Err <- StreamError{
PageID: sa.NextID,
Err: err,
}
log.Printf("error decoding envelope: %s", err)
continue
}
log.Printf("next page ID: %s", e.NextChangeID)
sa.NextID = e.NextChangeID
for _, stash := range e.Stashes {
sa.Stashes <- stash
}
```
This snippet is where the magic happens. JSON gets decoded, and errors are pushed into a channel for processing. Finally, stashes are pushed into a channel to be consumed outside, in the main loop. And here's where I'll leave off for now. There's quite a bit more code to cover, and I'm still refactoring pieces of it relatively frequently, so I don't want to write too much about things that I expect to change.

@ -0,0 +1,51 @@
---
draft: false
title: "Pragmatic Projections: A Primer"
aliases: ["Pragmatic Projections: A Primer"]
series: []
date: "2024-04-15"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "Projections are abstract, but abstractions are a powerful tool for helping us model the world."
showFullContent: false
tags:
- note-taking
- philosophy
---
## Intro
I want to talk about a topic that fascinates me: projections. The [Wikipedia article](https://en.wikipedia.org/wiki/Projection_(mathematics)) is pretty dense and extremely math-focused which makes it a poor option for an introduction, particularly when there's some very practical metaphors we can work with. In this article, I'd like to define ( in a broad sense ) what a projection is by way of example, and then talk a bit about how projections can be used as a mental model for organizing information.
## What I Assume You Know
The biggest assumption is that you can see. Most of these examples rely on vision-based sensory experiences. Beyond that, no technical knowledge is needed.
## What is a projection?
The technical term for a projection is a "mapping"; it relates an input to an output in a specific, consistent way. This is very abstract, but I hope some examples will provide some illumination.
### Shadows
Perhaps the most common projection you'll meet is shadows. Let's say we have an infinite void, empty except for a light source infinitely far away. Now let's add a flat plane, just a nice featureless surface. Finally, put a sphere in between the light source and the plane.
Think about what happens with the shadow, as we change certain details about this scenario. Make sure to ask "What do I not see?" as well as "What do I see?"
Imagine that instead of a sphere, we placed a cylinder. Our two questions start getting a lot trickier now. What the shadow looks like depends on the orientation of the cylinder; if it's pointed directly at the light source, its shadow would be indistinguishable from the shadow of a sphere, but rotate it so its long axis is facing the light source and now you have a shadow that looks like a *rectangle*.
The shadow of this object is a projection. It "maps" part of a three-dimensional objects onto a two-dimensional surface. In the simplest case, the sphere maps onto the plane as a circle. No matter how you rotate it or move the light source, it always comes out as a circle. But not all mappings are quite so trivial.
The cylinder does the best job illustrating both the utility and confusing nature of projections here. Here, the shadow, the projection, becomes far less reliable at telling us what we might be working with. If we only ever got to see a photograph of the cylinder's shadow, it's very reasonable that we might draw extremely false conclusions about the nature of this object. This is simply the nature of data and communication: if you fail to provide sufficient context your work can and probably will be misinterpreted.
The utility of projections, however, cannot be overstated. This thought experiment was deliberately contrived, working with a small number of simple objects. It may not be immediately obvious why it's useful to create such an "incomplete copy" of a thing like this.
### Maps
Until, that is, you begin to think about how *complicated* the real world is. It's only very rarely that you'll find yourself floating in an infinite, empty void kept company only by Platonic geometric figures. In the real world, accomplishing a task usually only demands a small subset of the information available to you about something.
If you're trying to figure out the fastest way to get across town in time for happy hour at the buffet, you'd probably want a *subway map*. The subway map is a *projection* of a more complex object: the city and landscape it is embedded within. In this moment where you need to get across town, information about the sand/clay/loam ratio of a given area would not be helpful, nor would information about where sewage lines cross railroad tracks.
To this end, you can have dozens and dozens of different maps that all faithfully represent "the city" and look nothing alike, have no discernible shared qualities other than the name written on the piece of paper. Each one is correct, useful.
## Projections And You
### Information Management
With the city/map metaphor, I hope I have brought the point home. Projections are a tool for taking complex, "higher dimensional" objects and laying them out on a surface that we can work with. Projections aren't guaranteed to capture *every* piece of information about an object, and this is their strength; we already *have* the complex object in hand/brain. If we were able to work with it, it wouldn't be an object so complex we have to create tools to assist us.
### Projections and Notes
When I take notes on a subject, I *never* try to capture every imaginable detail about it. All of my notes are simply projections. They take some complex concept or object and project them onto a text file, capturing a "photo" of its "shadow", specifically one suited to the problem this note is solving. My notes about work do not mention the weather, when I take notes about philosophy I don't create notes defining concepts I already know, and so on.
Projections are also not limited to single notes. All of the notes in my `Logs/Health` are together a projection of my life: one that only reveals details about my health. My blog is another projection of my life, my knowledge. I haven't written about recipes or tabletop gaming (yet) but I'll get there.
And that brings us to the grand conclusion: your *vault* is a projection too. Layers and layers of projections, of "shadows". Remember to ask yourself: "What can I see?". "What can I not see?"

@ -0,0 +1,83 @@
---
draft: false
title: "Prometheus Primer: the first few pages of the documentation"
aliases: ["Prometheus Primer: the first few pages of the documentation"]
series: []
author: "Nick Dumas"
cover: ""
summary: "I'm trying to teach myself Prometheus, so I'm writing about it."
showFullContent: false
date: "2019-07-04"
tags:
- prometheus
- devops
- monitoring
---
# Querying Basics
Queries run against *metrics*, which are sets of timeseries data. They have millisecond granularity and are stored as floating point values.
# Using Queries
Queries reference individual metrics and perform some analysis on them. Most often you use the `rate` function to "bucket" a metric into time intervals. Once the metric in question has been bucketed into time intervals, you can do comparisons.
```
(rate(http_response_size_bytes[1m])) > 512
```
This query takes the size of http responses in bytes and buckets it into one minute intervals and drops any data points smaller than 512 bytes. Variations on this query could be used to analyse how bandwidth is being consumed across your instrumented processes; a spike or trending rise in high bandwidth requests could trigger an alert to prevent data overages breaking the bank.
```
sum without(instance, node_name, hostname, kubernetes_io_hostname) (rate(http_request_duration_microseconds[1m])) > 2000
```
This query looks at the metric `http_request_duration_microseconds`, buckets it into one minute intervals, and then drops all data points that are smaller than 2000 microseconds. Increases in response durations might indicate network congestion or other I/O contention.
## Labels
Prometheus lets you apply labels to your metrics. Some are specified in the scrape configurations; these are usually things like the hostname of the machine, its datacenter or geographic region, etc. Instrumented applications can also specify labels when generating metrics; these are used to indicate things known at runtime like the specific HTTP route ( e.g. `/blog` or `/images/kittens` ) being measured.
Prometheus queries allow you to specify labels to match against which will let you control how your data is grouped together; you can query against geographic regions, specific hostnames, etc. It also supports regular expressions so you can match against patterns instead of literal strict matches.
```
(rate(http_response_size_bytes{kubernetes_io_hostname="node-y3ul"}[1m])) > 512
(rate(http_response_size_bytes{version=~"v1\.2\.*"}[1m])) > 512
```
An important consideration is that when querying, prometheus considers metrics with any difference in labels as distinct sets of data. Two HTTP servers running in the same datacenter can have different hostnames in their labels; this is useful when you want to monitor error rates per-container but can be detrimental when you want to examine the data for the datacenter as a whole.
To that end, prometheus gives you the ability to strip labels off the metrics in the context of a given query. This is useful for generating aggregate reports.
```
sum without(instance, node_name, hostname, kubernetes_io_hostname)(rate(go_goroutines[1m]))
```
# Alerts
All of this is fun to play with, but none of it is useful if you have to run the queries manually all the time. Prometheus can generate "alerts", but these don't go anywhere on their own; they're defined in the config file and look like this:
```
groups:
- name: example
rules:
- alert: HighErrorRate
expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
for: 10m
labels:
severity: page
annotations:
summary: High request latency
- alert: TotalSystemFailure
expr: job:avg_over_time(up{job="appName"}[5m]) < .5
for: 5m
labels:
severity: page
annotations:
summary: Large scale application outage
```
Alerts can have labels and metadata applied much like regular data sources. On their own, however, they don't *do* anything. Fortunately, the prometheus team has released [AlertManager](https://github.com/prometheus/alertmanager) to work with these alerts. AlertManager receives these events and dispatches them to various services, ranging from email to slack channels to VictorOps or other paging services.
AlertManager lets you define teams and hierarchies that alerts can cascade through and create conditions during which some subsets of alerts are temporarily muted; if a higher-priority event is breaking, more trivial alerts can be ignored for a short time if desired.

@ -0,0 +1,53 @@
---
draft: false
title: "Another fight with SELinux"
aliases: ["Fighting with SELinux yet again"]
series: []
author: "Nick Dumas"
cover: ""
summary: "Extending Quartz's single.html to link between posts."
showFullContent: false
date: "2018-04-13"
tags:
- selinux
- nginx
- fedora
---
# SELinux
DigitalOcean's Fedora droplets include SELinux. I don't know a great deal about SELinux but it's presumably a good thing for helping prevent privilege escalations and so on. Unfortunately, it can be troublesome when trying to do simple static site stuff with nginx.
## nginx
With Fedora, nginx, and SELinux all in use simultaneously, you can tell nginx to serve files that are owned/grouped under a user other than nginx's own, which is phenomenally useful when working with something like hugo. Whether nginx is actually allowed to read those files, though, is up to SELinux, which monitors/intercepts syscalls relating to file access and approves/denies them based on context, role, and type. SELinux concepts are covered pretty thoroughly [here](https://www.digitalocean.com/community/tutorials/an-introduction-to-selinux-on-centos-7-part-1-basic-concepts) and [here](https://www.digitalocean.com/community/tutorials/an-introduction-to-selinux-on-centos-7-part-2-files-and-processes).
By default, nginx runs under the SELinux `system_u` user, the `system_r` role, and the `httpd_t` type:
```
$ ps -efZ|grep 'nginx'
system_u:system_r:httpd_t:s0 root 30543 1 0 Apr09 ? 00:00:00 nginx: master process /usr/sbin/nginx
system_u:system_r:httpd_t:s0 nginx 30544 30543 0 Apr09 ? 00:00:02 nginx: worker process
system_u:system_r:httpd_t:s0 nginx 30545 30543 0 Apr09 ? 00:00:00 nginx: worker process
$
```
Roughly speaking, SELinux compares nginx's user, role, and type against the same values on any file it's trying to access. If the values conflict, SELinux denies access. In the context of "I've got a pile of files I want nginx to serve", this denial manifests as a 403 error. This has caused issues for me repeatedly. Genesis generates terrain renders as directories containing html and json files, and during the development and debugging process I just copy these directly into the `/var/www` directory for my renders.ndumas.com subdomain. Before I discovered the long-term fix described below, every one of these new pages threw a 403 because the directory and its files did not have the `httpd_sys_content_t` type set, so nginx was denied permission to read them.
A correctly set directory looks like this:
```
$ ls -Z
unconfined_u:object_r:httpd_sys_content_t:s0 demo/ unconfined_u:object_r:user_home_t:s0 sampleTest2/ unconfined_u:object_r:httpd_sys_content_t:s0 test2/
unconfined_u:object_r:httpd_sys_content_t:s0 sampleTest1/ unconfined_u:object_r:httpd_sys_content_t:s0 test1/
$
```
# The solution
There are two solutions to serving static files in this way. You can set the `httpd_sys_content_t` type for a given directory and its contents, or you can alter SELinux's configuration regarding the access of user files.
## Short Term
The short term fix is rather simple: `chcon -R -t httpd_sys_content_t /var/www/`. This sets a type value on the directory and its contents that tells SELinux that the `httpd_t` process context can read those files.
## Long Term
Unfortunately, in the context of my use case, I had to run that `chcon` invocation every time I generated a new page. I hate manual labor, so I had to find a way to make this stick. Fortunately, [StackOverflow](https://stackoverflow.com/questions/22586166/why-does-nginx-return-a-403-even-though-all-permissions-are-set-properly#answer-26228135) had the answer.
You can tell SELinux "httpd processes are allowed to access files owned by other users" with the following command: `setsebool -P httpd_read_user_content 1`. This is pretty straightforward and I confirmed that any content I move into the /var/www directories can now be read by nginx.

@ -0,0 +1,139 @@
---
draft: false
title: "Adding Series and Navigation links to Hugo page"
aliases: ["Adding Series and Navigation links to Hugo page"]
series: ["blogging-with-quartz"]
series_order: 3
date: "2023-03-07"
author: "Nick Dumas"
cover: ""
summary: "Extending Quartz's single.html to link between posts."
showFullContent: false
keywords:
- quartz
- webdev
- hugo
tags:
- quartz
- webdev
- hugo
---
## What am I Doing?
As I write more posts, I realize I'm writing groups of posts that are narratively linked, so to speak. The best example is this series, Blogging with Quartz. There are others, but I'm still working on a system for cross-linking between posts. More on that later.
I also realized that there's simply no way to view anything but the most recent N posts on the index page. I've forgotten what the current value of N is but that's neither here nor there.
Users can't navigate freely. The closest they can get is walking the tag graph and hoping that gets them somewhere.
## Why does it work?
Quartz is great: it looks awesome and uses Hugo, which means it's super configurable. The templates are powerful and very polarizing.
## Why doesn't it work?
Quartz's layout seems to be built around organized discovery of notes through hand-crafted landing pages. For the blog, I'm not interested in that. I want users to be able to page through my posts backwards and forwards chronologically like any actual blog site.
Quartz has tags but it lacks a way of saying "These posts aren't chronologically adjacent but they form a sequence". It looks like most tools call this a "series" of posts, so that's what I went with.
## Making it happen
### Chronological Adjacency
Hugo helps us here by providing [page variables](https://gohugo.io/variables/page/) specifically for this: `Next` and `NextInSection`. This partial ends up being really straightforward. It's also got a style, `pagination`, that I'm going to leverage.
```html
{{partial "prev-next.html" .}}
```
Just a few `if` statements and calling `.Permalink` to get a URL. I chose to use the `*InSection` variants because I probably will be introducing more content types over time and I may as well fix this beforehand. Below is `layouts/partials/prev-next.html`
```html
{{ if or .NextInSection .PrevInSection }}
<nav class="pagination">
{{ if .PrevInSection }}
<ol>
<li>
<a href="{{ .PrevInSection.Permalink }}">
<span class="button__icon"></span>
<span class="button__text">{{ .PrevInSection.Title }}</span>
</a>
</li>
{{ end }}
{{ if .NextInSection }}
<li>
<a href="{{ .NextInSection.Permalink }}">
<span class="button__text">{{ .NextInSection.Title }}</span>
<span class="button__icon"></span>
</a>
</li>
</ol>
{{ end }}
</nav>
{{ end }}
```
There's also a small block of custom CSS, necessary to reset some padding and make the links flow horizontally with a bit of a margin to keep them legible. Shown is a portion of `asset/styles/custom.scss`.
```css
...
nav.pagination {
border: 1px solid black;
ol {
padding-inline-start: 0px;
}
li {
margin: .25em;
display: inline-block;
}
}
...
```
{{< figure src="prev-next-links-example.png" alt="Two links pointing to a post published before this one titled 'Ashes to Ashes, Buffs to Buffs', and one published later titled 'Never forget is_alert_recovery'" caption="Pretty snazzy, right?" >}}
### Planning for the Future
Tags are cool, but they don't tell a story. My ambition vastly outstrips my talent and so no project I undertake will ever fit in a single post.
To that end, I put this together. I definitely got this from somewhere, but I can't remember where now. Sorry. Another very simple invocation.
```html {title="layouts/_default/single.html"}
{{partial "series-header.html" . }}
```
This generates a short list linking to all the other posts in a given series.
```html {title="layouts/partials/series-header.html"}
{{- if .Params.series -}}
{{- with index (.GetTerms "series") 0 -}}
<div class="post-series notice--info">
<h3 id="series">This post is part of the <a href="{{ .Permalink }}" style="font-weight: bold">{{ .LinkTitle }}</a> series.</h3>
{{- end -}}
{{- $series := where .Site.RegularPages.ByDate ".Params.series" "intersect" .Params.series -}}
{{- with $series -}}
<ol>
{{- range . -}}
<li>
{{- if eq .File.UniqueID $.File.UniqueID -}}
<b>{{ .Title }}</b>
{{- else -}}
<a href="{{ .Permalink }}">{{ .Title }}</a>
{{- end -}}
</li>
{{- end -}}
</ol>
{{- end -}}
</div>
{{- end -}}
```
`notice--info` just pulls a color from the base definitions. It adds a little visual pop.
```css {title="asset/styles/custom.scss"}
...
.notice--info {
@include notice(var(--primary));
}
...
```
{{< figure src="series-insert-example.png" alt="Screenshot of text on a website describing an article and its membership in a series called 'blogging-with-qurtz' " caption="You can even click the name to take you to a list of the rest of the posts in the series">}}
## Webdev is tedious
I'd love to be able to test this locally. It only takes approximately thirty seconds from pushing a change to having a fresh build of the site, but that's still hella slow when you're trying to rapidly iterate on CSS and stuff.
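In the meantime, Hugo's built-in dev server is probably the closest thing to fast local iteration; this is a sketch assuming the theme and content build cleanly outside the Drone pipeline:

```bash
# Serve the site locally with drafts (-D) and future-dated posts (-F), rebuilding on change
hugo server -DF --baseURL http://localhost:1313/
```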
I'm really happy with how this looks, though, and I'm excited to keep tweaking things. I've got 30 tabs open with Hugo templates and toolkits that I'm gonna rifle through and try to find inspiration in.

Binary file not shown.

Binary file not shown.

@ -0,0 +1,105 @@
---
draft: false
title: "Stamping Builds with Bazel"
aliases: ["Stamping Builds with Bazel"]
series: ["building-with-bazel"]
series_order: 3
date: "2024-05-15"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "Versioning is a critical part of delivering software to users. With bazel, you can derive per-build values and inject them anywhere in your build process."
summary: "Versioning is a critical part of delivering software to users. With bazel, you can derive per-build values and inject them anywhere in your build process."
showFullContent: false
tags:
- bazel
- golang
---
## What am I Doing?
In my [last post](/2024/09/the-joy-of-versioning/) I spent some time talking about how more rigorous versioning helped reduce wasted time debugging and upgrading code as it started getting broken into lots of little pieces.
That post was pretty light on direct `bazel` usage but I promise, it'll pay off. Here, we're going to cover how to use these tags *in* your builds to tag docker images or inject build information into compiled binaries.
I'm assuming that you've read the [first bazel post](/2023/08/beautiful-builds-with-bazel/) in this series, or that you've already got your bazel + bzlmod setup going.
## Stamping and you
Bazel includes functionality that it calls "stamping". Bazel has to separate this into its own conceptual space because one of the core design principles is build reproducibility: for bazel's caching to work, inputs have to be deterministic and ideally change infrequently between runs.
Bazel's [documentation](https://bazel.build/docs/user-manual#workspace-status) covers the bare essentials, with a small caveat. Stamping requires a script, the "workspace status" script, that emits space-separated key-value pairs, e.g. `STABLE_KEY_NAME VALUE`. An example script is included below.
```bash {title="tools/workspace_status.sh"}
#! /usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
echo "STABLE_STAMP_VERSION $(git describe --tags --dirty=-dev)"
echo "STABLE_STAMP_COMMIT $(git rev-parse HEAD)"
echo "STABLE_STAMP_BRANCH $(git rev-parse --abbrev-ref HEAD)"
```
One important detail that the documentation doesn't cover is that your workspace status script **cannot** live in the root of your bazel project. You have two options:
- Place the status script somewhere in your `$PATH`
- Place the status script in a subdirectory
Still not sure why, but if you simply do `bazel --workspace_status_command=status.sh`, `bazel` will *only* look for it in your `$PATH`.
## Build Injection
Using the variables created by your workspace status script ends up being incredibly simple if you're using `rules_go`. The `x_defs` parameter lets you override values at compile time, exactly for cases like this.
```
x_defs = {
    "Version": "{STABLE_STAMP_VERSION}",
    "Build": "{STABLE_STAMP_COMMIT}",
},
```
This is equivalent to the Go build flag `-ldflags "-X PACKAGENAME.Version=whatever -X PACKAGENAME.BUILD=whatever"`. It's important to note that the raw Go flags require a fully qualified package name to be specified. Bazel is smart enough to derive the necessary package name on its own; all you have to do is tell it which variable needs to be overridden with what value.
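For completeness, here's a sketch of the Go side; the package layout and variable names are assumptions, they just need to match the `x_defs` keys:

```go
// Package main is illustrative; x_defs can target variables in whichever package declares them.
package main

import "fmt"

// Defaults used for unstamped builds; stamped builds overwrite these at link time.
var (
	Version = "dev"
	Build   = "unknown"
)

func main() {
	fmt.Printf("Version: %s\nBuild: %s\n", Version, Build)
}
```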
## Putting it all together
The final, full invocation for stamping your builds should look something like this.
```
wikilink-obsidian-resolver on  main [⇡] via 🐹 v1.22.2
bazel run --stamp --workspace_status_command=tools/workspace_status.sh //cmd/version
INFO: Analyzed target //cmd/version:version (0 packages loaded, 0 targets configured).
INFO: Found 1 target...
Target //cmd/version:version up-to-date:
dist/bin/cmd/version/version_/version
INFO: Elapsed time: 1.262s, Critical Path: 1.10s
INFO: 2 processes: 1 internal, 1 darwin-sandbox.
INFO: Build completed successfully, 2 total actions
INFO: Running command line: dist/bin/cmd/version/version_/version
Version: v0.1.4-1-g5792d62
Build: 5792d623fc9fc1852aeb09dd008eabb640cb6711
```
Bazel runs your binary, injects variables generated by your workspace_status script, and it all finally comes together.
The stamping also works for builds:
```
wikilink-obsidian-resolver on  main [⇡] via 🐹 v1.22.2
bazel build --stamp --workspace_status_command=tools/workspace_status.sh //cmd/version
INFO: Analyzed target //cmd/version:version (0 packages loaded, 0 targets configured).
INFO: Found 1 target...
Target //cmd/version:version up-to-date:
dist/bin/cmd/version/version_/version
INFO: Elapsed time: 0.246s, Critical Path: 0.06s
INFO: 1 process: 1 internal.
INFO: Build completed successfully, 1 total action
wikilink-obsidian-resolver on  main [⇡] via 🐹 v1.22.2
./dist/bin/cmd/version/version_/version
Version: v0.1.4-1-g5792d62
Build: 5792d623fc9fc1852aeb09dd008eabb640cb6711
```
## What next?
Stamping is not going to accomplish any of my goals on its own, but it was an important part of preparing my toolkit. If I'm going to release software for other people to consume, I need versioning. Injecting a git tag into a Go binary is a trivial proof of concept; I'll be using this to automatically tag and push OCI images and probably other stuff I haven't come up with yet.
## Notes, Warnings, Caveats
- Important: the workspace status script *cannot* live in the root of the bazel project. It has to be in a subdirectory for some reason.
- If you're defining `x_defs` on a `rules_go` `go_library`, you cannot fully qualify the variable names.
- To find where bazel places artifacts, use `bazel cquery --output=files` (example below)
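A minimal sketch, using the target from earlier in this post:

```bash
# Ask bazel where the outputs of a target ended up on disk
bazel cquery --output=files //cmd/version
```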

@ -0,0 +1,43 @@
---
draft: false
title: "Standing up Gogs and fighting with SELinux"
aliases: ["Standing up Gogs and fighting with SELinux"]
author: "Nick Dumas"
series: []
cover: ""
summary: "SELinux like all security tools end up getting in the way of users who know what they're doing."
showFullContent: false
date: "2018-02-20"
tags:
- nginx
- gogs
- selinux
---
# The Reveal
It took me way longer than I'd like to admit, but I finally discovered why I was not able to properly reverse proxy a subdomain ([git.ndumas.com](http://git.ndumas.com)). As it turns out, SELinux was the culprit. I wanted to write a short post about this, partly to reinforce my own memory and partly to make this sort of thing easier to find for others hitting 'mysterious' issues on SELinux-enabled Fedora installations like DigitalOcean droplets.
# Symptoms
SELinux interference with nginx doing its business will generally manifest as a variation on "permission denied". Here's one such error message:
```
2018/02/20 23:32:51 [crit] 4679#0: *1 connect() to 127.0.0.1:3000 failed (13: Permission denied) while connecting to upstream, client: xxx.xxx.xxx.xxx, server: git.ndumas.com, request: "GET /favicon.ico HTTP/1.1", upstream: "http://127.0.0.1:3000/favicon.ico", host: "git.ndumas.com", referrer: "http://git.ndumas.com/"
```
# Solution
Resolving this requires whitelisting whatever action/context SELinux is restricting. There's a useful tool, `audit2allow`, that will build a module for you automatically. You can invoke it like this:
```
sudo cat /var/log/audit/audit.log | grep nginx | grep denied | audit2allow -M filename
```
Once you've done this, you'll have a file named `filename.pp`. All you have to do next is:
```
semodule -i filename.pp
```
SELinux will revise its policies and any restrictions nginx encountered will be whitelisted.
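If you want to double-check that the module actually landed, something like this should do it:
```
# List installed policy modules and look for ours
sudo semodule -l | grep filename
```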

@ -0,0 +1,66 @@
---
draft: false
title: The Joy of Versioning
aliases:
- The Joy of Versioning
series:
- building-with-bazel
series_order: 2
date: "2023-09-02"
author: Nick Dumas
cover: ""
keywords: []
summary: "Investing in tooling makes adhering to good practices almost easy enough to be joyful."
showFullContent: false
tags:
- bazel
- golang
---
## What am I Doing?
Too many times this year I've found myself struggling to improve my [blog pipeline](https://blog.ndumas.com/series/blogging-with-quartz/) because I couldn't keep track of when code stopped and started doing what it was supposed to do. This was entirely my own fault, I was not observing best-practices:
- I wasn't using semantic versioning
- I wasn't tagging
- all development happened on main
- etc etc
All of this worked well enough for private use monoliths, one-offs and skunkworks projects but these Drone pipelines presented a new challenge.
Drone pipelines tend to be structured as a series of docker images operating on a mount that gets injected into all of them so they can share their work. This is fine, docker images are an easy fire-and-forget solution for deploying tools.
As things grew more complex, my sloppy coding practices put me in a lot of unnecessary tight spots.
- Some parts of the pipeline were idempotent, others weren't.
- Some parts of the pipeline were affected by each other's work. For example, one step scans files for attachments and copies them into Hugo-appropriate directories, and the next transforms links from Obsidian to Hugo layouts.
- I frequently wanted to implement multiple features/fixes simultaneously but when this took longer than planned, rolling back to a known-good version was impossible because my docker images are only tagged with `latest`.
All of this added up to things breaking for far longer than they needed to, more often than they needed to. Eventually, enough was enough. I drew a line in the sand and decided that I wasn't going to live like this anymore.
After some digging I found resources that helped me build a Makefile to take care of things. That first Makefile added a **lot** but I'm only going to cover the tooling for semantic versioning and git tagging; the rest of that Makefile was go cross-compilation and docker image stuff that I'm replacing with bazel.
To handle automatically incrementing semver values, I landed on `bump`. Because it's written in Go, I was able to fork it and patch a few minor issues and make sure that it keeps working for the foreseeable future.
## Why does it work?
My current solution relies on a few pieces: `bump` and my Makefile invoking some git commands.
```Makefile {title="Makefile"}
VERSION ?= $(shell git -C "$(MD)" describe --tags --dirty=-dev)
COMMIT_ID := $(shell git -C "$(MD)" rev-parse HEAD | head -c8)

setup-bump:
	go install github.com/therealfakemoot/bump@latest

bump-major: setup-bump
	bump major

bump-minor: setup-bump
	bump minor

bump-patch: setup-bump
	bump patch
```
[bump](https://github.com/guilhem/bump) is a golang utility that'll read a git repository's tags and apply a [semantic versioning](https://semver.org/) compliant version increment. `bump patch` bumps `v0.0.1` to `v0.0.2`. `bump major` goes from `v2.24.5` to `v3.0.0`. You get the idea.
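A typical session looks something like this; the version numbers are illustrative:

```bash
# Where the repo currently stands
git describe --tags --dirty=-dev   # v0.0.1

# Tag the current commit with the next patch version
make bump-patch

git describe --tags --dirty=-dev   # v0.0.2
```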
All together, this suite works perfectly for handling tagging. I don't have a super rigorous policy on what constitutes a major, minor, or patch version but being able to `make bump-patch` to tag a specific known-good commit made a world of difference. My drone pipelines became drastically more reliable thanks to version pinning.
## But what about Bazel?
Bazel isn't directly involved in manipulating tags yet. To do that, I'll need to add bazel build files to the `bump` repo. I'll cover that in the next post, where I cover how to use bazel's stamping functionality.

@ -0,0 +1,146 @@
---
draft: false
title: "Validating YAML frontmatter with JSONSchema"
aliases: ["Validating YAML frontmatter with JSONSchema"]
series: []
date: "2023-06-01"
author: "Nick Dumas"
cover: ""
keywords: ["", ""]
summary: "As a collection of Markdown documents grows organically, maintaining consistency is important. JSONSchema offers a way to automatically ensure frontmatter stays up to spec."
showFullContent: false
tags:
- yaml
- jsonschema
- golang
- obsidian
---
## Consistency is hard
Over my time using Obsidian, I've authored around 400 notes, and along the way I've settled on a relatively consistent schema for my tags and frontmatter attributes:
```markdown
---
publish: false
summary: ""
aliases: []
title: ""
source: []
tags:
- Status/New
---
```
Getting too deep into what all of these mean is outside the scope of this post. For now, it's enough to know that for any Obsidian note, these properties must be present in order for my pipelines to do their job.
## Manually Managed Metadata
Until now, I managed my note frontmatter by hand, or with `sed`/`grep`. I've got a bit of experience using these tools to manipulate text files, so it's been relatively comfortable but extremely manual.
## Configuration Drift
The problem is that over time, humans get sloppy, forget things, decide to do things differently. In practice, this doesn't impact the usage of my vault in Obsidian; I access most of my notes via the Quick Switcher so filenames and aliases are the things I really focus on.
A place where consistency does matter is when you're automating tasks. Tools that work with Markdown like static site generators care a lot about frontmatter metadata.
For these tools to work the way I expect and need them to, I need to **guarantee** that my notes are configured correctly.
## What are the options?
This is a project I've been meditating on for a long time. The specific problem I had is that most markdown frontmatter is YAML. I'd done cursory searching and come up with no satisfying results for a "YAML schema engine", something to formally validate the structure and content of a YAML document.
I was a fool. For years I'd known that YAML was a superset of JSON, and I'd assumed that the superset part meant that no tool expecting JSON could ever be guaranteed to work on YAML, which isn't acceptable for automation.
The detail that matters is that only the *syntax* is a superset of JSON. The underlying data types: null, bool, integer, string, array, and object, still map onto JSON 1 to 1. With that revelation, my work could finally begin.
## golang and jsonschema
My implementation language of choice is Go, naturally. Speed, type-safety, and cross-compilation all make for a great pipeline.
```go
// Package name is illustrative; the real code lives in the obsidian-pipeline repo.
package obp

import (
	"fmt"
	"io"

	"github.com/santhosh-tekuri/jsonschema/v5"
	_ "github.com/santhosh-tekuri/jsonschema/v5/httploader" // enables loading schemas over http(s)

	"gopkg.in/yaml.v3"
)

// Validate decodes YAML from r and validates it against the JSON Schema at schemaURL.
func Validate(schemaURL string, r io.Reader) error {
	var m interface{}
	dec := yaml.NewDecoder(r)
	err := dec.Decode(&m)
	if err != nil {
		return fmt.Errorf("error decoding YAML: %w", err)
	}

	compiler := jsonschema.NewCompiler()
	schema, err := compiler.Compile(schemaURL)
	if err != nil {
		return fmt.Errorf("error compiling schema: %w", err)
	}

	if err := schema.Validate(m); err != nil {
		return fmt.Errorf("error validating target: %w", err)
	}

	return nil
}
```
`Validate()` is basically all you need in terms of Go code. The [full code repo](https://code.ndumas.com/ndumas/obsidian-pipeline) has a bit more complexity because I'm wiring things through Cobra and stuff, but here's some sample output:
```
go run cmd/obp/*.go validate -s https://schemas.ndumas.com/obsidian/note.schema.json -t Resources/blog/published/
2023/06/01 10:31:27 scanning "mapping-aardwolf.md"
2023/06/01 10:31:27 scanning "schema-bad.md"
2023/06/01 10:31:27 validation error: &fmt.wrapError{msg:"error validating target: jsonschema: '' does not validate with https://schemas.ndumas.com/obsidian/note.schema.json#/required: missing properties: 'title', 'summary', 'tags'", err:(*jsonschema.ValidationError)(0xc0000b3740)}
2023/06/01 10:31:27 error count for "schema-bad.md": 1
2023/06/01 10:31:27 scanning "schema-good.md"
```
You get a relatively detailed summary of why validation failed and a non-zero exit code, exactly what you need to prevent malformed data from entering your pipeline.
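For reference, a schema along these lines is what that command points at. This is a hedged sketch rather than the exact file served from schemas.ndumas.com; the required list matches the validation error above:

```json
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$id": "https://schemas.ndumas.com/obsidian/note.schema.json",
  "type": "object",
  "required": ["title", "summary", "tags"],
  "properties": {
    "publish": { "type": "boolean" },
    "title": { "type": "string", "minLength": 1 },
    "summary": { "type": "string" },
    "aliases": { "type": "array", "items": { "type": "string" } },
    "source": { "type": "array", "items": { "type": "string" } },
    "tags": { "type": "array", "items": { "type": "string" } }
  }
}
```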
### how to schema library?
You might notice that when I specify a schema, it's hosted at `schemas.ndumas.com`. [Here](https://code.ndumas.com/ndumas/json-schemas) you can find the repository powering that domain.
It's pretty simple, just a handful of folders and the following Drone pipeline:
```yaml
kind: pipeline
name: publish-schemas

clone:
  depth: 1

steps:
  - name: publish
    image: drillster/drone-rsync
    settings:
      key:
        from_secret: BLOG_DEPLOY_KEY
      user: blog
      port: 22
      delete: true
      recursive: true
      hosts: ["schemas.ndumas.com"]
      source: /drone/src/
      target: /var/www/schemas.ndumas.com/
      include: ["*.schema.json"]
      exclude: ["**.*"]
```
and this Caddy configuration block:
```caddy
schemas.ndumas.com {
	encode gzip
	file_server {
		browse
	}
	root * /var/www/schemas.ndumas.com
}
```
Feel free to browse around the [schema site](https://schemas.ndumas.com).
## Success Story???
At time of writing, I haven't folded this into any pipelines. This code is basically my proof-of-concept for only a small part of a larger rewrite of my pipeline.
### Future Use Cases
The one use-case that seemed really relevant was for users of the Breadcrumbs plugin. That one uses YAML metadata extensively to create complex hierarchies and relationships. Perfect candidate for a schema validation tool.

@ -0,0 +1,5 @@
---
title: "Trying to break Aardwolf"
summary: "A game of life and death and stockpiling pocket portals"
---
In a friendly competitive sense, of course. The game at its core is about speed and finding shortcuts, usually through space.

@ -0,0 +1,8 @@
---
title: "Authoring a blog in Markdown"
summary: "Blogging for fun and posterity (myself)"
---
The core thesis of this project is using [Obsidian](https://obsidian.md) for authoring my blog posts. I'm a big fan of Markdown for authoring and Obsidian's already where I keep my notes. It's gone through a lot of iterations, but the current version uses Gitea, Drone, and a few Docker images I've built that do various types of sanitizations/transformations.
## A Confession
I'm not using quartz anymore. Oops. This series has been in the works for a long time and I ended up committing more firmly to Hugo than to Quartz in particular. For a few reasons, I'd rather not do a bunch of renaming, so I'm just going to preserve the name as-is. Apologies for any confusion.

@ -0,0 +1,5 @@
---
title: "Building with Bazel"
summary: "Bazel is an extremely opinionated but powerful build system"
---
Bazel is a build system with a lower profile than stuff like cmake or gradle, and it's more esoteric by a significant margin. In particular, the ecosystem is going through a migration between different ways to handle dependencies and it seems to be going about as smoothly as the Go 1.11 transition.

@ -0,0 +1,3 @@
---
title: "Genesis Development"
---

@ -0,0 +1,4 @@
---
title: "Path of Market"
---
A brief foray into market manipulation in an ARPG. I wanted to learn Prometheus and make a bit of walking-money on the side.

@ -1 +1,2 @@
<script defer data-domain="{{ index .Site.Params.AnalyticsURL urls.AbsURL}}" src="https://analytics.ndumas.com/js/script.js"></script> {{ $trimmedURL := trim (index (split (urls.AbsURL "") "://") 1) "/" }}
<script defer data-domain="{{ $trimmedURL }}" src="https://analytics.ndumas.com/js/script.js"></script>
