1 Commits

Author SHA1 Message Date
3e1be7519e initial netlify form test 2020-10-18 14:50:16 -04:00
25 changed files with 96 additions and 1411 deletions

View File

@@ -1,16 +0,0 @@
{
"image": "mcr.microsoft.com/devcontainers/jekyll:bookworm",
"customizations": {
"vscode": {
"extensions": [
"albert.TabOut",
"ecmel.vscode-html-css",
"redhat.vscode-yaml",
"rebornix.Ruby"
]
}
},
"forwardPorts": [4000]
}

View File

@@ -20,6 +20,6 @@ layout: default
<div class="container"> <div class="container">
<h1>404</h1> <h1>404</h1>
<p><strong>Page not found</strong></p> <p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p> <p>The requested page could not be found.</p>
</div> </div>

View File

@@ -15,8 +15,6 @@ source "https://rubygems.org"
gem "jekyll" gem "jekyll"
gem "minimal-mistakes-jekyll" gem "minimal-mistakes-jekyll"
gem "jekyll-sitemap"
gem "webrick"
# The following plugins are automatically loaded by the theme-gem: # The following plugins are automatically loaded by the theme-gem:
# gem "jekyll-paginate" # gem "jekyll-paginate"

View File

@@ -1,104 +1,93 @@
GEM GEM
remote: https://rubygems.org/ remote: https://rubygems.org/
specs: specs:
addressable (2.8.5) addressable (2.7.0)
public_suffix (>= 2.0.2, < 6.0) public_suffix (>= 2.0.2, < 5.0)
colorator (1.1.0) colorator (1.1.0)
concurrent-ruby (1.2.2) concurrent-ruby (1.1.7)
em-websocket (0.5.3) em-websocket (0.5.2)
eventmachine (>= 0.12.9) eventmachine (>= 0.12.9)
http_parser.rb (~> 0) http_parser.rb (~> 0.6.0)
eventmachine (1.2.7) eventmachine (1.2.7)
faraday (2.7.10) faraday (1.0.1)
faraday-net_http (>= 2.0, < 3.1) multipart-post (>= 1.2, < 3)
ruby2_keywords (>= 0.0.4) ffi (1.13.1)
faraday-net_http (3.0.2)
ffi (1.15.5)
forwardable-extended (2.6.0) forwardable-extended (2.6.0)
google-protobuf (3.24.0-aarch64-linux) http_parser.rb (0.6.0)
google-protobuf (3.24.0-x86_64-linux) i18n (1.8.5)
http_parser.rb (0.8.0)
i18n (1.14.1)
concurrent-ruby (~> 1.0) concurrent-ruby (~> 1.0)
jekyll (4.3.2) jekyll (4.1.1)
addressable (~> 2.4) addressable (~> 2.4)
colorator (~> 1.0) colorator (~> 1.0)
em-websocket (~> 0.5) em-websocket (~> 0.5)
i18n (~> 1.0) i18n (~> 1.0)
jekyll-sass-converter (>= 2.0, < 4.0) jekyll-sass-converter (~> 2.0)
jekyll-watch (~> 2.0) jekyll-watch (~> 2.0)
kramdown (~> 2.3, >= 2.3.1) kramdown (~> 2.1)
kramdown-parser-gfm (~> 1.0) kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0) liquid (~> 4.0)
mercenary (>= 0.3.6, < 0.5) mercenary (~> 0.4.0)
pathutil (~> 0.9) pathutil (~> 0.9)
rouge (>= 3.0, < 5.0) rouge (~> 3.0)
safe_yaml (~> 1.0) safe_yaml (~> 1.0)
terminal-table (>= 1.8, < 4.0) terminal-table (~> 1.8)
webrick (~> 1.7) jekyll-feed (0.15.0)
jekyll-feed (0.17.0)
jekyll (>= 3.7, < 5.0) jekyll (>= 3.7, < 5.0)
jekyll-gist (1.5.0) jekyll-gist (1.5.0)
octokit (~> 4.2) octokit (~> 4.2)
jekyll-include-cache (0.2.1) jekyll-include-cache (0.2.0)
jekyll (>= 3.7, < 5.0) jekyll (>= 3.7, < 5.0)
jekyll-paginate (1.1.0) jekyll-paginate (1.1.0)
jekyll-sass-converter (3.0.0) jekyll-sass-converter (2.1.0)
sass-embedded (~> 1.54) sassc (> 2.0.1, < 3.0)
jekyll-sitemap (1.4.0) jekyll-sitemap (1.4.0)
jekyll (>= 3.7, < 5.0) jekyll (>= 3.7, < 5.0)
jekyll-watch (2.2.1) jekyll-watch (2.2.1)
listen (~> 3.0) listen (~> 3.0)
kramdown (2.4.0) kramdown (2.3.0)
rexml rexml
kramdown-parser-gfm (1.1.0) kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0) kramdown (~> 2.0)
liquid (4.0.4) liquid (4.0.3)
listen (3.8.0) listen (3.2.1)
rb-fsevent (~> 0.10, >= 0.10.3) rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10) rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.4.0) mercenary (0.4.0)
minimal-mistakes-jekyll (4.24.0) minimal-mistakes-jekyll (4.20.2)
jekyll (>= 3.7, < 5.0) jekyll (>= 3.7, < 5.0)
jekyll-feed (~> 0.1) jekyll-feed (~> 0.1)
jekyll-gist (~> 1.5) jekyll-gist (~> 1.5)
jekyll-include-cache (~> 0.1) jekyll-include-cache (~> 0.1)
jekyll-paginate (~> 1.1) jekyll-paginate (~> 1.1)
jekyll-sitemap (~> 1.3) jekyll-sitemap (~> 1.3)
octokit (4.25.1) multipart-post (2.1.1)
faraday (>= 1, < 3) octokit (4.18.0)
sawyer (~> 0.9) faraday (>= 0.9)
sawyer (~> 0.8.0, >= 0.5.3)
pathutil (0.16.2) pathutil (0.16.2)
forwardable-extended (~> 2.6) forwardable-extended (~> 2.6)
public_suffix (5.0.3) public_suffix (4.0.6)
rb-fsevent (0.11.2) rb-fsevent (0.10.4)
rb-inotify (0.10.1) rb-inotify (0.10.1)
ffi (~> 1.0) ffi (~> 1.0)
rexml (3.2.6) rexml (3.2.4)
rouge (4.1.3) rouge (3.23.0)
ruby2_keywords (0.0.5)
safe_yaml (1.0.5) safe_yaml (1.0.5)
sass-embedded (1.64.2-aarch64-linux-gnu) sassc (2.4.0)
google-protobuf (~> 3.23) ffi (~> 1.9)
sass-embedded (1.64.2-x86_64-linux-gnu) sawyer (0.8.2)
google-protobuf (~> 3.23)
sawyer (0.9.2)
addressable (>= 2.3.5) addressable (>= 2.3.5)
faraday (>= 0.17.3, < 3) faraday (> 0.8, < 2.0)
terminal-table (3.0.2) terminal-table (1.8.0)
unicode-display_width (>= 1.1.1, < 3) unicode-display_width (~> 1.1, >= 1.1.1)
unicode-display_width (2.4.2) unicode-display_width (1.7.0)
webrick (1.8.1)
PLATFORMS PLATFORMS
aarch64-linux ruby
x86_64-linux
DEPENDENCIES DEPENDENCIES
jekyll jekyll
jekyll-sitemap
minimal-mistakes-jekyll minimal-mistakes-jekyll
webrick
BUNDLED WITH BUNDLED WITH
2.4.18 2.1.4

View File

@@ -23,7 +23,7 @@ name : "Ray Lyon"
description : "Linux, self-hosting, and privacy." description : "Linux, self-hosting, and privacy."
url : "https://rayagainstthemachine.net" url : "https://rayagainstthemachine.net"
baseurl : # the subpath of your site, e.g. "/blog" baseurl : # the subpath of your site, e.g. "/blog"
repository : "skoobasteeve/rayagainstthemachine.net" repository : "skoobasteeve/skoobasteeve.github.io.2"
teaser : # path of fallback teaser image, e.g. "/assets/images/500x300.png" teaser : # path of fallback teaser image, e.g. "/assets/images/500x300.png"
logo : # path of logo image to display in the masthead, e.g. "/assets/images/88x88.png" logo : # path of logo image to display in the masthead, e.g. "/assets/images/88x88.png"
masthead_title : # overrides the website title displayed in the masthead, use " " for no title masthead_title : # overrides the website title displayed in the masthead, use " " for no title
@@ -96,20 +96,23 @@ author:
email : email :
links: links:
- label: "E201 06CB 86FE 0B4D" - label: "E201 06CB 86FE 0B4D"
icon: "fas fa-fw fa-fingerprint" icon: "fas fa-fingerprint"
url: "https://keybase.io/scubasteve/pgp_keys.asc?fingerprint=2dc3a1066bba7040fe7963d9e20106cb86fe0b4d" url: "https://keybase.io/scubasteve/pgp_keys.asc?fingerprint=2dc3a1066bba7040fe7963d9e20106cb86fe0b4d"
- label: "Email" - label: "Email"
icon: "fas fa-fw fa-envelope-square" icon: "fas fa-fw fa-envelope-square"
url: "mailto:ray@rayagainstthemachine.net" url: "mailto:ray@raylyon.net"
- label: "Keybase" - label: "Keybase"
icon: "fab fa-fw fa-keybase" icon: "fab fa-keybase"
url: "https://keybase.io/scubasteve" url: "https://keybase.io/scubasteve"
- label: "Website" - label: "Website"
icon: "fas fa-fw fa-link" icon: "fas fa-fw fa-link"
# url: "https://your-website.com" # url: "https://your-website.com"
- label: "Mastodon" - label: "Twitter"
icon: "fab fa-fw fa-mastodon" icon: "fab fa-fw fa-twitter-square"
url: "https://fosstodon.org/@skoobasteeve" # url: "https://twitter.com/"
- label: "Facebook"
icon: "fab fa-fw fa-facebook-square"
# url: "https://facebook.com/"
- label: "GitHub" - label: "GitHub"
icon: "fab fa-fw fa-github" icon: "fab fa-fw fa-github"
url: "https://github.com/skoobasteeve" url: "https://github.com/skoobasteeve"
@@ -276,14 +279,7 @@ defaults:
author_profile: true author_profile: true
read_time: true read_time: true
comments: true comments: true
share: false share: true
related: true related: true
classes: wide classes: wide
show_date: true show_date: true
# _pages
- scope:
path: ""
type: pages
values:
layout: single
author_profile: true

View File

@@ -1,20 +0,0 @@
<div class="page__footer-follow">
<ul class="social-icons">
{% if site.data.ui-text[site.locale].follow_label %}
<li><strong>{{ site.data.ui-text[site.locale].follow_label }}</strong></li>
{% endif %}
{% if site.footer.links %}
{% for link in site.footer.links %}
{% if link.label and link.url %}
<li><a href="{{ link.url }}" rel="nofollow noopener noreferrer"><i class="{{ link.icon | default: 'fas fa-link' }}" aria-hidden="true"></i> {{ link.label }}</a></li>
{% endif %}
{% endfor %}
{% endif %}
<li><a href="{% if site.atom_feed.path %}{{ site.atom_feed.path }}{% else %}{{ '/feed.xml' | relative_url }}{% endif %}"><i class="fas fa-fw fa-rss-square" aria-hidden="true"></i> {{ site.data.ui-text[site.locale].feed_label | default: "Feed" }}</a></li>
<li><a rel="me" href="https://fosstodon.org/@skoobasteeve"><i class="fab fa-mastodon" aria-hidden="true"></i> Mastodon</a></li>
</ul>
</div>
<div class="page__footer-copyright">&copy; {{ site.time | date: '%Y' }} {{ site.name | default: site.title }}. {{ site.data.ui-text[site.locale].powered_by | default: "Powered by" }} <a href="https://jekyllrb.com" rel="nofollow">Jekyll</a> &amp; <a href="https://mademistakes.com/work/minimal-mistakes-jekyll-theme/" rel="nofollow">Minimal Mistakes</a>.</div>

View File

@@ -8,6 +8,6 @@ I love Linux, open-source, and technology in general.
This blog was built using [Jekyll](https://jekyllrb.com/) and the [Minimal Mistakes](https://github.com/mmistakes/minimal-mistakes) theme. All updates and tweaks can be followed on by GitHub repo linked in the sidebar. This blog was built using [Jekyll](https://jekyllrb.com/) and the [Minimal Mistakes](https://github.com/mmistakes/minimal-mistakes) theme. All updates and tweaks can be followed on by GitHub repo linked in the sidebar.
I'm not very active on social media (besides <a rel="me" href="https://fosstodon.org/@skoobasteeve">Mastodon</a>), but feel free to email me or leave a comment if you want to get in touch. You'll likely receive a quick response as I jump on any opportunity to talk tech. I'm not very active on social media, but feel free to email me or leave a comment if you want to get in touch. You'll likely receive a quick response as I jump on any opportunity to talk tech.
Thanks and Happy Hacking! Thanks and Happy Hacking!

25
_pages/contact.md Normal file
View File

@@ -0,0 +1,25 @@
---
layout: single
title: Contact
permalink: /contact/
---
<form name="contact" netlify>
<p>
<label>Name <input type="text" name="name" /></label>
</p>
<p>
<label>Email <input type="email" name="email" /></label>
</p>
<p>
<label>Phone <input type="text" name="phone" /></label>
</p>
<p>
<label>+1 (If applicable) <input type="text" name="plusone" /></label>
</p>
<p>
<label>Comments <input type="text" name="comments" /></label>
</p>
<p>
<button type="submit">Send</button>
</p>
</form>

View File

@@ -1,8 +1,7 @@
--- ---
layout: single layout: single
title: "On-Demand NFS and Samba Connections in Linux with Systemd Automount" title: "Painless On-Demand NAS Connections in Linux with Systemd Automount"
date: 2020-10-07 19:00:00 date: 2020-10-07 19:00:00
last_modified_at: 2021-08-26
categories: [Linux Administration] categories: [Linux Administration]
tags: linux samba nas systemd ubuntu tags: linux samba nas systemd ubuntu
comments: true comments: true
@@ -22,7 +21,7 @@ If you're not familiar with Systemd unit files and how they work, I would highly
You'll need to create dedicated folders on your machine where the shares will be mounted. You'll need to create dedicated folders on your machine where the shares will be mounted.
```bash ``` bash
$ sudo mkdir -p /mnt/smb/sambashare $ sudo mkdir -p /mnt/smb/sambashare
$ sudo mkdir -p /mnt/nfs/nfsshare $ sudo mkdir -p /mnt/nfs/nfsshare
``` ```
@@ -31,7 +30,7 @@ $ sudo mkdir -p /mnt/nfs/nfsshare
If your Samba server uses authentication, you'll need to create a file with your login details that Systemd can use to connect. These should be saved in a safe location with restricted permissions. If your Samba server uses authentication, you'll need to create a file with your login details that Systemd can use to connect. These should be saved in a safe location with restricted permissions.
```bash ``` bash
$ sudo nano /etc/samba/smbcreds $ sudo nano /etc/samba/smbcreds
``` ```
@@ -40,7 +39,7 @@ username=[USERNAME]
password=[PASSWORD] password=[PASSWORD]
``` ```
```bash ``` bash
$ sudo chmod 600 /etc/samba/smbcreds $ sudo chmod 600 /etc/samba/smbcreds
``` ```
@@ -48,13 +47,13 @@ $ sudo chmod 600 /etc/samba/smbcreds
#### Samba #### Samba
```bash ``` bash
$ sudo apt install samba cifs-utils $ sudo apt install samba cifs-utils
``` ```
#### NFS #### NFS
```bash ``` bash
$ sudo apt install nfs-common $ sudo apt install nfs-common
``` ```
@@ -64,7 +63,7 @@ To make this work, we need (2) unit files for each connection: the **mount** uni
The below instructions assume your samba share is located at `//example.server/sambafiles`. The below instructions assume your samba share is located at `//example.server/sambafiles`.
```bash ``` bash
$ sudo nano /etc/systemd/system/mnt-smb-sambashare.mount $ sudo nano /etc/systemd/system/mnt-smb-sambashare.mount
``` ```
@@ -87,22 +86,20 @@ WantedBy=multi-user.target
``` ```
A few notes on the above file: A few notes on the above file:
* `vers=2.1` - adjust this based on the version of samba running on your server * `vers=2.1` - adjust this based on the version of samba running on your server
* `uid=1000` - adjust this based on your local user ID to avoid permissions problems. This is usually 1000 on a desktop system. * `uid=1000` - adjust this based on your local user ID to avoid permissions problems. This is usually 1000 on a desktop system.
\ \
Next we need to create the automount file in the same location. Next we need to create the automount file in the same location.
***EDIT 2021-08-26*** Reader flansuse pointed out that including `Requires=network-online.target` in the automount file did not conform with systemd guidelines. I confirmed that the mounts work perfectly well without that line, so it's been removed. ``` bash
```bash
$ sudo nano /etc/systemd/system/mnt-smb-sambashare.automount $ sudo nano /etc/systemd/system/mnt-smb-sambashare.automount
``` ```
``` ```
[Unit] [Unit]
Description=samba automount for yourfiles Description=samba automount for yourfiles
Requires=network-online.target
[Automount] [Automount]
Where=/mnt/smb/sambashare Where=/mnt/smb/sambashare
@@ -116,10 +113,8 @@ WantedBy=multi-user.target
The below instructions assume your NFS share is located at `example.server:/srv/nfsfiles`. The below instructions assume your NFS share is located at `example.server:/srv/nfsfiles`.
***EDIT 2021-08-26*** Reader Denis suggested adding the `TimeoutSec` parameter to the below file to prevent lock-ups when the share isn't present on your local network. Thank you Denis for the contribution! ``` bash
$ sudo nano /etc/systemd/system/mnt-nfs-nfssahre.mount
```bash
$ sudo nano /etc/systemd/system/mnt-nfs-nfsshare.mount
``` ```
``` ```
@@ -131,7 +126,6 @@ What=example.server:/srv/nfsfiles
Where=/mnt/nfs/nfsshare Where=/mnt/nfs/nfsshare
Type=nfs Type=nfs
Options=defaults Options=defaults
TimeoutSec=5
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target
@@ -140,13 +134,14 @@ WantedBy=multi-user.target
\ \
Same as before, we need to create the automount file in the same location. Same as before, we need to create the automount file in the same location.
```bash ``` bash
$ sudo nano /etc/systemd/system/mnt-nfs-nfsshare.automount $ sudo nano /etc/systemd/system/mnt-smb-nfsshare.automount
``` ```
``` ```
[Unit] [Unit]
Description=nfs automount for nfsfiles Description=nfs automount for nfsfiles
Requires=network-online.target
[Automount] [Automount]
Where=/mnt/nfs/nfsshare Where=/mnt/nfs/nfsshare

View File

@@ -1,284 +0,0 @@
---
layout: single
title: "Better Nextcloud Photo Albums with Preview Generator and Exiftool"
date: 2020-10-27 18:00:00
excerpt: "If you host and use a Nextcloud server, you know that it's good at many things. Unfortunately, displaying photos is not one of them."
categories: [Linux Administration]
tags: linux ubuntu nextcloud photos exiftool
comments: true
---
![nextcloud-photos-hero](/assets/images/screenshots/nextcloud-photos-hero.png){:class="img-responsive" .align-center}
If you host and use a Nextcloud server, you know that it's good at many things. Unfortunately, displaying photos is not one of them. Since Nextcloud does not read photo metadata, your albums will often appear out-of-order. You may also notice that thumbnails and previews take a very long time to load and flipping quickly through a bunch of photos becomes a painful waiting game.
The good news is that because Nextcloud is a wonderful piece of FOSS that we're self-hosting, we can make some modifications to smooth out these pain points.
There are (2) pieces of software we'll be using to accomplish this:
- [Preview Generator](https://apps.nextcloud.com/apps/previewgenerator) (Nextcloud app)
- [exiftool](https://exiftool.org/) (Linux app)
Both are freely available from Nextcloud and your distro's package manager, respectively. Let's dive in!
# Previews and Thumbnails
First we need to fix Nextcloud's preview generation. By default, Nextcloud generates photo previews and thumbnails on-demand, leading to slow load times. To fix this, we're going to use the Preview Generator app for Nextcloud to pre-generate previews on a regular basis. That way, your photos are ready to view as soon as you open the folder.
1. **Install the Preview Generator app for Nextcloud**. From a Nextcloud account with admin permissions, navigate to the Apps section and locate Preview Generator under the Multimedia category. Click Download and enable.
![preview generator](/assets/images/screenshots/nextcloud-photos-01.jpg){:class="img-responsive" .align-center}
2. **Configure preview and thumbnail settings.** While the default settings work well from a performance standpoint, they cause Nextcloud to generate a huge number of previews and thumbnails for each photo. Once you add a lot of photos, you'll notice that these previews eat into your storage significantly (sometimes more than the photos themselves). Fortunately, Preview Generator is highly configurable.
SSH into your Nextcloud server and follow the instructions below.
**Note:** I've found that the below settings provide a good balance of resolution, performance, and storage usage for my environment. You can tweak them depending on your needs.
Set the default thumbnail sizes by using the following occ commands:
```bash
sudo -u www-data php /var/www/nextcloud/occ config:app:set --value="32 256" previewgenerator squareSizes
sudo -u www-data php /var/www/nextcloud/occ config:app:set --value="256 384" previewgenerator widthSizes
sudo -u www-data php /var/www/nextcloud/occ config:app:set --value="256" previewgenerator heightSizes
```
Next, edit your config.php to specify the maximum preview size for images. This is going to effect the appearance and load time of images when you click on them.
```bash
sudo nano /var/www/nextcloud/config/config.php
```
Find the below lines toward the end of the file. If they don't exist, add them to the block.
```php
'preview_max_x' => '2048',
'preview_max_y' => '2048',
'jpeg_quality' => '60',
```
Save the file and restart the web server.
```bash
sudo service apache2 restart
```
3. **Generate initial previews.** Open a terminal on your Nextcloud server and run the following command:
```bash
sudo -u www-data php /var/www/nextcloud/occ preview:generate-all -vvv
```
The above command is run as the web server user since they are typically the owner of the Nextcloud directory, though you may need to tweak it based on your server configuration.
**NOTE:** Depending on the amount of photos you have, this could take a while to complete and use a high amount of resources. If you have users beyond yourself, it's probably best to run it during a low-activity period.
4. **Add a cron job.** This allows Preview Generator to run continuously generate previews as new photos are added to Nextcloud. To avoid permissions problems, we'll edit the crontab of the web server user:
```bash
sudo -u www-data crontab -e
```
Add the following below line to the file. The below example runs the command every 10 minutes, but you can set the frequency to whatever you want.
```bash
*/10 * * * * /usr/bin/php -f /var/www/nextcloud/occ preview:pre-generate
```
**NOTE:** If you have a specific version of PHP installed beyond your distro's default, you'll want to specify that version of the binary above (e.g. `php7.4` instead of `php`).
# Photo Sorting
**UPDATE 2021-01-26:** Added methods to run the script on both client and server systems.
{: .notice--info}
Nextcloud is first and foremost a file sharing application, so it views files just as your file system would. This is great until you get to photo albums, where the EXIF metadata of the photo is more relevant to sorting than the filename or modified date. To be sure that all photos in Nextcloud display in chronological order, we need to get creative.
That's where the wonderful exiftool comes in. This powerful command-line application allows you to read and manipulate the EXIF data of a photo. The feature that solves our problem is the ability to read the original capture date of a photo and apply it to both the *last modified* attribute and the *filename*. This way, no matter how you sort the images in Nextcloud, they'll display in **chronological** order.
You have **two options** for using exiftool:
1. Run a script from a synced **client** computer.
2. Run a script on the data directory on the **server**.
The advantage of running it on the server is that you can automate periodic scans of your photos so they are always sorted properly and up-to-date. The disadvantage is that there are some extra steps involved that may take a long time depending on the speed of your server.
To get started using exiftool, follow the instructions below on one of your **Nextcloud clients.**
### Write the script
1. Install exiftool on your chosen Nextcloud client.
```bash
sudo apt install exiftool
```
2. Create a new file with the below contents and save it as a script.
```shell
sudo nano photo-cleanup.sh
```
**Option 1: Client Script:**
```shell
#!/bin/sh
albumdir=$1
# use below variables if running on the server
# nextclouddir="/var/www/nextcloud"
# user who owns the photos
# nextclouduser=""
echo "Changing modified date to shot date..."
exiftool "-filemodifydate<datetimeoriginal" -r "$albumdir"
echo "Renaming files to shot date..."
exiftool '-FileName<DateTimeOriginal' -r -d "%Y-%m-%d_%H.%M.%S%%-c.%%e" "$albumdir"
# uncomment the below command if running on the server
# echo "Re-scanning your Nextcloud data directory..."
# sudo -u www-data php "$nextclouddir"/occ files:scan "$nextclouduser"
exit 0
```
**Option 2: Server Script:**
```shell
#!/bin/sh
albumdir=$1
# use below variables if running on the server
nextclouddir="/var/www/nextcloud"
# user who owns the photos
nextclouduser=""
echo "Changing modified date to shot date..."
exiftool "-filemodifydate<datetimeoriginal" -r "$albumdir"
echo "Renaming files to shot date..."
exiftool '-FileName<DateTimeOriginal' -r -d "%Y-%m-%d_%H.%M.%S%%-c.%%e" "$albumdir"
# uncomment the below command if running on the server
echo "Re-scanning your Nextcloud data directory..."
sudo -u www-data php "$nextclouddir"/occ files:scan "$nextclouduser"
exit 0
```
I'll break down what's happening above:
```shell
albumdir=$1
```
Sets your album directory as the first argument when you execute the script.
```shell
nextclouddir="/var/www/nextcloud"
```
**Server script only**: set to your Nextcloud install folder.
```shell
nextclouduser=""
```
**Server script only**: set to the Nextcloud user that owns the photo directory.
```shell
exiftool "-filemodifydate<datetimeoriginal" -r "$albumdir"
```
Instructs exiftool to read `datetimeoriginal` (date and time the photo was shot) from each photo and apply it as the `filemodifydate`. The `-r` flag runs the command recursively so it will work its way through all folders in the photos directory.
```shell
exiftool '-FileName<DateTimeOriginal' -r -d "%Y-%m-%d_%H.%M.%S%%-c.%%e" "$albumdir"
```
Similar to the previous command but applies `datetimeoriginal` to the filename. The `-d` flag specifies how the date and time data are laid out in the new filename.
### Test the script
**Warning:** If you're choosing to run the **server** script, keep in mind that the final files:scan command can take a long time to complete depending on the amount of photos and speed of your instance. It's best to run it during a scheduled maintenance window.
{: .notice--warning}
Let's test this simple script on a directory of photos.
Give the script permission to execute.
```shell
chmod +x photo-cleanup.sh
```
You can see below a directory filled with photos. Note the modify dates and filenames on the right side.
```shell
ls -al Photos
total 35340
drwxrwxr-x 2 raylyon raylyon 4096 Aug 6 18:49 .
drwxr-xr-x 6 raylyon raylyon 4096 Oct 25 21:51 ..
-rwxr-xr-x 1 raylyon raylyon 6280032 Oct 19 01:42 P1060462.jpg
-rwxr-xr-x 1 raylyon raylyon 4056662 Oct 19 02:02 P1060468.jpg
-rwxr-xr-x 1 raylyon raylyon 4915105 Oct 19 02:05 P1060481.jpg
-rwxr-xr-x 1 raylyon raylyon 6375408 Oct 19 02:06 P1060530.jpg
-rwxr-xr-x 1 raylyon raylyon 6635572 Oct 19 02:06 P1060546.jpg
-rwxr-xr-x 1 raylyon raylyon 7903375 Oct 19 02:08 P1060552.jpg
```
Now we'll run the script on this directory.
```shell
./photo-cleanup.sh "/home/raylyon/Nextcloud/Photos"
Changing modified date to shot date...
1 directories scanned
6 image files updated
Renaming files to shot date...
1 directories scanned
6 image files updated
```
```shell
ls -al Photos
total 35340
drwxrwxr-x 2 raylyon raylyon 4096 Oct 25 21:56 .
drwxr-xr-x 6 raylyon raylyon 4096 Oct 25 21:51 ..
-rwxr-xr-x 1 raylyon raylyon 6280032 Jul 31 11:27 2020-07-31_11.27.39.jpg
-rwxr-xr-x 1 raylyon raylyon 4056662 Jul 31 12:09 2020-07-31_12.09.05.jpg
-rwxr-xr-x 1 raylyon raylyon 4915105 Jul 31 12:35 2020-07-31_12.35.29.jpg
-rwxr-xr-x 1 raylyon raylyon 6375408 Aug 1 08:22 2020-08-01_08.22.03.jpg
-rwxr-xr-x 1 raylyon raylyon 6635572 Aug 1 19:46 2020-08-01_19.46.18.jpg
-rwxr-xr-x 1 raylyon raylyon 7903375 Aug 1 19:47 2020-08-01_19.47.24.jpg
```
Just like that, our filenames and metadata are updated.
### (Optional) Add a Cron job
You could run the script manually each time you add photos, but who has time for that? If you're frequently adding new photos and directories, you can automate it.
To do so, we'll add an entry to the crontab on the client or server.
**Client (user crontab)**
```bash
crontab -e
```
**Server (system crontab)**
```shell
sudo crontab -e
```
```shell
*/30 * * * * /path/to/your/script/photo-cleanup.sh "/path/to/Nextcloud/Photos"
```
The above entry runs the script every 30 minutes, checking the directory and any sub-directories for new photos and updating them accordingly. Adjust the timing to your needs.
Have any great Nextcloud photo tips that I missed? Shoot me an email or leave a comment below. Thanks for reading and happy hacking!

View File

@@ -1,197 +0,0 @@
---
layout: single
title: "Backups: Planning an Effective Strategy"
date: 2021-01-10 18:10:00
categories: [Linux Administration]
tags: linux backups BorgBackup borg rsync nextcloud
comments: true
---
If you've ever asked a sysadmin or computer geek for advice, you've probably heard the universal refrain: "Always have a backup" or "RAID is not a backup" or my personal favorite "One is none" (referring to copies of your data).
While virtually everyone working in this space agrees on the above, you'll get wildly different answers to your next question: "How do I do it?"
The intention of this post is to cut through the noise and give you an understanding of the concepts you should be thinking about before committing to a particular technology or piece of software.
***NOTE*** - Due to the nature of this blog, most of the example applications I provide are FOSS and Linux-compatible, but you can apply the same concepts to other apps and operating systems.
## Break Your Data Into "Tiers"
Whether you're talking about a server or a desktop, it's a safe bet that not every file on your hard drive(s) holds equal importance. That is, there are some files you care about vastly more than others. When setting up a backup system, sorting these files by priority will save you time, money, and disk space down the road.
### Tier 1: Files you can't afford to lose
Family pictures, tax documents, home videos; things that aren't replaceable or would incur significant cost to replace, financial or otherwise. **These should exist in at least 3 places**.
### Tier 2: Files that you *can* replace, but don't want to
Music and movies, either ripped from physical media or "obtained" from elsewhere, are good contenders for this category. You may have spent hours meticulously cataloging your media collection, but those files can always be re-downloaded or ripped. **These should exist in at least 2 places**.
### Tier 3: Files that don't matter
Installed applications, games, operating system files, miscellaneous downloads, etc. Files in this category can be easily replaced with a simple re-install or are publicly available on the internet. It's not necessary to back up these files unless you've got extra terabytes burning a hole in your pocket.
## Choose a Strategy for Each Tier
There are a huge number of software options for handling your backups, and the strengths and weaknesses of each primarily depend on the type of data you're backing up. It's important to understand these characteristics so you can apply them to your different data tiers.
### Cloud Sync Services
Services like Google Drive, Dropbox, or self-hosted [Nextcloud](https://nextcloud.com/) (my personal choice) use desktop and mobile applications to automatically sync your files between your devices and a centralized cloud server. Files that you save to a defined folder on your computer are instantly uploaded to the cloud and downloaded to the other devices. These files are accessible from anywhere via a web interface.
**Advantages**
+ **Easy to set up** - Pick a service, create an account, download the application and sync your files.
+ **Instant and automatic** - Backups occur whenever a file is added or changed. No need to schedule a backup ahead of time or manually press a button.
+ **Resilient** - The web applications almost always have a "trash bin" for deleted file recovery, version control for rollbacks, and server infrastructure that gets its own backups. The files also exist offline on your synced devices.
+ **Readily Available** - Your files are accessible from any computer anywhere in the world via a web browser, including your smartphone.
**Disadvantages**
- **Expensive for high volumes** - The free tiers of most sync services only give you around 5-15GB of storage, so for anything larger than that you'll have to pay for the additional storage.
- **Cumbersome for large files** - It's easy for the desktop apps to get bogged down with large files and slow down your computer, and once you get up into the hundreds of gigabytes, you could fill up the hard drives of all your devices pretty quickly. You'll also be severely limited by the slow upload speeds of most home broadband connections.
- **Reliance on third-party** - You have to trust that Google, Dropbox, or other services will respect your privacy and keep your files safe. **EXCEPTION:** [Self-hosted Nextcloud server.](https://docs.nextcloud.com/server/20/admin_manual/installation/)
**Best for**
- **Tier 1** - Documents take up very little space and upload quickly. Even a year's worth of photos from your smartphone will only use a few GB in most cases.
- **Tier 2 (smaller files)** - Application config files or even mp3s if you have a small collection.
### Local File Sync
This is as simple as it gets: plug in a USB hard drive or connect to a network share and copy your files to it. While it's possible to accomplish this with a drag-and-drop, there are many great software tools that can help: [FreeFileSync,](https://freefilesync.org/) [DirSyncPro](https://www.dirsyncpro.org/), and [rsync](https://wiki.archlinux.org/index.php/rsync) are all great options for the desktop.
**Advantages**
- **Simple** - Does not require extensive knowledge to set up and understand what's happening, and files aren't hidden behind compression or custom folder structures. Great for people who want visual verification of a successful backup.
- **Fast** - A local USB3 or gigabit LAN connection is going to be faster than your internet upload speed 9 times out of 10.
- **Cheap** - In the long run, local storage is always cheaper than cloud.
- **Offline and in your control** - No need to rely on any third-party service or server that may be untrustworthy.
**Disadvantages**
- **Not as resilient as other options** - File sync applications, while simple, don't always have more advanced features like versioning and verification, making it easier to run into problems without realizing it.
**Best for**
- **Tier 2** - Without the storage limitations of the cloud, it's great to use on all your files, no matter how large.
### Local Backups
What's the difference between local backup and local sync? It's largely semantics, but for the purposes of this post I'll make some distinctions. The file sync applications I mentioned above are great for just that: ensuring that a folder in one location is an exact copy of a folder in another location. This is great, but if you want to get *serious* about your backups, a more specialized application is what you want.
Backup applications like [BorgBackup](https://www.borgbackup.org/), [Duplicati](https://www.duplicati.com/), and [BackupPC](https://backuppc.github.io/backuppc/) can also back up to a USB HDD or network share, but they also provide features that make your backups more efficient and resilient: versioning, encryption, de-duplication, and compression to name a few.
**Advantages**
- **Secure** - Encryption allows you to protect your backups with a password.
- **Resilient** - Retention of multiple versions and built-in error checking lowers your chances of losing data significantly.
- **Cheap** - Same hardware requirements as local sync, and all the software examples I gave are free and open-source.
- **Offline and in your control** - No need to rely on any third-party service or server that may be untrustworthy.
**Disadvantages**
- **Complexity** - The additional features these apps offer can make them more difficult to set up, and the extra features mean the backups will be hidden by specialized file names and folder structures.
**Best for**
- **Tier 2** - Without the storage limitations of the cloud, it's great to use on all your files, no matter how large.
- **Tier 1** - Combine with a cloud option and you'll have a hard time ever losing your data.
### Cloud Backups
We already established the cloud as a great option for your Tier 1 files, so what makes cloud backups different from cloud sync services? Cloud backup applications use the same methodology as the local backup applications I mentioned above, but instead of going to a USB HDD or NAS, we're sending them to a cloud storage provider. Encryption, compression, de-duplication and versioning are features you can expect to find, and many of the apps I referenced previously are cloud-compatible. [BorgBackup](https://www.borgbackup.org/), [Restic](https://restic.net/), and [Duplicati](https://www.duplicati.com/) are all great options in this space.
**Advantages**
- **Secure** - Encryption allows you to use third-party hosting and storage services with fewer privacy and security concerns. They can't see your files, only the encrypted data.
- **Resilient** - Retention of multiple versions and built-in error checking lowers your chances of losing data significantly.
- **No hardware required** - No need to connect to a local hard drive or network share; all you need is an internet connection and the backup will run in the background at a scheduled time.
- **Cheaper storage options than cloud sync** - Since device sync and web availability isn't a requirement for this type of backup, your options are opened up to virtually any cloud storage vendor.
**Disadvantages**
- **Can still be expensive for high volumes** - While usually cheaper than cloud sync services, you'll still have to pay a subscription fee based on your usage.
- **Complexity** - Just like local backups, the applications take more work to set up than a simple sync service and the backups are stored in esoteric formats and folder structures.
**Best for**
- **Tier 1** - When your files are too large to practically store on a sync service. Also good for use in addition to a sync service.
## Local Backups - USB or NAS?
If you're starting from nothing, getting a USB hard drive will likely be your first step. If you don't have one lying around already, they can be purchased cheaply and are easy to plug in and use. However, if you can afford it, the ideal solution is either a pre-built NAS appliance (Synology, iXSystems, or QNAP) or a DIY NAS using either a traditional x86 PC or something like a Raspberry Pi or RockPro64. Either way, it's important to understand the advantages and limitations of both options.
### USB Hard Drives
**Advantages**
- **Cheap** - 4TB drives are less than $100 at most retailers.
- **Simple to set up** - Have you ever plugged in a USB cable before? That's it!
- **Portable** - Keep them on your desk or hide them in a drawer when you're done.
**Disadvantages**
- **Not good for laptops** - You probably don't want to carry your laptop around with a USB hard drive hanging out of it. With a mobile device, you'll have to remember to periodically plug it in and run the backup manually.
- **Current or resilient: pick one** - If you have a computer that can be tethered to a USB HDD 24/7, you can automate your backups and run them as often as you like. However, you then open yourself up to a single-point-of-failure situation where your O.S. could accidentally corrupt the data, or a disaster like an electrical surge or fire could take out both at once. To avoid this, you can detach the drive and store it in a secure location after the backup completes, but then you have to remember to plug it back in and your backups likely won't remain current as a result.
### NAS
**Advantages**
- **Shareable** - Since a NAS resides on your network, you can back up multiple laptops, desktops, and servers at once without having to plug in a cable.
- **Great for laptops** - Since you don't need to plug in a hard drive, your laptop can back itself up automatically at a scheduled time.
- **Separate from your devices** - Since a NAS is not directly connected to any one computer, the single-point-of-failure scenario is less likely.
**Disadvantages**
- **Pre-built devices can be expensive** - Even Synology's cheapest option with 4TB will be close to 4x the price of a USB hard drive with the same storage.
- **Setup and maintenance** - Whether pre-built or custom, a NAS is a full computer running on your network that needs to be configured, updated, and maintained. To minimize this, choose a purpose-built device from Synology or QNAP.
## So what should you do?
Everybody's situation is different, but here are some general conclusions you can draw from this information:
1. **Tier 1** data should be backed up both locally and with a cloud sync or backup service.
2. **Tier 2** data should be backed up locally in at least one additional location.
3. Local backups are preferred over simple file syncs for their reliability and resiliency.
4. If you can't maintain a local backup, combine a cloud sync service with a cloud backup application.
5. **The most important thing is that you do *something***. Your data is too valuable and your computer's hard drive is too volatile to leave things up to chance.
## Bonus: What I Use
- Some **Tier 1** data is synced to a Nextcloud server hosted on DigitalOcean and that server is backed up to my local NAS using [BorgBackup](https://www.borgbackup.org/) and [Borgmatic](https://torsion.org/borgmatic/).
- All **Tier 1** data on my NAS is backed up nightly to [BorgBase](https://www.borgbase.com/) using BorgBackup and Borgmatic.
- **Tier 2** data on my NAS is backed up nightly to external hard drives using ZFS and [Sanoid/Syncoid](https://github.com/jimsalterjrs/sanoid).
Have a great backup system of your own? As always, feel free to leave a comment or reach out to me directly with questions or feedback. Since BorgBackup is clearly a favorite of mine, I'll be sure to cover it in detail in a future post.
Thanks for reading and happy hacking!

View File

@@ -1,28 +0,0 @@
---
layout: single
title: "I Was About to Write a To-Do App for the Linux Desktop... and then I found Kalendar."
date: 2023-01-08 12:45:00
excerpt: "2022 was a great year for my Python skills. I had some unique problems to solve in my day job that got me over the hump of learning the language, and finally I was able to write comfortably without googling syntax every five minutes..."
categories: [Software Development]
tags: linux python qt6 qt desktop kde kalendar
comments: true
---
![kalendar01](/assets/images/screenshots/kalendar01.png){:class="img-responsive" .align-center}
2022 was a great year for my Python skills. I had some unique problems to solve in my day job that got me over the hump of learning the language, and finally I was able to write comfortably without Googling syntax every five minutes. Quickly my team's Github repo filled up with borderline-unnecessary one-off scripts to solve all sorts of niche problems in our environment. Due to the nature of being a system administrator at a SaaS-heavy company, most of these scripts deal with third-party APIs: moving data from "service a" to "service b", pulling information about "service c" and correlating it with "service d", etc. These types of scripts are fun to write because they have narrow scopes and easily achievable goals, and I find completing them to be immensely satisfying.
Filled with confidence in my Python skills, I set out to embark on my first GUI project: a desktop to-do application with CalDAV sync. This is an app I feel has been missing on Linux, something akin to Apple Reminders where I can use my own backend for sync. To get started, I built a local-only terminal client, bought a book to start learning PyQt, and I sat down today to write the first of a series of blog posts where I would document the project. I got to the part of the blog post where I confidently say that there are "currently no working Linux desktop apps with this functionality". Then I thought, *maybe I should Google this once more and confirm there really is nothing out there*. Well, shit.
## Enter Kalendar
The last time I researched this space, there were no functional standalone to-do apps that supported CalDAV sync. The closest I could find was Thunderbird, my beloved email client, which is far more complex than what I was looking for. [Kalendar](https://apps.kde.org/kalendar/) didn't even pop up on my radar. Even today when I searched, I almost didn't find it. I ended up seeing it on the [Nextcloud Tasks Github page](https://github.com/nextcloud/tasks#apps-which-sync-with-nextcloud-tasks-using-caldav) in a list of compatible apps with sync. Within minutes, I had it installed and synced with my tasks in Nextcloud, and **wow**, this thing is good.
Kalendar bills itself mainly as a new calendar app, but my task lists feel right at home here. The app opens instantly, and the task view is designed almost exactly as I envisioned for my own app; toggleable lists on the left and tasks on the right. Type on the bottom and hit enter to quickly create a new task and it syncs right up to Nextcloud. Right click on a task to easily set priority and due date, or add a subtask. I hate how good this is.
## What now?
I'm now a happy Kalendar user, and I've lost all motivation to write my own app, but I still want to learn GUI development! I've got remedial Python skills and my QT6 book ready to go! If this was ten years ago, there would probably be lots of voids to fill in the Linux app space, but as it stands today there are very few missing pieces. It feels like there's a great app for pretty much everything.
Does anyone have any desktop app ideas? Something missing from their day-to-day workflow? Are any projects you know of using PyQt that I could contribute to? Please let me know in the comments or send me a message. Trying to seize on some motivation here!

View File

@@ -1,76 +0,0 @@
---
layout: single
title: "Building a Reproducible Nextcloud Server, Part one: Choosing the stack"
date: 2023-08-27 10:00:00
excerpt: "After successfully hosting a Nextcloud instance on the same VPS for 7 years, I decided to rebuild it from scratch with modern tooling."
categories: [Self-Hosting, Linux Administration]
tags: linux nextcloud podman docker container vps
comments: true
---
Nextcloud was the first application I *really* self-hosted. I don't mean self-hosting like running the Plex app in the system tray on your gaming PC; I mean a dedicated VPS, exposed to the world, hosting my personal data. The stakes were high, and over the last seven years, it pushed me to grow my Linux knowledge and ultimately made me a far better sysadmin.
A lot happened during that seven years. Containers and infrastructure-as-code blew up and changed the IT industry. Nextcloud as a company and an application grew tremendously. I got married. Throughout all these changes, my little $5 DigitalOcean droplet running Nextcloud on the LAMP stack kept right on ticking. Despite three OS upgrades, two volume expansions, and fifteen(!) Nextcloud major-version upgrades, that thing refused to die. It continued to host my (and my wife's) critical data until the day I decommissioned it just under 60 days ago.
# Why change?
As a sysadmin and a huge Linux nerd, I'd been following the technology and industry changes closely, and every time I heard about something new or read a blog post I couldn't help but wonder "if I rebuilt my Nextcloud server today, how would I do it?". Everything is a container now, and infrastructure and system configuration is all defined as text files, making it reproducible and popularizing the phrase "cattle, not pets". I wanted a chance to embrace these concepts and use the skills I spent the last seven years improving. Plus, what sysadmin doesn't like playing with the new shiny?
# Goals
So what did I want to accomplish with this change?
1. **Cutting-edge technologies** - Not only did I want to play with the latest tools, I wanted to become proficient with them by putting them into production.
2. **Reproducibility** - Use infrastructure-as-code tooling so I could spin up the whole stack and tear it back down with only a few commands.
3. **Reliability** - Whatever combination of hardware and technologies I ended up with, it needed to be absolutely rock-solid. The only reason this thing should break is if I tell it to (intentionally or not)
# Hosting provider
I chose DigitalOcean back in 2016 mainly due to its excellent guides and popularity around the Jupiter Broadcasting community (got that sweet $100 promo code!). It was much easier to use than most other VPS providers and could have you up-and-running with an Ubuntu server and a public IP in minutes. In 2023, the VPS market is a bit more commoditized and there are some other great options out there. Linode initially came to mind, but their future became a bit murkier after they got acquired by Akamai in 2022, while hyperscalers like AWS and Azure are too expensive for this use-case. I eventually landed on [Hetzner Cloud](https://www.hetzner.com/cloud) for the following reasons:
- Incredible value - for roughly $5 USD per month you get 2 vCPUs and 2GB of ram with 20TB of monthly traffic. That's basically double the specs of competing offerings.
- Great reputation - Hetzner has been around for 20+ years and has lots of goodwill in the tech community for their frugal dedicated server offerings. I wouldn't have chosen them initially since their Cloud product didn't have offerings in the U.S., but recently they've expanded to include VPSs in Virginia and Oregon.
- Full-featured Terraform provider - This isn't unique to Hetzner, but it was a requirement for my new setup and their provider works great.
### Why not self host?
While I have a reliable server at home and 300mbps uploads, it's never going to match the bandwidth and reach of a regional data center. This wouldn't matter to me for most things, but I treat my Nextcloud server as a full Dropbox replacement, and it needs to perform as such. On that same note, I feel comfort knowing that it's separated from the more experimental environment of my homelab.
# Linux Distribution
One of the great benefits of containerized applications is that the host operating system matters much less than it used to, and the choice mostly comes down to personal preference. As long as it can run your chosen container runtime and you're familiar with the tooling, your choice will probably work as well as any other.
I've been running Ubuntu on my servers for years due to ease-of-use and my familiarity with it on the desktop. However, I've recently been using Fedora on my home computers and have gotten accustomed to Red Hat / RPM quirks and tooling in recent years. For this reason, and the ease of getting the latest Podman release (more below), I ended up choosing [CentOS Stream 9](https://www.centos.org/centos-stream/).
# Docker vs. Podman
I've been using [Docker](https://www.docker.com/) to host a number of applications on my home server for the last few years with great success, and Docker is still far-and-away the most popular way to run individual containers. However, as the [OCI standard](https://opencontainers.org/) has become more widely adopted, other tools like [Podman](https://podman.io/) have started to appear. Podman, backed by Red Hat, offers near 1:1 command compatibility with Docker and has some lovely added benefits such as:
- Designed to run without root - Podman runs containers as a standard user, greatly reducing the risk to the server if one of the containers is compromised.
- No daemon required - On the same note, there isn't a continuously running daemon in the background with root access to your system. The risks of the Docker socket are [well-documented](https://docs.docker.com/engine/security/protect-access/), and this negates that risk entirely.
- Modern and lightweight - One of the benefits of not being first is that you can learn from everyone else's mistakes. Podman is built using lessons learned from Docker while creating an easy pathway to move from individual containers to full Kubernetes deployments.
Podman has been under rapid development recently, and there's a lot of excitement about it in Linux circles. While Docker would have worked just fine for my purposes, I decided to use this project as an opportunity to get familiar with Podman and see if it could potentially replace my other Docker-based applications.
# Deployment
Unlike my previous Nextcloud server which was like a zen garden that I tended carefully, I wanted my new server to be completely reproducible on a moment's notice. Using containers accomplishes part of this approach, but still leaves many parts of the server configuration to automate! Thankfully, there are a ton of tools available in 2023 to help with this.
## Terraform
To deploy the server itself, with associated volumes, firewall, etc, [Terraform](https://www.terraform.io/) was the obvious choice. While there are some competitors coming up like [Pulumi](https://www.pulumi.com/), Terraform is still the dominant player in the field and popularized the infrastructure-as-code concept. I had some experience using it at work, but I had never had the opportunity to build something from scratch with it. After reading the documentation for the [Hetzner Cloud provider](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs), I was confident Terraform would be able to give me everything I needed.
## Ansible
Once the VPS is deployed and I have SSH access, Terraform's job stops. This is where I would typically connect to the server and start installing packages, configuring the webserver, and doing all the other server setup tasks I've done a thousand times over the years. If only there was a tool that could do all these steps for me while simultaneously documenting the entire setup!
Enter [Ansible](https://www.ansible.com/). Anything you could possibly think to do on a Linux box, Ansible can do for you. Think of it like a human-readable Bash script that handles all the rough edges for you. While writing the playbooks takes some work, once you have them written, you can run them again and again and expect (mostly) the same results each time. I chose Ansible due to its stateless, agent-less architecture and the ability to run it from any computer with SSH access to the target hosts. Like Terraform, I love that the entire configuration is text-based and easily managed with Git.
# What's next?
This post talked about the ideas and goals I had going into this project, and in Part 2 I'll talk about the details of the implementation, and how sometimes things seem a lot easier in a blog post than they turn out to be in reality! If you're interested in the nitty-gritty of how these tools work for a project like this, stay tuned for the next post in the series.
[*Link to Part two*]({% link _posts/2023-10-03-nextcloud-podman-part2.md %})

View File

@@ -1,589 +0,0 @@
---
layout: single
title: "Building a Reproducible Nextcloud Server, Part two: Podman containers and Systemd units"
date: 2023-10-03 08:00:00
excerpt: "In the second installment of my Nextcloud server rebuild, we'll get our containers set up with Podman and deploy them on a public-facing server."
categories: [Self-Hosting, Linux Administration]
tags: linux nextcloud podman docker container vps
comments: true
---
[*Link to Part one*]({% link _posts/2023-08-27-nextcloud-podman.md %})
## Overview
Now that I've established the stack, let's dive in to setting up the Nextcloud application with Podman. In this post, we'll get our containers running on your local computer and generate Systemd service files that we can move into a production server. If all goes well, you'll have rootless Nextcloud running on a publicly accessible domain.
### Steps
* [Create a Podman Pod](#create-a-pod)
* [Create the containers](#create-the-containers)
* [Generate Systemd service files](#generate-systemd-files)
* [Move to production](#move-to-production)
* [Troubleshooting](#troubleshooting)
### Requirements
* Computer with [Podman](https://podman.io/) installed
* Linux server with a publicly routable IP address
* Domain name and the ability to add an "A" record
### Notes on rootless Podman
One of the big advantages of using Podman over Docker is that Podman was designed from the beginning to run without root privileges. This has many positive security implications, but there are also a few "gotchas" to be aware of, and I'll be pointing them out as I go through the instructions.
For more details, the Podman project maintains a helpful doc on their Github: [The Shortcomings of Rootless Podman](https://github.com/containers/podman/blob/main/rootless.md).
## Create a pod
Podman "pods" are logical groupings of containers that depend on one another. Think of a pod like a Service in Docker Compose; a group of containers that work together to run a single application or service. Once we have a pod that contains our containers, we can stop and start all of them with a single command. Containers within a pod also share a private network so they can exchange data freely with one another.
For a much more thorough explanation on what pods are and how they work, check out this [excellent post](https://developers.redhat.com/blog/2019/01/15/podman-managing-containers-pods) on the Red Hat developer blog.
**Rootless Gotcha #1**
In most Linux distributions, unprivileged applications are not allowed to bind themselves to ports below 1024. Before we get started, we'll need to update a system parameter via `sysctl` to solve this issue:
``` shell
sudo sysctl net.ipv4.ip_unprivileged_port_start=80
```
To make the change persist on reboot, create a new file under `/etc/sysctl.d/` named `99-podman.conf` and paste the line `net.ipv4.ip_unprivileged_port_start=80`. You'll need to use `sudo` privileges for this.
After that's done, let's create a new pod called "nextcloud".
``` shell
podman pod create \
--publish 80:80 \
--publish 443:443 \
--network slirp4netns:port_handler=slirp4netns \
nextcloud
```
\
You can see the newly created pod by running `podman pod ps`.
``` shell
POD ID NAME STATUS CREATED INFRA ID # OF CONTAINERS
d1b78054d6f4 nextcloud Created 2 minutes ago f4a80daae64f 1
```
#### Options explained
* `--publish 80:80` and `--publish 443:443` opens ports 80 and 443 for the webserver. Containers within pods can communicate with each other fully on their own isolated network, but for outside traffic to reach the containers, we need to open the necessary ports at the pod level. If you plan to use different ports and put these containers behind a load balancer, you can use different values here.
* `--network slirp4netns:port_handler=slirp4netns` solves **Rootless Gotcha #2**. By default, the webserver in rootless mode sees all HTTP requests as originating from the container's local IP address. This isn't very helpful for accurate logs, so the above option changes the pod's port handler to fix the issue. There may be some performance penalties for doing this, but for low to medium traffic servers it shouldn't be a problem.
## Create the containers
To get Nextcloud up and running, we'll use the following containers:
* [nextcloud-fpm](https://hub.docker.com/_/nextcloud/) - A minimal install of Nextcloud that requires a separate webserver.
* [mariadb](https://hub.docker.com/_/mariadb) - Database officially supported by Nextcloud.
* [caddy](https://hub.docker.com/_/caddy) - The Caddy webserver, which I love for the simplicity of its config and the built-in automatic SSL via Let's Encrypt.
First, create a working directory structure where you'll store all the container data. For this project, I broke mine out like this:
``` shell
.podman
└── nextcloud
├── caddy
│   ├── config
│   └── data
├── mariadb
└── nextcloud
├── config
└── data
```
\
Next, I'll go over each container, showing you the full command I used to create them and explaining each option.
{: .notice--info}
**Note on container image versions**
As general advice when using container images, use a major version tag (e.g. `mariadb:11`) instead of `:latest` or a specific point release. This is a happy medium where minor versions and security fixes get pulled automatically when you run `podman pull` or `podman auto-update`, but you still retain control on when to update to the latest major version.
### MariaDB
We'll create the database container first since it doesn't technically depend on either of the other containers.
``` shell
podman run \
--detach \
--env MYSQL_DATABASE=nextcloud \
--env MYSQL_USER=nextcloud \
--env MYSQL_PASSWORD=nextcloud \
--env MYSQL_ROOT_PASSWORD=nextcloud \
--volume $HOME/.podman/nextcloud/mariadb:/var/lib/mysql:z \
--name mariadb \
--pod nextcloud \
docker.io/library/mariadb:11
```
#### Options explained
* `--env MYSQL_DATABASE=nextcloud` - Name of the database Nextcloud will use, created the first time you run the `mariadb` container.
* `--env MYSQL_USER=nextcloud` - Database user Nextcloud will use, created the first time you run the `mariadb` container.
* `--env MYSQL_PASSWORD=nextcloud` - Password for the Nextcloud database user. Be sure to change this to something more secure and save it somewhere!
* `--env MYSQL_ROOT_PASSWORD=nextcloud` - Password for the database root user. Like the above, be sure to change this to something more secure and save it somewhere! Note that Nextcloud will not use this password, but you'll want it for any manual database maintenance you have to do in the future.
* `--volume $HOME/.podman/nextcloud/mariadb:/var/lib/mysql:z` - Creates a bind mount in the folder you created for MariaDB to store its database and configuration data. The `:z` option is needed to give directory access on selinux systems.
* `--name mariadb` - Sets the name of the container so we can easily reference it later.
* `--pod nextcloud` - Attaches the container to the `nextcloud` pod we previously created.
* `docker.io/library/mariadb:11` - Container image we're going to download and run.
<br>
After you run the command, you can check if the container is running with the `podman ps` command.
``` shell
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f4a80daae64f localhost/podman-pause:4.7.0-1695839078 About an hour ago Up 29 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp d1b78054d6f4-infra
c5961a86a474 docker.io/library/mariadb:11 mariadbd 29 seconds ago Up 29 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp mariadb
```
**Note:** The other container you see in the output, `d1b78054d6f4-infra`, is a helper container for the nextcloud pod.
### Nextcloud
``` shell
podman run \
--detach \
--env MYSQL_HOST=mariadb \
--env MYSQL_DATABASE=nextcloud \
--env MYSQL_USER=nextcloud \
--env MYSQL_PASSWORD=nextcloud \
--volume $HOME/.podman/nextcloud/nextcloud/config:/var/www/html:z \
--volume $HOME/.podman/nextcloud/nextcloud/data:/var/www/html/data:z \
--name nextcloud-app \
--pod nextcloud \
docker.io/library/nextcloud:27-fpm
```
#### Options explained
* `--env MYSQL_HOST=mariadb` - Name of the container hosting the database. Thanks to Podman's built-in DNS, container names will resolve to their private IP address, so all we have to do is point Nextcloud at `mariadb` and it will find the database on its internal pod network.
* `--env MYSQL_DATABASE=nextcloud` - Name of the database Nextcloud will use, the same that you created in the `mariadb` container.
* `--env MYSQL_USER=nextcloud` - Database user Nextcloud will use, the same that you created in the `mariadb` container.
* `--env MYSQL_PASSWORD=nextcloud` - Password for the Nextcloud database user, the same that you created in the `mariadb` container.
* `--volume $HOME/.podman/nextcloud/nextcloud/config:/var/www/html:z` - Creates a bind mount in the folder you created for Nextcloud to store its configuration files.
* `--volume $HOME/.podman/nextcloud/nextcloud/data:/var/www/html/data:z` - Creates a bind mount in the folder you created for Nextcloud's data directory.
* `--name nextcloud-app` - Sets the name of the container (container names can't be the same as a pod, hence the `-app` in the name.)
* `--pod nextcloud` - Attaches the container to the `nextcloud` pod we previously created.
* `docker.io/library/nextcloud:27-fpm` - Container image we're going to download and run, `27` being the latest major version of Nextcloud as of this writing.
<br>
You should now have two containers running, plus the pod helper:
``` shell
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f4a80daae64f localhost/podman-pause:4.7.0-1695839078 About an hour ago Up 18 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp d1b78054d6f4-infra
c5961a86a474 docker.io/library/mariadb:11 mariadbd 18 minutes ago Up 18 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp mariadb
13d5c43c0b4d docker.io/library/nextcloud:27-fpm php-fpm 5 seconds ago Up 5 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp nextcloud-app
```
### Caddy
Before we start the Caddy container, we'll need to write a config in the form of a [Caddyfile](https://caddyserver.com/docs/caddyfile). Since we're just focused on getting the containers working locally, let's do a simple configuration without HTTPS.
Create file named `Caddyfile` in `$HOME/.podman/nextcloud/caddy/config/` and paste the below contents.
```
http://localhost:80 {
root * /var/www/html
file_server
php_fastcgi nextcloud-app:9000 {
root /var/www/html
env front_controller_active true
}
}
```
The above is a bare-minimum configuration to run Nextcloud locally on port 80. We'll make lots of tweaks to this file before we move to production.
Assuming the Caddyfile is in place, run the below command to spin up the final container:
``` shell
podman run \
--detach \
--volume $HOME/.podman/nextcloud/nextcloud/config:/var/www/html:z \
--volume $HOME/.podman/nextcloud/caddy/config/Caddyfile:/etc/caddy/Caddyfile:z \
--volume $HOME/.podman/nextcloud/caddy/data:/data:z \
--name caddy \
--pod nextcloud \
docker.io/library/caddy:2
```
#### Options explained
* `--volume $HOME/.podman/nextcloud/nextcloud/config:/var/www/html:z` - Creates a bind mount in the folder you created for Nextcloud to store its configuration files. This is the content Caddy serves to the web, so it needs access.
* `--volume $HOME/.podman/nextcloud/caddy/config/Caddyfile:/etc/caddy/Caddyfile:z` - Creates a bind mount for the Caddyfile.
* `--volume $HOME/.podman/nextcloud/caddy/data:/data:z` - Creates a bind mount for Caddy's data folder.
* `--name caddy` - Sets the name of the container.
* `--pod nextcloud` - Attaches the container to the `nextcloud` pod we previously created.
* `docker.io/library/caddy:2` - Container image we're going to download and run.
Verify that all (3) containers are running with `podman ps`.
``` shell
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f4a80daae64f localhost/podman-pause:4.7.0-1695839078 2 hours ago Up 45 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp d1b78054d6f4-infra
c5961a86a474 docker.io/library/mariadb:11 mariadbd 45 minutes ago Up 45 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp mariadb
13d5c43c0b4d docker.io/library/nextcloud:27-fpm php-fpm 26 minutes ago Up 26 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp nextcloud-app
b29486a99286 docker.io/caddy:2 caddy run --confi... 4 minutes ago Up 4 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp caddy
```
\
Go to http://localhost in your browser and...
![nextcloud-podman01](/assets/images/screenshots/nextcloud-podman01.png){:class="img-responsive"}
Ta-da! We have Nextcloud!
Since the containers are part of the `nextcloud` pod, you can stop and start all of them with one command. You can run `podman pod stop` to take them down and `podman pod start` to bring them back up. Pretty cool huh?
## Generate Systemd files
Even better than starting and stopping your containers at the pod level is doing it with systemd! This will allow you to manage your Nextcloud pod the same way as any other systemd service, including enabling it to run at system start.
Instead of writing all the systemd unit files by hand, we're going to use a handy subcommand of the podman application, `podman generate systemd`.
First, make sure the pod and all its containers are running. Then, run the below command:
``` shell
podman generate systemd --new --files --name nextcloud
/home/raylyon/container-nextcloud-app.service
/home/raylyon/container-caddy.service
/home/raylyon/container-mariadb.service
/home/raylyon/pod-nextcloud.service
```
\
The output gives you the path to each file, and we'll need to move these files into the systemd user directory, `$HOME/.config/systemd/user/`. Create the directory if it doesn't already exist.
``` shell
mkdir -p $HOME/.config/systemd/user
```
\
Copy each of the files into the above directory.
``` shell
cp $HOME/*.service $HOME/.config/systemd/user/
```
\
Reload the systemd user daemon.
``` shell
systemctl --user daemon-reload
```
\
Start the service corresponding to the pod.
``` shell
systemctl --user start pod-nextcloud
```
\
`podman ps` should show that all your containers are running. If you have issues, you can troubleshoot the same way you would for another systemd service.
Check the status of the pod.
``` shell
systemctl --user status pod-nextcloud
```
\
Check the status of an individual container.
``` shell
systemctl --user status container-nextcloud-app
```
\
Check the service output for errors (note that you need `sudo` for this one).
``` shell
sudo journalctl -xe
```
## Move to production
Up until now we've been working with our containers on localhost, but now it's time to move them to a public-facing server with a public IP and domain name. This step in the process highlights one of the biggest selling points of containers; we can develop and configure locally, then push that exact working configuration to another server and it Just Works™. Beyond that, our systemd unit files save us the trouble of remembering the exact podman commands to run on the server, so we can simply copy the files and start the service.
First, copy the `*.service` files from your computer to the public-facing server with a tool like `scp` or `rsync`.
``` shell
scp $HOME/.config/systemd/user/*.service user@your.server.com:/home/user/
```
\
Then, on the **production server** recreate the folder structure you used locally.
``` shell
mkdir -p $HOME/.podman/nextcloud/nextcloud/config
mkdir -p $HOME/.podman/nextcloud/nextcloud/data
mkdir -p $HOME/.podman/nextcloud/caddy/config
mkdir -p $HOME/.podman/nextcloud/caddy/data
mkdir -p $HOME/.podman/nextcloud/mariadb
```
\
Also, create the systemd folder if it's not already there.
``` shell
mkdir -p $HOME/.config/systemd/user
```
\
Copy the service files into the systemd user directory and reload systemd.
``` shell
cp $HOME/*.service $HOME/.config/systemd/user/
systemctl --user daemon-reload
```
### Caddyfile
The Caddyfile we used earlier won't be suitable for production since it doesn't use a FQDN or HTTPS. Create a new Caddyfile on the server in `$HOME/.podman/nextcloud/caddy/config/` with the below contents, replacing the domain with one you've set up for the server.
```
your.server.com {
root * /var/www/html
file_server
php_fastcgi nextcloud-app:9000 {
root /var/www/html
env front_controller_active true
}
encode gzip
log {
output file /data/nextcloud-access.log
}
header {
Strict-Transport-Security "max-age=15768000;includeSubDomains;preload"
}
# .htaccess / data / config / ... shouldn't be accessible from outside
@forbidden {
path /.htaccess
path /data/*
path /config/*
path /db_structure
path /.xml
path /README
path /3rdparty/*
path /lib/*
path /templates/*
path /occ
path /console.php
}
respond @forbidden 404
redir /.well-known/carddav /remote.php/dav 301
redir /.well-known/caldav /remote.php/dav 301
}
```
The above configuration will use Caddy's built-in automatic HTTPS to pull a certificate from Let's Encrypt. It also blocks web access to certain directories in your Nextcloud folder and adds redirects for Nextcloud's CalDAV and CardDAV endpoints.
### MariaDB optimizations
After running this setup in production for a couple months and going through my first Nextcloud version upgrade, I had issues with Nextcloud losing access to the database during the upgrade process. I did some research and found this [helpful article](https://docs.nextcloud.com/server/latest/admin_manual/configuration_database/linux_database_configuration.html) in Nextcloud's documentation which points to some MariaDB options we can use to fix these issues.
The MariaDB container allows us to pass any additional configuration options as command line arguments to the container run command. This makes it simple to tweak our systemd service file to enable the optimizations.
Open the `container-mariadb.service` file in a text editor and add the following arguments after `docker.io/library/mariadb:11` in the `ExecStart` block:
``` systemd
--transaction-isolation=READ-COMMITTED \
--log-bin=binlog \
--binlog-format=ROW \
--max_allowed_packet=256000000
```
The `ExecStart` block should look something like this when you're done:
``` systemd
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--pod-id-file %t/pod-nextcloud.pod-id \
--sdnotify=conmon \
--replace \
--detach \
--env MYSQL_DATABASE=nextcloud \
--env MYSQL_USER=nextcloud \
--env MYSQL_PASSWORD=nextcloud \
--env MYSQL_ROOT_PASSWORD=nextcloud \
--volume %h/.podman/nextcloud/mariadb:/var/lib/mysql:z \
--name mariadb docker.io/library/mariadb:11 \
--transaction-isolation=READ-COMMITTED \
--log-bin=binlog \
--binlog-format=ROW \
--max_allowed_packet=256000000
```
### Nextcloud maintenance cron job
Nextcloud has an ongoing [background task](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html) that needs to run on a regular basis. There are a few different ways to schedule this, but the recommended method is using cron on the host server.
Edit your **user crontab** by running `crontab -e` (without `sudo`) and add the following line:
```
*/5 * * * * podman exec -u 33 nextcloud-app php /var/www/html/cron.php
```
The command is opening a shell inside the `nextcloud-app` container and running Nextcloud's `cron.php` script every 5 minutes. The `-u 33` option is telling Podman to run the command as UID 33, which is the UID of the www-data user inside the Nextcloud container.
### (Optional) Use an env file for credentials in systemd files
Instead of pasting the database credentials and other secrets directly into the systemd unit files, we can use the `EnvironmentFile` parameter to dump those into a `.env` file with locked-down permissions.
Create the `.env` file somewhere on the system that makes sense. I recommend placing it in the `$HOME/.podman/nextcloud` directory and naming it `.nextcloud-env`. The syntax of the file should look like this:
``` shell
NEXTCLOUD_VERSION=27
MYSQL_PASSWORD=SuperSecretPassword
MYSQL_DATABASE=nextcloud
MYSQL_USER=nextcloud
MYSQL_ROOT_PASSWORD=EvenMoreSuperSecretPassword
```
\
Update the permissions of the file so that only your user on the host system can read it. Replace `youruser` in the below command with the user running your containers.
``` shell
chown youruser:youruser $HOME/.podman/nextcloud/.nextcloud-env
chmod 0600 $HOME/.podman/nextcloud/.nextcloud-env
```
\
Update each of your systemd unit files that need to access the file with the `EnvironmentFile` parameter in the `[Service]` block:
``` systemd
EnvironmentFile=%h/.podman/nextcloud/.nextcloud-env
```
`%h` in systemd lingo is a variable for your home directory.
Lastly, replace the values in your systemd unit files with `${VARIABLE_NAME}`. In the end your files will look something like this, using the `container-mariadb.service` file as an example:
``` systemd
[Unit]
Description=Podman container-mariadb.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers
BindsTo=pod-nextcloud-pod.service
After=pod-nextcloud-pod.service
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
EnvironmentFile=%h/.podman/nextcloud/.nextcloud-env
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--pod-id-file %t/pod-nextcloud-pod.pod-id \
--sdnotify=conmon \
--replace \
--detach \
--env MYSQL_DATABASE=${MYSQL_DATABASE} \
--env MYSQL_USER=${MYSQL_USER} \
--env MYSQL_PASSWORD=${MYSQL_PASSWORD} \
--env MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} \
--volume %h/.podman/nextcloud/mariadb:/var/lib/mysql:z \
--name mariadb docker.io/library/mariadb:11 \
--transaction-isolation=READ-COMMITTED \
--log-bin=binlog \
--binlog-format=ROW \
--max_allowed_packet=256000000
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all
[Install]
WantedBy=default.target
```
### Start your service!
At this point, everything should be in place for your Nextcloud production server. Make sure of the following:
* A DNS A record exists pointing to the public IP address of your server.
* That domain matches the domain in your Caddyfile.
* The host firewall is allowing incoming ports `80` and `443`. This is usually `firewalld` on RHEL-based systems or `ufw` on Debian-based.
Before starting the service, reload the systemd user daemon.
``` shell
systemctl --user daemon-reload
```
\
Enable the pod service so it starts on boot.
``` shell
systemctl --user enable pod-nextcloud
```
\
**Rootless gotcha #3**: enable lingering for your user. This allows non-root users to start services at boot without a console login.
``` shell
sudo loginctl enable-linger youruser
```
\
If you haven't done so already, make the change to update the unprivileged ports that I referenced [earlier](#create-a-pod) in the post.
``` shell
sudo sysctl net.ipv4.ip_unprivileged_port_start=80
```
Don't forget to create the file at `/etc/sysctl.d/99-podman.conf` so it persists on reboot!
\
Finally, start the Nextcloud service!
``` shell
systemctl --user start pod-nextcloud
```
\
On the first run, it may take a few minutes for Podman to pull down the container images. Check the output of `podman ps` and you should see the containers appearing there one after the other, eventually showing all three.
``` shell
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f4a80daae64f localhost/podman-pause:4.7.0-1695839078 2 hours ago Up 45 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp d1b78054d6f4-infra
c5961a86a474 docker.io/library/mariadb:11 mariadbd 45 minutes ago Up 45 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp mariadb
13d5c43c0b4d docker.io/library/nextcloud:27-fpm php-fpm 26 minutes ago Up 26 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp nextcloud-app
b29486a99286 docker.io/library/caddy:2 caddy run --confi... 4 minutes ago Up 4 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp caddy
```
\
At this point you should have rootless Nextcloud accessible at your FQDN on the public internet with HTTPS!
![nextcloud-podman02](/assets/images/screenshots/nextcloud-podman02.png){:class="img-responsive"}
Walk through the first-time setup of Nextcloud to create your admin account and install apps.
![nextcloud-podman03](/assets/images/screenshots/nextcloud-podman03.png){:class="img-responsive"}
![nextcloud-podman04](/assets/images/screenshots/nextcloud-podman04.png){:class="img-responsive"}
I recommend navigating to **Administration Settings -> Overview** and reading the "Security & setup warnings". The Nextcloud app always has a few recommendations for fixes and changes to the configuration, with documentation to back it up.
![nextcloud-podman05](/assets/images/screenshots/nextcloud-podman05.png){:class="img-responsive"}
## Troubleshooting
If the Nextcloud page isn't loading as expected or you're getting an error when launching your service, the container output logs are your friends! Run `podman ps` to see if your containers are running. If they are, use `podman logs <container name>` to see the latest output from each container. It's usually pretty easy to spot red flags there.
If the containers aren't running, use `sudo journalctl -xe` to check the output of each service. You may have to scroll up a bit to get useful information, since services will often try to restart multiple times after an error and fill up the output. Make sure you scroll up past the messages that say "service start request repeated too quickly" and try to find the first messages shown from each container's service.
**Common problems**
* Directory or file referenced in the `*.service` file doesn't exist or is in the wrong location (your container directories and Caddyfile). Make sure the paths are consistent in all your files.
* Caddy can't get the certificate from Let's Encrypt. Make sure your A record points to the correct IP and that it's had time to propagate across the web. This takes up to 30 minutes after you add the record.
* Firewall blocking ports 80 and 443. Beyond `ufw` and `firewalld` on the system, make sure there aren't any additional firewalls set up in your VPS provider or home network that could be blocking the incoming ports.
* Nextcloud can't connect to the database. Make sure the `$MYSQL_HOST` value matches the container name of the MariaDB container. Make sure the same is true for the database username and password.
**Helpful links**
* [Nextcloud documentation](https://docs.nextcloud.com/)
* [podman run](https://docs.podman.io/en/stable/markdown/podman-run.1.html)
* [podman generate systemd](https://docs.podman.io/en/stable/markdown/podman-generate-systemd.1.html)
## Next steps
Now that we have a working server, let's make sure we never have to do it by hand again! In Part 3 of the series, I'll go over how you can automate the entire configuration with an [Ansible playbook](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_intro.html). Not only can you re-use that playbook to spin up multiple servers or re-deploy on a new hosting provider, it also acts as documentation that writes itself.
As always, feel free to leave a comment below with any questions or suggestions. You can also reach me by [email](mailto:ray@rayagainstthemachine.net) or [Mastodon](https://fosstodon.org/@skoobasteeve).
Happy hacking!

View File

@@ -1,108 +0,0 @@
---
layout: single
title: "Easily Update Your Containers with Podman Auto-Update"
date: 2023-10-08 16:00:00
excerpt: "Use this handy built-in feature of Podman to update all your container images with a single command."
categories: [Linux Administration]
tags: linux nextcloud podman docker container update
comments: true
---
I've written previously about the joys of using Podman to manage your containers, including the benefits of using it over Docker, but one of my favorite quality-of-life features is the [podman auto-update](https://docs.podman.io/en/stable/markdown/podman-auto-update.1.html) command.
In short, it replaces the series of commands you would normally run to update containers, for example:
1. `podman pull nextcloud-fpm:27`
2. `podman stop nextcloud-fpm`
3. `podman rm nextcloud-fpm`
4. `podman run [OPTIONS] nextcloud-fpm:27`
5. Repeat for each container.
Not only does podman auto-update save you all these steps, it will also automatically roll back to the previous image version if there are errors starting the new version, giving you some peace of mind when updating important applications.
## Requirements
* Podman installed
* Containers [managed with systemd](https://docs.podman.io/en/stable/markdown/podman-generate-systemd.1.html)
* Containers you want to update must use the `--label "io.containers.autoupdate=registry"` run option
## Instructions
Recreate your existing systemd-managed containers with the `--label "io.containers.autoupdate=registry"` option. To do this, just edit your container's service file to include the option. See the below partial example for my Nextcloud container:
``` systemd
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
--cidfile=%t/%n.ctr-id \
--cgroups=no-conmon \
--rm \
--pod-id-file %t/pod-nextcloud-pod.pod-id \
--sdnotify=conmon \
--replace \
--detach \
--env MYSQL_HOST=mariadb \
--env MYSQL_DATABASE=nextcloud \
--env MYSQL_USER=${MYSQL_USER} \
--env MYSQL_PASSWORD=${MYSQL_PASSWORD} \
--volume %h/.podman/nextcloud/nextcloud-config:/var/www/html:z \
--volume /mnt/nextcloud-data/data:/var/www/html/data:z \
--label "io.containers.autoupdate=registry" \
--log-driver=journald \
--name nextcloud-app docker.io/library/nextcloud:${NEXTCLOUD_VERSION}-fpm
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
```
\
Once you're done, reload the systemd daemon and restart the service.
``` shell
systemctl --user daemon-reload
systemctl --user restart container-nextcloud.service
```
\
Next, run the auto-update command with the `--dry-run` option. With this option, you'll get a preview of which containers will be updated without the update taking place.
``` shell
podman auto-update --dry-run
UNIT CONTAINER IMAGE POLICY UPDATED
pod-nextcloud-pod.service 643fd5d3e2cb (nextcloud-app) docker.io/library/nextcloud:27-fpm registry pending
pod-nextcloud-pod.service 71e48b691447 (mariadb) docker.io/library/mariadb:10 registry pending
pod-nextcloud-pod.service 9ed555fecdfa (caddy) docker.io/library/caddy registry pending
```
### Output explained
* `podman auto-update` will show updates for every container that has the "io.containers.autoupdate=registry" label and do them all at once
* The `UNIT` column shows the same "pod" service for each container. This is because my containers are all managed by a single Podman pod.
* The `UPDATED` column shows "pending", which means there is an update available from the container registry.
\
Once you're ready to update, run the command again without the `--dry-run` option.
``` shell
podman auto-update
```
\
Podman will begin pulling the images from the registry, which may take a few minutes depending on your connection speed. If it completes successfully, you'll get fresh output with the `UPDATED` column changed to `true`.
``` shell
UNIT CONTAINER IMAGE POLICY UPDATED
pod-nextcloud-pod.service 643fd5d3e2cb (nextcloud-app) docker.io/library/nextcloud:27-fpm registry true
pod-nextcloud-pod.service 71e48b691447 (mariadb) docker.io/library/mariadb:10 registry true
pod-nextcloud-pod.service 9ed555fecdfa (caddy) docker.io/library/caddy registry true
```
\
During this process, the containers were restarted automatically with the latest image. You can verify this with `podman ps`.
``` shell
podman ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0c5523997648 localhost/podman-pause:4.6.1-1692961071 2 minutes ago Up 2 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp 0e075fb7b67b-infra
4ba992e83eeb docker.io/library/caddy:latest caddy run --confi... 2 minutes ago Up 2 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp caddy
2a7d448b1b6b docker.io/library/nextcloud:27-fpm php-fpm 2 minutes ago Up About a minute 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp nextcloud-app
9ec017721f16 docker.io/library/mariadb:10 --transaction-iso... 2 minutes ago Up 2 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp mariadb
```
\
That's it! Your Podman containers were updated to the latest image version with a single command. This is a small feature, but one I've come to love in my time using Podman. If you get stuck, check out the project's [documentation for the auto-update command](https://docs.podman.io/en/stable/markdown/podman-auto-update.1.html). If you have broader questions about running Podman, I recommend reading my [series on building a reproducible Nextcloud server with Podman]({% link _posts/2023-08-27-nextcloud-podman.md %}).
Happy hacking!

View File

@@ -270,7 +270,7 @@ body:hover .visually-hidden button {
.fa-mastodon, .fa-mastodon,
.fa-mastodon-square { .fa-mastodon-square {
color: #000; color: $mastodon-color;
} }
.fa-pinterest, .fa-pinterest,

Binary file not shown.

Before

Width:  |  Height:  |  Size: 335 KiB

After

Width:  |  Height:  |  Size: 612 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 530 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 471 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 575 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 662 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 563 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 541 KiB