From a85e47072ff808846242da49295b4b73af23b840 Mon Sep 17 00:00:00 2001 From: johannst Date: Wed, 21 Jun 2023 21:15:05 +0000 Subject: deploy: 35dc3c48a50594148554010ac626480161ad357a --- print.html | 306 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 300 insertions(+), 6 deletions(-) (limited to 'print.html') diff --git a/print.html b/print.html index 13a45f6..219f8c0 100644 --- a/print.html +++ b/print.html @@ -84,7 +84,7 @@ @@ -3459,6 +3459,19 @@ $(realpath fname1 fname2 ..) =symbols show search path for symbol lookup =bindings show against which definition a symbol is bound +

LD_LIBRARY_PATH and dlopen(3)

+

When dynamically loading a shared library during program runtime with +dlopen(3), only the LD_LIBRARY_PATH as it was during program startup is +evaluated. +Therefore the following is a code smell:

+
// at startup LD_LIBRARY_PATH=/moose
+
+// Assume /foo/libbar.so exists.
+setenv("LD_LIBRARY_PATH", "/foo", true /* overwrite */);
+
+// Will look in /moose and NOT in /foo.
+dlopen("libbar.so", RTLD_LAZY);
+

Libraries specified in LD_PRELOAD are loaded from left-to-right but initialized from right-to-left.

@@ -3952,6 +3965,8 @@ clean:
  • cryptsetup
  • swap
  • input
  • +
  • acl
  • +
  • zfs
  • systemd

    systemctl

    @@ -4333,6 +4348,285 @@ int main(int argc, char* argv[]) {

    [mousedev]: TODO /home/johannst/dev/linux/drivers/input/mousedev.c [evdev]: TODO /home/johannst/dev/linux/drivers/input/evdev.c

    +

    access control list (acl)

    +
    +

    This describes POSIX acl.

    +
    +

The access control list provides a flexible permission mechanism next to the +UNIX file permissions. This allows specifying fine-grained permissions for +users/groups on filesystems.

    +

Filesystems which support acl typically have an acl option, which must be +specified while mounting when it is not enabled as a default option.

    +

Files or folders that have an acl defined, can be identified by the + sign +next to the UNIX permissions.

    +

The following shows an example for a zfs filesystem.

    +
    # mount | grep tank
    +tank on /tank type zfs (rw,xattr,noacl)
    +tank/foo on /tank/foo type zfs (rw,xattr,posixacl)
    +
+# ls -lh /tank
    +drwxrwxr-x+ 2 root root 4 11. Jun 14:26 foo/
    +
    +

    Show acl entries

    +
    # List current acl entries.
    +getfacl /tank/foo
    +
    +

    Modify acl entries

    +
    # Add acl entry for user "user123".
    +setfacl -m "u:user123:rwx" /tank/foo
    +
    +# Remove entry for user "user123".
    +setfacl -x "u:user123" /tank/foo
    +
    +# Add acl entry for group "group456".
    +setfacl -m "g:group456:rx" /tank/foo
    +
    +# Add acl entry for others.
    +setfacl -m "o:rx" /tank/foo
    +
    +# Remove extended acl entries.
    +setfacl -b /tank/foo
    +
    +

    Masking of acl entries

    +

    The mask defines the maximum access rights that can be given to users and +groups.

    +
    # Update the mask.
    +setfacl -m "m:rx" /tank/foo
    +
    +# List acl entries.
    +getfacl /tank/foo
    +# file: tank/foo
    +# owner: root
    +# group: root
    +user::rwx
    +user:user123:rwx     # effective:r-x
    +group::r-x
    +mask::r-x
    +other::rwx
    +
    +

    References

    + +

    zfs

    +

    Pools are managed with the zpool(8) command and have the +following hierarchy:

    + +

    Data stored in a pool is distributed and stored across all vdevs by zfs. +Therefore a total failure of a single vdev can lead to total loss of a pool.

    +

    A dataset is a logical volume which can be created on top of a pool. Each +dataset can be configured with its own set of properties like +encryption, quota, .... +Datasets are managed with the zfs(8) command.

    +

    zfs pool management

    +

    Pools are by default mounted at /<POOL>.

    +

    Create, modify and destroy zfs pools

    +
# Create a pool MOOSE with two mirror vdevs.
    +zpool create moose mirror <dev1> <dev2> mirror <dev3> <dev4>..
    +
    +# Add new raidz1 vdev to a pool.
    +zpool add moose raidz1 <devA> <devB> <devC>..
    +
    +# Remove a vdev from a pool.
    +zpool remove moose <vdevX>
    +
    +# Destroy a pool.
    +zpool destroy moose
    +
    +
    +

    For stable device names in small home setups it is recommended to use names +from /dev/disk/by-id.

    +
    +

    Inspect zfs pools

    +
    # Show status of all pools or a single one.
    +zpool status [<pool>]
    +
    +# Show information / statistics about pools or single one.
    +zpool list [<pool>]
    +
    +# Show statistics for all devices.
    +zpool list -v
    +
    +# Show command history for pools.
    +zpool history
    +
    +

    Modify vdevs

    +
    # vdev MIRROR-0 with two devs.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-200  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +
    +# Attach new device to an existing vdev.
    +zpool attach moose virtio-200 virtio-400
    +
    +# vdev MIRROR-0 with three devs.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-200  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +        virtio-400  ONLINE       0     0     0
    +
    +# Detach device from vdev.
    +zpool detach moose virtio-200
    +
    +

    Replace faulty disk

    +
    # MIRROR-0 is degraded as one disk failed, but still intact.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           DEGRADED     0     0     0
    +      mirror-0      DEGRADED     0     0     0
    +        virtio-200  UNAVAIL      0     0     0  invalid label
    +        virtio-300  ONLINE       0     0     0
    +
    +# Replace faulty disk, in mirror.
    +# No data is lost since mirror still has one good disk.
    +zpool replace moose virtio-200 virtio-400
    +
    +# MIRROR-0 back in ONLINE (good) state.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-400  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +
    +

    Import or export zfs pools

    +

    When moving pools between hosts, the pool must be exported on the currently +active host and imported on the new host.

    +
    # Export a pool called MOOSE.
    +zpool export moose
    +
+# List pools that can be imported using BY-ID device names (for example).
    +zpool import -d /dev/disk/by-id
    +
    +# Import pool MOOSE using BY-ID device names (for example).
    +zpool import -d /dev/disk/by-id moose
    +
    +
    +

    Device names used by an existing pool can be changed by exporting and +importing a pool again.

    +
    +

    zfs dataset management

    +

    Datasets are by default mounted at /<POOL>/<DATASET>.

    +

    Create and destroy zfs datasets

    +
    # Create dataset FOO on pool MOOSE.
    +zfs create moose/foo
    +
    +# Destroy dataset.
    +zfs destroy moose/foo
    +
    +

    List all zfs datasets

    +
    # List all zfs datasets.
    +zfs list
    +
    +

    Mount zfs datasets

    +
    # List currently mounted datasets.
    +zfs mount
    +
    +# Mount dataset.
    +zfs mount moose/foo
    +
    +# Unmount dataset.
    +zfs unmount moose/foo
    +
    +

    Encrypted datasets

    +

Encryption is a readonly property and can only be set when creating a dataset.

    +
    # Create encrypted dataset ENC on pool MOOSE.
    +zfs create -o encryption=on -o keyformat=passphrase moose/foo
    +
+# Mount encrypted dataset (if key is not loaded).
    +zfs mount -l moose/foo
    +
    +# Unmount dataset and unload encryption key (unload is optional).
    +zfs umount -u moose/foo
    +
    +

    Manage zfs encryption keys

    +
    # Preload encryption key for dataset.
    +zfs load-key moose/foo
    +
    +# Preload encryption key for all datasets.
    +zfs load-key -a
    +
    +# Change encryption key for dataset.
    +zfs change-key moose/foo
    +
    +# Unload encryption key for dataset.
    +zfs unload-key moose/foo
    +
    +

    Manage dataset properties

    +
# Get single property for dataset.
    +zfs get quota moose/foo
    +
+# Get all properties for dataset.
    +zfs get all moose/foo
    +
    +# Get single property for all datasets.
    +zfs get quota
    +
    +# Set property on dataset.
    +zfs set quota=10G moose/foo
    +
    +

    Snapshots

    +
    # Create snapshot called V2 for dataset moose/foo.
    +zfs snapshot moose/foo@v2
    +
    +# List all snapshots.
    +zfs list -t snapshot
    +
+# Make .zfs directory visible in the root of the dataset.
    +zfs set snapdir=visible moose/foo
    +
+# Browse available snapshots in visible .zfs directory (readonly).
    +ls /moose/foo/.zfs/snapshot
    +v1/  v2/
    +
    +# Create a new dataset based on the V1 snapshot
    +zfs clone moose/foo@v1 moose/foov1
    +
    +# Destroy snapshot.
    +zfs destroy moose/foo@v1
    +
    +

    Access control list

    +

    Focus on posix acl.

    +
    # Set the ACL type for the FOO dataset to POSIXACL.
    +zfs set acltype=posixacl moose/foo
    +
    +# Get the ACL type of a given dataset.
    +zfs get acltype moose/foo
    +
    +
    +

    For performance reasons it is recommended to also set zfs set xattr=sa moose/foo [ref].

    +
    +

    Example: zfs pool import during startup (systemd)

    +

    The default zpool cache file is /etc/zfs/zpool.cache. When pools are imported +the cache is updated.

    +

    Enable the following targets / services to automatically import pools from the +cache.

    +
    systemctl list-dependencies
    +  ...
    +    └─zfs.target
    +      └─zfs-import.target
    +        └─zfs-import-cache.service
    +

    Network