From a85e47072ff808846242da49295b4b73af23b840 Mon Sep 17 00:00:00 2001
From: johannst
Date: Wed, 21 Jun 2023 21:15:05 +0000
Subject: deploy: 35dc3c48a50594148554010ac626480161ad357a
---
 linux/acl.html          | 265 ++++++++++++++++++++++++++++++
 linux/coredump.html     |   2 +-
 linux/cryptsetup.html   |   2 +-
 linux/index.html        |   4 +-
 linux/input.html        |   6 +-
 linux/ptrace_scope.html |   2 +-
 linux/swap.html         |   2 +-
 linux/systemd.html      |   2 +-
 linux/zfs.html          | 416 ++++++++++++++++++++++++++++++++++++++++++++++++
 9 files changed, 692 insertions(+), 9 deletions(-)
 create mode 100644 linux/acl.html
 create mode 100644 linux/zfs.html
(limited to 'linux')

diff --git a/linux/acl.html b/linux/acl.html
new file mode 100644
index 0000000..4166152
--- /dev/null
+++ b/linux/acl.html
@@ -0,0 +1,265 @@
access control list (acl)

+
+

This describes POSIX acl.

+
+

The access control list provides a flexible permission mechanism next to the +UNIX file permissions. This allows specifying fine-grained permissions for +users/groups on filesystems.

+

Filesystems which support acl typically have an acl mount option, which must be +specified explicitly while mounting when it is not enabled as a default option.
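
The following is a minimal sketch for enabling the option explicitly (it
assumes an ext4 filesystem on the hypothetical device /dev/sdb1).

# Mount with the acl option.
+mount -o acl /dev/sdb1 /mnt
+
+# Enable acl on an already mounted filesystem.
+mount -o remount,acl /mnt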

+

Files or folders that have an acl defined can be identified by the + sign +next to the UNIX permissions.

+

The following shows an example for a zfs filesystem.

+
# mount | grep tank
+tank on /tank type zfs (rw,xattr,noacl)
+tank/foo on /tank/foo type zfs (rw,xattr,posixacl)
+
+# ls -lh /tank
+drwxrwxr-x+ 2 root root 4 11. Jun 14:26 foo/
+
+

Show acl entries

+
# List current acl entries.
+getfacl /tank/foo
+
+

Modify acl entries

+
# Add acl entry for user "user123".
+setfacl -m "u:user123:rwx" /tank/foo
+
+# Remove entry for user "user123".
+setfacl -x "u:user123" /tank/foo
+
+# Add acl entry for group "group456".
+setfacl -m "g:group456:rx" /tank/foo
+
+# Add acl entry for others.
+setfacl -m "o:rx" /tank/foo
+
+# Remove extended acl entries.
+setfacl -b /tank/foo
+
+
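
Directories can additionally hold default acl entries, which are inherited by
newly created files and sub-directories. A small sketch, re-using the user
from above:

# Add a default acl entry for user "user123" on a directory.
+setfacl -d -m "u:user123:rwx" /tank/foo
+
+# Remove all default acl entries.
+setfacl -k /tank/foo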

Masking of acl entries

+

The mask defines the maximum effective access rights that can be given to named +users and groups (including the owning group).

+
# Update the mask.
+setfacl -m "m:rx" /tank/foo
+
+# List acl entries.
+getfacl /tank/foo
+# file: tank/foo
+# owner: root
+# group: root
+user::rwx
+user:user123:rwx     # effective:r-x
+group::r-x
+mask::r-x
+other::rwx
+
+

diff --git a/linux/coredump.html b/linux/coredump.html
index 94beab3..fbc82dd 100644
--- a/linux/coredump.html
+++ b/linux/coredump.html
@@ -83,7 +83,7 @@

diff --git a/linux/cryptsetup.html b/linux/cryptsetup.html
index 77f71c1..a301ed1 100644
--- a/linux/cryptsetup.html
+++ b/linux/cryptsetup.html
@@ -83,7 +83,7 @@

diff --git a/linux/index.html b/linux/index.html
index c7c04bc..3d8d8db 100644
--- a/linux/index.html
+++ b/linux/index.html
@@ -83,7 +83,7 @@
@@ -155,6 +155,8 @@
  • cryptsetup
  • swap
  • input
+  • acl
+  • zfs

diff --git a/linux/input.html b/linux/input.html
index 5f56879..836e185 100644
--- a/linux/input.html
+++ b/linux/input.html
@@ -83,7 +83,7 @@
@@ -275,7 +275,7 @@ int main(int argc, char* argv[]) {
@@ -289,7 +289,7 @@ int main(int argc, char* argv[]) {

diff --git a/linux/ptrace_scope.html b/linux/ptrace_scope.html
index f593a69..234dda6 100644
--- a/linux/ptrace_scope.html
+++ b/linux/ptrace_scope.html
@@ -83,7 +83,7 @@

diff --git a/linux/swap.html b/linux/swap.html
index b5f8b3e..42952c3 100644
--- a/linux/swap.html
+++ b/linux/swap.html
@@ -83,7 +83,7 @@

diff --git a/linux/systemd.html b/linux/systemd.html
index 0fec778..80540d8 100644
--- a/linux/systemd.html
+++ b/linux/systemd.html
@@ -83,7 +83,7 @@

diff --git a/linux/zfs.html b/linux/zfs.html
new file mode 100644
index 0000000..78e2847
--- /dev/null
+++ b/linux/zfs.html
@@ -0,0 +1,416 @@

    zfs

    +

    Pools are managed with the zpool(8) command and have the +following hierarchy:

  • pool: consists of one or more virtual devices (vdev)
  • vdev: consists of one or more physical devices (dev) and comes in
    different kinds such as disk, mirror, raidzX, ...
      • disk: single physical disk (vdev == dev)
      • mirror: data is identically replicated on all devs (requires at
        least 2 physical devices)

Data stored in a pool is distributed and stored across all its vdevs by zfs. +Therefore, the total failure of a single vdev leads to the loss of the whole pool.

    +

A dataset is a logical volume which can be created on top of a pool. Each +dataset can be configured with its own set of properties like +encryption, quota, and so on. +Datasets are managed with the zfs(8) command.
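
A small sketch (the dataset name moose/bar and the chosen property values are
only for illustration, pool management follows below):

# Create a dataset with its own properties set at creation time.
+zfs create -o quota=10G -o compression=lz4 moose/bar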

    +

    zfs pool management

    +

    Pools are by default mounted at /<POOL>.

    +

    Create, modify and destroy zfs pools

    +
# Create a pool MOOSE with two mirror vdevs.
    +zpool create moose mirror <dev1> <dev2> mirror <dev3> <dev4>..
    +
    +# Add new raidz1 vdev to a pool.
    +zpool add moose raidz1 <devA> <devB> <devC>..
    +
    +# Remove a vdev from a pool.
    +zpool remove moose <vdevX>
    +
    +# Destroy a pool.
    +zpool destroy moose
    +
    +
    +

    For stable device names in small home setups it is recommended to use names +from /dev/disk/by-id.
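
For example (the ata-... device names below are placeholders):

# List stable device names.
+ls -l /dev/disk/by-id
+
+# Create a pool using BY-ID device names.
+zpool create moose mirror ata-disk1 ata-disk2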

    +
    +

    Inspect zfs pools

    +
    # Show status of all pools or a single one.
    +zpool status [<pool>]
    +
+# Show information / statistics about all pools or a single one.
    +zpool list [<pool>]
    +
    +# Show statistics for all devices.
    +zpool list -v
    +
    +# Show command history for pools.
    +zpool history
    +
    +

    Modify vdevs

    +
    # vdev MIRROR-0 with two devs.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-200  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +
    +# Attach new device to an existing vdev.
    +zpool attach moose virtio-200 virtio-400
    +
    +# vdev MIRROR-0 with three devs.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-200  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +        virtio-400  ONLINE       0     0     0
    +
    +# Detach device from vdev.
    +zpool detach moose virtio-200
    +
    +

    Replace faulty disk

    +
    # MIRROR-0 is degraded as one disk failed, but still intact.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           DEGRADED     0     0     0
    +      mirror-0      DEGRADED     0     0     0
    +        virtio-200  UNAVAIL      0     0     0  invalid label
    +        virtio-300  ONLINE       0     0     0
    +
+# Replace the faulty disk in the mirror.
    +# No data is lost since mirror still has one good disk.
    +zpool replace moose virtio-200 virtio-400
    +
    +# MIRROR-0 back in ONLINE (good) state.
    +zpool status
    +    NAME            STATE     READ WRITE CKSUM
    +    moose           ONLINE       0     0     0
    +      mirror-0      ONLINE       0     0     0
    +        virtio-400  ONLINE       0     0     0
    +        virtio-300  ONLINE       0     0     0
    +
    +

    Import or export zfs pools

    +

    When moving pools between hosts, the pool must be exported on the currently +active host and imported on the new host.

    +
    # Export a pool called MOOSE.
    +zpool export moose
    +
+# List pools that can be imported using BY-ID device names (for example).
    +zpool import -d /dev/disk/by-id
    +
    +# Import pool MOOSE using BY-ID device names (for example).
    +zpool import -d /dev/disk/by-id moose
    +
    +
    +

    Device names used by an existing pool can be changed by exporting and +importing a pool again.
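
A sketch, switching the pool from above to BY-ID device names:

# Export the pool, then import it again with different device names.
+zpool export moose
+zpool import -d /dev/disk/by-id moose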

    +
    +

    zfs dataset management

    +

    Datasets are by default mounted at /<POOL>/<DATASET>.
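
The mount location is controlled by the mountpoint dataset property (the path
/srv/foo below is made up):

# Show where a dataset is mounted.
+zfs get mountpoint moose/foo
+
+# Mount the dataset at a custom location.
+zfs set mountpoint=/srv/foo moose/foo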

    +

    Create and destroy zfs datasets

    +
    # Create dataset FOO on pool MOOSE.
    +zfs create moose/foo
    +
    +# Destroy dataset.
    +zfs destroy moose/foo
    +
    +

    List all zfs datasets

    +
    # List all zfs datasets.
    +zfs list
    +
    +

    Mount zfs datasets

    +
    # List currently mounted datasets.
    +zfs mount
    +
    +# Mount dataset.
    +zfs mount moose/foo
    +
    +# Unmount dataset.
    +zfs unmount moose/foo
    +
    +

    Encrypted datasets

    +

Encryption is a readonly property and can only be set when creating a dataset.

    +
# Create encrypted dataset FOO on pool MOOSE.
    +zfs create -o encryption=on -o keyformat=passphrase moose/foo
    +
+# Mount encrypted dataset (-l loads the key if not already loaded).
    +zfs mount -l moose/foo
    +
    +# Unmount dataset and unload encryption key (unload is optional).
    +zfs umount -u moose/foo
    +
    +

    Manage zfs encryption keys

    +
    # Preload encryption key for dataset.
    +zfs load-key moose/foo
    +
    +# Preload encryption key for all datasets.
    +zfs load-key -a
    +
    +# Change encryption key for dataset.
    +zfs change-key moose/foo
    +
    +# Unload encryption key for dataset.
    +zfs unload-key moose/foo
    +
    +

    Manage dataset properties

    +
# Get single property for dataset.
+zfs get quota moose/foo
+
+# Get all properties for dataset.
+zfs get all moose/foo
    +
    +# Get single property for all datasets.
    +zfs get quota
    +
    +# Set property on dataset.
    +zfs set quota=10G moose/foo
    +
    +

    Snapshots

    +
    # Create snapshot called V2 for dataset moose/foo.
    +zfs snapshot moose/foo@v2
    +
    +# List all snapshots.
    +zfs list -t snapshot
    +
+# Make the .zfs directory visible in the root of the dataset.
    +zfs set snapdir=visible moose/foo
    +
+# Browse available snapshots in the visible .zfs directory (readonly).
    +ls /moose/foo/.zfs/snapshot
    +v1/  v2/
    +
+# Create a new dataset based on the V1 snapshot.
    +zfs clone moose/foo@v1 moose/foov1
    +
    +# Destroy snapshot.
    +zfs destroy moose/foo@v1
    +
    +
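
Not shown above, but also part of snapshot handling: a dataset can be rolled
back to a snapshot, discarding changes made after it. A sketch, using the
snapshots from above:

# Roll dataset back to its most recent snapshot.
+zfs rollback moose/foo@v2
+
+# Rolling back to an older snapshot requires -r (destroys more recent snapshots).
+zfs rollback -r moose/foo@v1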

    Access control list

    +

This focuses on POSIX acl.

    +
    # Set the ACL type for the FOO dataset to POSIXACL.
    +zfs set acltype=posixacl moose/foo
    +
    +# Get the ACL type of a given dataset.
    +zfs get acltype moose/foo
    +
    +
    +

For performance reasons it is recommended to also set xattr=sa (zfs set xattr=sa moose/foo) [ref].

    +
    +

    Example: zfs pool import during startup (systemd)

    +

    The default zpool cache file is /etc/zfs/zpool.cache. When pools are imported +the cache is updated.
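
The cache file in use can be inspected or set via the cachefile pool property,
for example (shown for the pool from above):

# Show the cache file used by a pool.
+zpool get cachefile moose
+
+# Explicitly set the default cache file location.
+zpool set cachefile=/etc/zfs/zpool.cache moose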

    +

    Enable the following targets / services to automatically import pools from the +cache.

    +
    systemctl list-dependencies
    +  ...
    +    └─zfs.target
    +      └─zfs-import.target
    +        └─zfs-import-cache.service
    +
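
For example (unit names as shipped with OpenZFS, assuming a systemd based
distribution):

# Enable pool import from the cache during startup.
+systemctl enable zfs-import-cache.service zfs-import.target zfs.target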