I am reinstalling Ghost.
I have now successfully imported the zroot pool.
I want to unmount and export the pool, but I can't.

root@devuan:/# zpool export zroot
cannot unmount '/var/log': pool or dataset is busy
root@devuan:/# zfs get mountpoint
NAME                                      PROPERTY    VALUE       SOURCE
zroot                                     mountpoint  legacy      local
zroot/ROOT                                mountpoint  none        local
zroot/ROOT/25.01-backup-2025-04-29-18-27  mountpoint  /           local
zroot/ROOT/default                        mountpoint  /           local
zroot/ROOT/default@2025-04-29-18:27:42-0  mountpoint  -           -
zroot/home                                mountpoint  /home       local
zroot/tmp                                 mountpoint  /tmp        local
zroot/usr                                 mountpoint  /usr        local
zroot/usr/ports                           mountpoint  /usr/ports  inherited from zroot/usr
zroot/var                                 mountpoint  /var        local
zroot/var/audit                           mountpoint  /var/audit  inherited from zroot/var
zroot/var/crash                           mountpoint  /var/crash  inherited from zroot/var
zroot/var/log                             mountpoint  /var/log    inherited from zroot/var
zroot/var/mail                            mountpoint  /var/mail   inherited from zroot/var
zroot/var/tmp                             mountpoint  /var/tmp    inherited from zroot/var
root@devuan:/# zfs holds
usage:
	holds [-rHp] <snapshot> ...

For the property list, run: zfs set|get

For the delegated permission list, run: zfs allow|unallow

For further help on a command or topic, run: zfs help [<topic>]
root@devuan:/# zpool list -v
NAME        SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
zroot       144G  7.42G   137G        -        4G     0%     5%  1.00x    ONLINE  -
  sdb       145G  7.42G   137G        -        4G     0%  5.15%      -    ONLINE
root@devuan:/# df -h
Filesystem       Size  Used Avail Use% Mounted on
udev             1.9G     0  1.9G   0% /dev
tmpfs            392M  1.1M  391M   1% /run
/dev/sde1         71G   28G   40G  42% /
tmpfs            5.0M  8.0K  5.0M   1% /run/lock
tmpfs            1.6G     0  1.6G   0% /dev/shm
/dev/sdc3        340G  262G   61G  82% /home
zroot/tmp        133G  256K  133G   1% /tmp
zroot/usr/ports  133G  128K  133G   1% /usr/ports
zroot/var/crash  133G  128K  133G   1% /var/crash
zroot/var/audit  133G  128K  133G   1% /var/audit
zroot/var/log    133G  512K  133G   1% /var/log
tmpfs            392M   16K  392M   1% /run/user/1000
root@devuan:/# zfs umount -f zroot/var/log
cannot unmount '/var/log': pool or dataset is busy
root@devuan:/# zpool status
  pool: zroot
 state: ONLINE
status: Some supported and requested features are not enabled on the pool.
	The pool can still be used, but some features are unavailable.
action: Enable all features using 'zpool upgrade'. Once this is done,
	the pool may no longer be accessible by software that does not support
	the features. See zpool-features(7) for details.
config:

	NAME        STATE     READ WRITE CKSUM
	zroot       ONLINE       0     0     0
	  sdb       ONLINE       0     0     0

errors: No known data errors
root@devuan:/# 
root@devuan:/# lsblk
NAME   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sda      8:0    0 186.3G  0 disk 
├─sda1   8:1    0  10.5G  0 part 
├─sda2   8:2    0     1K  0 part 
├─sda5   8:5    0  29.3G  0 part 
├─sda6   8:6    0  48.8G  0 part 
├─sda7   8:7    0  48.8G  0 part 
└─sda8   8:8    0  48.8G  0 part 
sdb      8:16   0   149G  0 disk 
├─sdb1   8:17   0   256K  0 part 
├─sdb2   8:18   0 144.5G  0 part 
└─sdb3   8:19   0   4.5G  0 part 
sdc      8:32   0 931.5G  0 disk 
├─sdc1   8:33   0  75.7G  0 part 
├─sdc2   8:34   0     4G  0 part 
├─sdc3   8:35   0 345.7G  0 part /home
├─sdc4   8:36   0     1K  0 part 
└─sdc5   8:37   0 506.1G  0 part 
sdd      8:48   0   149G  0 disk 
├─sdd1   8:49   0  70.3G  0 part 
├─sdd2   8:50   0   4.5G  0 part 
├─sdd3   8:51   0    50M  0 part 
└─sdd4   8:52   0  74.2G  0 part 
sde      8:64   0 223.6G  0 disk 
├─sde1   8:65   0  72.3G  0 part /
├─sde2   8:66   0  72.3G  0 part 
├─sde3   8:67   0    75G  0 part 
├─sde4   8:68   0     1K  0 part 
└─sde5   8:69   0     4G  0 part [SWAP]
sdf      8:80   1    30G  0 disk 
├─sdf1   8:81   1   800K  0 part 
└─sdf3   8:83   1   2.8G  0 part 
sr0     11:0    1  1024M  0 rom  
root@devuan:/# 

zfs set canmount=noauto zroot
This did not help; after a reboot it is the same — busy.

SOLVED
remove
/etc/zfs/zpool.cache
and set

# Run `zfs mount -a` during system start?
ZFS_MOUNT='no'

at /etc/default/zfs

why are you attempting to unmount /var/log ?

Why unmount /var/log?
I surmise the user was preparing to export (zpool export) the ZFS pool after importing it in a rescue environment. The export fails unless all mounted datasets, like /var/log, are unmounted first.

The error cannot unmount '/var/log': pool or dataset is busy occurs because a process on the system is actively using /var/log, preventing it from being unmounted. This is common on live systems where logs are constantly being written.

The solution was correct:

  • Removing /etc/zfs/zpool.cache prevents automatic imports with pre-set mountpoints.

  • Setting ZFS_MOUNT='no' in /etc/default/zfs disables automatic mounting of datasets at boot.


This allows one to manually control import and mount behavior, useful for recovery or manual intervention.

Here is a script to import a ZFS pool in a Devuan (or Debian-based) rescue environment, prevent automatic mounting, and allow safe export:

#!/bin/bash
# zfs-rescue-import.sh
#
# Import a ZFS pool from a rescue environment, disable automatic dataset
# mounting at boot, unmount the pool's datasets, and export the pool.
# Intended to be run as root on Devuan/Debian-based systems.

set -euo pipefail

readonly POOL="zroot"
readonly MOUNTFILE="/etc/default/zfs"
readonly ZPOOL_CACHE="/etc/zfs/zpool.cache"

#######################################
# Verify that the userland ZFS tools match the loaded kernel module.
# Outputs:  progress messages on stdout, errors on stderr
# Returns:  exits 1 when a version cannot be detected, 2 on mismatch
#######################################
check_zfs_version() {
  echo "[*] Checking ZFS versions..."

  local userland_version kernel_version
  # `zfs version` prints lines such as "zfs-2.1.5-1"; that token is a
  # single awk field, so the original `print $2` was always empty.
  # Split on the "zfs-" prefix instead.
  userland_version=$(zfs version 2>/dev/null | awk -F'zfs-' '/^zfs-[0-9]/ {print $2; exit}')
  kernel_version=$(modinfo zfs 2>/dev/null | awk '/^version:/ {print $2; exit}')

  if [ -z "$userland_version" ] || [ -z "$kernel_version" ]; then
    echo "[!] Could not detect ZFS versions properly. Exiting." >&2
    exit 1
  fi

  echo "[*] Userland ZFS version: $userland_version"
  echo "[*] Kernel ZFS module version: $kernel_version"

  if [ "$userland_version" != "$kernel_version" ]; then
    echo "[!] Mismatch detected between userland and kernel ZFS versions." >&2
    echo "[!] Userland: $userland_version | Kernel: $kernel_version" >&2
    echo "[!] Aborting to avoid pool damage." >&2
    exit 2
  fi

  echo "[+] ZFS versions match."
}

echo "=== ZFS Rescue Import Script ==="

check_zfs_version

echo "[*] Importing pool: $POOL"
zpool import -f "$POOL"

echo "[*] Disabling automatic mount on boot"
if [ -f "$MOUNTFILE" ]; then
  sed -i 's/^ZFS_MOUNT=.*/ZFS_MOUNT="no"/' "$MOUNTFILE"
else
  echo 'ZFS_MOUNT="no"' > "$MOUNTFILE"
fi

echo "[*] Removing cached pool import settings"
rm -f -- "$ZPOOL_CACHE"

echo "[*] Unmounting all mounted datasets under $POOL"
zfs unmount -a || true

echo "[*] Forcibly unmounting any datasets of $POOL that are still busy"
# Only touch mountpoints that belong to this pool.  The original script
# unconditionally ran `umount -lf /home`, but /home may live on a
# non-ZFS device (it does on the reporter's system, per df -h), and
# lazy-force-unmounting it would break the running session.
zfs list -H -o mountpoint -r "$POOL" 2>/dev/null | while IFS= read -r mp; do
  case "$mp" in
    /*) mountpoint -q -- "$mp" && umount -lf -- "$mp" || true ;;
  esac
done

echo "[*] Attempting pool export"
if zpool export "$POOL"; then
  echo "[+] Pool exported successfully"
else
  echo "[!] Export failed. Pool may still be busy." >&2
fi

echo "[*] Done."

Added ZFS consistency check to avoid pool damage as well.

    vimanuelt
    Thanks again, man! :)
    And about the first thing I did — does it cause any harm?
    zfs set canmount=noauto zroot
    Or is it better to set it like this:
    zfs set canmount=auto zroot

    vimanuelt Here is a script to import a ZFS pool in a Devuan (or Debian-based) rescue environment, prevent automatic mounting, and allow safe export:

    root@devuan:/home/freeartist-devuan/Downloads/zfs# ./zfs-rescue-import.sh
    === ZFS Rescue Import Script ===
    [*] Checking ZFS versions...
    [!] Could not detect ZFS versions properly. Exiting.
    root@devuan:/home/freeartist-devuan/Downloads/zfs# 

    Here is an updated script to handle edge cases.

    #!/bin/bash
    # zfs-rescue-import.sh - ZFS rescue script with version check and safe export
    #
    # Imports $POOL, disables automatic dataset mounting at boot, unmounts
    # the pool's datasets, and exports the pool.  Run as root.
    
    set -euo pipefail
    
    readonly POOL="zroot"
    readonly MOUNTFILE="/etc/default/zfs"
    readonly ZPOOL_CACHE="/etc/zfs/zpool.cache"
    
    #######################################
    # Detect and compare the userland and kernel-module ZFS versions.
    # Outputs:  progress on stdout, diagnostics on stderr
    # Returns:  exits 1 if either version is undetectable, 2 on mismatch
    #######################################
    check_zfs_version() {
      echo "[*] Checking ZFS versions..."
    
      local userland_version kernel_version
    
      # `zfs version` emits lines like "zfs-2.1.5-1"; the token is one awk
      # field, so the original `print $2` always produced an empty string
      # (which is why the first version of this script aborted).  Split on
      # the "zfs-" prefix instead.
      userland_version=$(zfs version 2>/dev/null | awk -F'zfs-' '/^zfs-[0-9]/ {print $2; exit}')
    
      if [ -z "$userland_version" ]; then
        # Some builds only accept the --version spelling.
        userland_version=$(zfs --version 2>/dev/null | awk -F'zfs-' '/zfs-[0-9]/ {print $2; exit}')
      fi
    
      if modinfo zfs >/dev/null 2>&1; then
        kernel_version=$(modinfo zfs | awk '/^version:/ {print $2; exit}')
      else
        kernel_version=""
      fi
    
      if [ -z "$userland_version" ] || [ -z "$kernel_version" ]; then
        echo "[!] Could not detect ZFS versions properly." >&2
        echo "    USERLAND_VERSION='$userland_version'" >&2
        echo "    KERNEL_VERSION='$kernel_version'" >&2
        echo "    Tip: Ensure ZFS tools and kernel modules are installed." >&2
        exit 1
      fi
    
      echo "[*] Userland ZFS version: $userland_version"
      echo "[*] Kernel ZFS module version: $kernel_version"
    
      if [ "$userland_version" != "$kernel_version" ]; then
        echo "[!] Mismatch detected." >&2
        echo "    Userland: $userland_version" >&2
        echo "    Kernel:   $kernel_version" >&2
        echo "    Aborting to avoid possible pool damage." >&2
        exit 2
      fi
    
      echo "[+] ZFS versions match."
    }
    
    echo "=== ZFS Rescue Import Script ==="
    
    check_zfs_version
    
    echo "[*] Importing pool: $POOL"
    zpool import -f "$POOL"
    
    echo "[*] Disabling automatic mount on boot"
    if [ -f "$MOUNTFILE" ]; then
      sed -i 's/^ZFS_MOUNT=.*/ZFS_MOUNT="no"/' "$MOUNTFILE"
    else
      echo 'ZFS_MOUNT="no"' > "$MOUNTFILE"
    fi
    
    echo "[*] Removing cached pool import settings"
    rm -f -- "$ZPOOL_CACHE"
    
    echo "[*] Unmounting all ZFS datasets"
    zfs unmount -a || true
    
    echo "[*] Forcibly unmounting remaining datasets of $POOL"
    # Restrict forced unmounts to mountpoints of this pool.  The previous
    # revision lazy-force-unmounted /home unconditionally, but /home can be
    # a non-ZFS filesystem (it is on the reporter's machine).
    zfs list -H -o mountpoint -r "$POOL" 2>/dev/null | while IFS= read -r mp; do
      case "$mp" in
        /*) mountpoint -q -- "$mp" && umount -lf -- "$mp" || true ;;
      esac
    done
    
    echo "[*] Exporting ZFS pool"
    if zpool export "$POOL"; then
      echo "[+] Pool exported successfully"
    else
      echo "[!] Export failed." >&2
    fi
    
    echo "[*] Done."

    If this version still does not detect properly, then let me know the output of:

    zfs version 
    zfs --version 
    modinfo zfs

      vimanuelt
      Thanks, I am planning to test it today.
      So these are all scripts for different objectives — import, export, diagnostics?
      You are a true programming guru to write these scripts so quickly! 🙂

        12sunflowers
        The last script I wrote for you, see below, covers all objectives: diagnostics (via zdb and dry-run), safe read-only import, and optional export. All actions are logged, and a structured report is generated. You can toggle import/export behavior via AUTO_IMPORT and AUTO_EXPORT.

        #!/bin/bash
        # zfs-recovery-toolkit.sh – ZFS recovery script with detailed logs and structured report
        #
        # Runs non-destructive diagnostics (zdb metadata scan, dry-run
        # import), optionally performs a read-only import and an export of
        # $POOL, and saves every command's output plus a structured report
        # under ./zfs_recovery_<timestamp>/.  Toggle behaviour with
        # AUTO_IMPORT / AUTO_EXPORT.  Run as root.
        
        set -euo pipefail
        
        # === USER CONFIGURABLE ===
        POOL="zroot"
        DEVICE_HINT="/dev"
        AUTO_IMPORT="yes"
        AUTO_EXPORT="yes"
        # ==========================
        
        TIMESTAMP=$(date +%Y%m%d_%H%M%S)
        HOST=$(hostname)
        OUTDIR="./zfs_recovery_$TIMESTAMP"
        REPORT="$OUTDIR/recovery-report.txt"
        mkdir -p "$OUTDIR"
        
        # Logging helpers: mirror each message to the console (warnings and
        # errors to stderr) and append it to the report file.
        log()     { echo "[*] $1"; echo "[*] $1" >> "$REPORT"; }
        warn()    { echo "[!] $1" >&2; echo "[!] $1" >> "$REPORT"; }
        error()   { echo "[ERROR] $1" >&2; echo "[ERROR] $1" >> "$REPORT"; }
        fail_exit() { error "$1"; echo "[ABORTED]" >> "$REPORT"; exit 1; }
        
        # === BEGIN REPORT ===
        echo "ZFS Recovery Report" > "$REPORT"
        echo "===================" >> "$REPORT"
        echo "Host: $HOST" >> "$REPORT"
        echo "Time: $(date)" >> "$REPORT"
        echo "Pool: $POOL" >> "$REPORT"
        echo "Device Hint: $DEVICE_HINT" >> "$REPORT"
        echo "Output Dir: $OUTDIR" >> "$REPORT"
        echo >> "$REPORT"
        
        # Abort early if any required ZFS tool is missing.
        for cmd in zdb zpool zfs; do
          if ! command -v "$cmd" >/dev/null 2>&1; then
            fail_exit "Required command '$cmd' not found."
          fi
        done
        
        log "Step 1: Checking pool visibility..."
        # grep -w matches the pool name as a whole word, so "zroot" no
        # longer falsely matches a pool named e.g. "zroot2".
        if ! zpool import 2> "$OUTDIR/import-visible-error.txt" | grep -qw -- "$POOL"; then
          warn "Pool not found in import list. Attempting device scan..."
          if ! zpool import -d "$DEVICE_HINT" > "$OUTDIR/import-scan.txt" 2> "$OUTDIR/import-scan-error.txt"; then
            warn "zpool import scan failed. See $OUTDIR/import-scan-error.txt"
          fi
          if grep -qw -- "$POOL" "$OUTDIR/import-scan.txt"; then
            log "Pool found via device hint."
          else
            warn "Pool not found even with device scan. Proceeding cautiously."
          fi
        else
          log "Pool is visible via normal import."
        fi
        
        log "Step 2: zdb metadata analysis..."
        if ! zdb -e -bcsvL "$POOL" > "$OUTDIR/zdb-output.txt" 2> "$OUTDIR/zdb-error.txt"; then
          warn "zdb failed. Output: $OUTDIR/zdb-error.txt"
        else
          log "zdb completed. Output: $OUTDIR/zdb-output.txt"
        fi
        
        log "Step 3: Dry-run import with rollback..."
        if ! zpool import -F -n -o readonly=on -d "$DEVICE_HINT" "$POOL" > "$OUTDIR/zpool-dryrun.txt" 2> "$OUTDIR/zpool-dryrun-error.txt"; then
          warn "Dry-run import failed. See $OUTDIR/zpool-dryrun-error.txt"
        else
          log "Dry-run succeeded. Output: $OUTDIR/zpool-dryrun.txt"
        fi
        
        if [[ "$AUTO_IMPORT" == "yes" ]]; then
          # Exact-name match: the original `grep -q "^$POOL"` would also
          # match any already-imported pool whose name starts with $POOL.
          if zpool list -H -o name 2>/dev/null | grep -qx -- "$POOL"; then
            log "Pool already imported. Skipping real import."
          else
            log "Step 4: Performing read-only import..."
            if ! zpool import -F -o readonly=on -d "$DEVICE_HINT" "$POOL" > "$OUTDIR/zpool-import.txt" 2> "$OUTDIR/zpool-import-error.txt"; then
              warn "Import failed. Output: $OUTDIR/zpool-import-error.txt"
            else
              log "Import completed. Output: $OUTDIR/zpool-import.txt"
            fi
          fi
        
          log "Step 5: Listing datasets..."
          if ! zfs list -r "$POOL" > "$OUTDIR/zfs-list.txt" 2> "$OUTDIR/zfs-list-error.txt"; then
            warn "Dataset listing failed. Output: $OUTDIR/zfs-list-error.txt"
          else
            log "Datasets listed. Output: $OUTDIR/zfs-list.txt"
          fi
        
          if [[ "$AUTO_EXPORT" == "yes" ]]; then
            log "Step 6: Exporting pool..."
            if ! zpool export "$POOL" > "$OUTDIR/zpool-export.txt" 2> "$OUTDIR/zpool-export-error.txt"; then
              warn "Export failed. Output: $OUTDIR/zpool-export-error.txt"
            else
              log "Export successful. Output: $OUTDIR/zpool-export.txt"
            fi
          else
            log "AUTO_EXPORT disabled. Pool remains imported (read-only)."
          fi
        else
          log "AUTO_IMPORT disabled. No changes made to system."
        fi
        
        # === FINAL SUMMARY ===
        echo >> "$REPORT"
        echo "Final Notes:" >> "$REPORT"
        echo "  - All operations performed in non-destructive or read-only mode." >> "$REPORT"
        echo "  - No datasets were mounted." >> "$REPORT"
        echo "  - All logs saved under: $OUTDIR" >> "$REPORT"
        echo >> "$REPORT"
        echo "[COMPLETED]" >> "$REPORT"
        
        log "Recovery completed. Review $REPORT for summary."

        12sunflowers
        Writing a shell or Python script is convenient and a bit lazy; it is not really the rigorous approach for a programmer. I would not say that I am doing anything great, besides being a bit lazy. ;-)

          vimanuelt
          No 🙂 In my opinion, being able to write such scripts is a very cool skill 🙂 I am a newbie here but want to grow. Thanks for the support and the useful help 🙂
          Also:
          Laziness is the engine of progress © 🙂