zfs

OpenZFS on Linux 0.8.2 - the official OpenZFS implementation for Linux.

For kernel 5.4+

The module OpenZFS v0.8.2 is integrated into the Dragon kernel.

Installation instructions for OpenZFS v0.8.2:

$ sudo add-apt-repository ppa:wip-kernel/zfs-linux
$ sudo apt-get update
$ sudo apt install libnvpair1linux libuutil1linux libzfs2linux libzpool2linux zfs-zed zfsutils-linux zfs-test
$ sudo apt purge zfs-dkms
$ sudo reboot
$ dmesg | egrep ZFS
[    9.051184] ZFS: Loaded module v0.8.2-1ubuntu2, ZFS pool version 5000, ZFS filesystem version 5
# zpool create zdata /dev/sdb1
# zpool status -v zdata
  pool: zdata
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        zdata       ONLINE       0     0     0
          sdb1      ONLINE       0     0     0

errors: No known data errors

Read OpenZFS documentation

ZFS MODULE INFO

# modinfo zfs
filename:       /lib/modules/5.4.20-dragon-sandybridge/kernel/zfs/zfs.ko
version:        0.8.2-1ubuntu2
license:        CDDL
author:         OpenZFS on Linux
description:    ZFS
alias:          devname:zfs
alias:          char-major-10-249
srcversion:     29C160FF878154256C93164
depends:        spl,znvpair,icp,zlua,zunicode,zcommon,zavl
retpoline:      Y
name:           zfs
vermagic:       5.4.20-dragon-sandybridge SMP mod_unload modversions 
sig_id:         PKCS#7
signer:         Build time autogenerated kernel key
sig_key:        2A:B3:68:F6:BD:59:E0:EE:4A:2A:F2:6A:4C:FF:98:E0:3D:C4:5C:B0
sig_hashalgo:   sha512
signature:      2F:74:B4:D9:6C:1B:52:5C:66:6A:46:8F:DF:5C:EB:3C:79:41:7A:CA:
                2B:62:D8:66:CD:64:A0:76:E9:38:A8:66:68:E6:4E:9E:DF:68:2A:2B:
                8C:22:D8:F0:B5:42:82:5E:59:56:B8:B3:7E:6D:49:20:DF:6D:A6:61:
                70:56:E9:FB:2F:6E:F8:B5:DD:8E:D8:01:02:26:E1:51:F0:D5:41:F4:
                E1:19:E3:CF:C2:66:20:D9:40:FF:77:3E:8A:2B:D3:0F:5B:BD:36:7C:
                73:2B:E9:67:60:C9:10:04:92:08:4E:A6:04:70:2E:65:CA:B9:94:56:
                A0:BD:2F:E4:B1:AD:CA:0B:E1:D7:26:63:C5:DD:9F:13:FC:90:E3:8A:
                32:3E:0C:64:FA:4E:58:2D:9C:5A:A7:45:F8:D7:DF:08:89:3C:27:8D:
                01:B4:85:CF:E0:05:68:F9:B1:8F:83:88:3A:E1:40:05:9F:EA:D9:36:
                27:73:83:B9:AE:D9:3B:7C:C1:32:B7:4D:89:96:42:8B:8C:F3:5C:9E:
                B7:95:3B:CC:6A:5A:30:88:6A:CE:D0:B7:47:29:5C:99:BD:51:6B:C0:
                7A:C6:9E:6A:7B:BF:06:D2:17:27:E0:C4:C9:09:54:1C:6F:F5:21:EA:
                E9:E5:2E:A0:A3:43:3D:F0:5B:6E:F7:79:A1:ED:61:A3:BE:0A:29:F9:
                6A:57:3A:2F:B4:E0:09:38:8C:D9:E7:8C:D8:82:9D:B9:56:9C:0C:1B:
                AE:5E:70:6F:0B:24:69:57:0E:52:4E:C9:78:89:20:1C:A8:DA:DC:B0:
                13:52:99:84:BA:E0:0F:57:F0:53:77:A5:CC:DA:61:F0:70:72:8A:AF:
                A9:EB:C5:A1:EF:F9:B4:4A:85:CB:FF:E7:16:4A:8C:59:F6:1B:82:2F:
                1B:39:A4:A3:29:E7:3C:AE:D6:02:2B:D3:19:A3:8A:7F:95:50:BD:69:
                DA:E8:AD:59:24:FE:A5:D4:C5:30:3F:33:E0:43:63:8C:CB:52:07:C0:
                09:1C:A3:A3:58:79:13:63:96:CC:76:5F:A3:98:4A:EC:8C:1A:B2:DD:
                A3:D9:16:82:66:D7:54:55:FA:FE:FC:18:FA:0D:7B:19:08:56:29:5D:
                53:F3:F7:E9:37:79:A1:C2:77:FF:71:E9:38:C2:50:43:0B:B3:24:F8:
                E9:D5:6A:A2:3A:C7:2C:68:3C:00:00:A5:82:6D:74:9B:CE:20:49:B9:
                12:C3:37:73:78:F5:EA:9E:E3:02:1C:FA:BD:2E:21:39:E1:50:40:CD:
                4D:CB:28:4B:63:8F:37:97:25:93:B8:BA:96:42:0D:72:B3:57:0F:C2:
                13:D1:81:AF:6E:E3:3A:D8:0B:9B:BB:91
parm:           zvol_inhibit_dev:Do not create zvol device nodes (uint)
parm:           zvol_major:Major number for zvol device (uint)
parm:           zvol_threads:Max number of threads to handle I/O requests (uint)
parm:           zvol_request_sync:Synchronously handle bio requests (uint)
parm:           zvol_max_discard_blocks:Max number of blocks to discard (ulong)
parm:           zvol_prefetch_bytes:Prefetch N bytes at zvol start+end (uint)
parm:           zvol_volmode:Default volmode property value (uint)
parm:           zfs_key_max_salt_uses:Max number of times a salt value can be used for generating encryption keys before it is rotated (ulong)
parm:           zio_slow_io_ms:Max I/O completion time (milliseconds) before marking it as slow (int)
parm:           zio_requeue_io_start_cut_in_line:Prioritize requeued I/O (int)
parm:           zfs_sync_pass_deferred_free:Defer frees starting in this pass (int)
parm:           zfs_sync_pass_dont_compress:Don't compress starting in this pass (int)
parm:           zfs_sync_pass_rewrite:Rewrite new bps starting in this pass (int)
parm:           zio_dva_throttle_enabled:Throttle block allocations in the ZIO pipeline (int)
parm:           zio_deadman_log_all:Log all slow ZIOs, not just those with vdevs (int)
parm:           zfs_commit_timeout_pct:ZIL block open timeout percentage (int)
parm:           zil_replay_disable:Disable intent logging replay (int)
parm:           zil_nocacheflush:Disable ZIL cache flushes (int)
parm:           zil_slog_bulk:Limit in bytes slog sync writes per commit (ulong)
parm:           zfs_object_mutex_size:Size of znode hold array (uint)
parm:           zfs_unlink_suspend_progress:Set to prevent async unlinks (debug - leaks space into the unlinked set) (int)
parm:           zfs_delete_blocks:Delete files larger than N blocks async (ulong)
parm:           zfs_read_chunk_size:Bytes to read per chunk (ulong)
parm:           zfs_immediate_write_sz:Largest data block to write to zil (long)
parm:           zfs_dbgmsg_enable:Enable ZFS debug message log (int)
parm:           zfs_dbgmsg_maxsize:Maximum ZFS debug log size (int)
parm:           zfs_admin_snapshot:Enable mkdir/rmdir/mv in .zfs/snapshot (int)
parm:           zfs_expire_snapshot:Seconds to expire .zfs/snapshot (int)
parm:           zfs_lua_max_instrlimit:Max instruction limit that can be specified for a channel program (ulong)
parm:           zfs_lua_max_memlimit:Max memory limit that can be specified for a channel program (ulong)
parm:           zap_iterate_prefetch:When iterating ZAP object, prefetch it (int)
parm:           zfs_trim_extent_bytes_max:Max size of TRIM commands, larger will be split (uint)
parm:           zfs_trim_extent_bytes_min:Min size of TRIM commands, smaller will be skipped (uint)
parm:           zfs_trim_metaslab_skip:Skip metaslabs which have never been initialized (uint)
parm:           zfs_trim_txg_batch:Min number of txgs to aggregate frees before issuing TRIM (uint)
parm:           zfs_trim_queue_limit:Max queued TRIMs outstanding per leaf vdev (uint)
parm:           zfs_removal_ignore_errors:Ignore hard IO errors when removing device (int)
parm:           zfs_remove_max_segment:Largest contiguous segment to allocate when removing device (int)
parm:           vdev_removal_max_span:Largest span of free chunks a remap segment can span (int)
parm:           zfs_removal_suspend_progress:Pause device removal after this many bytes are copied (debug use only - causes removal to hang) (int)
parm:           zfs_vdev_raidz_impl:Select raidz implementation.
parm:           zfs_vdev_aggregation_limit:Max vdev I/O aggregation size (int)
parm:           zfs_vdev_aggregation_limit_non_rotating:Max vdev I/O aggregation size for non-rotating media (int)
parm:           zfs_vdev_aggregate_trim:Allow TRIM I/O to be aggregated (int)
parm:           zfs_vdev_read_gap_limit:Aggregate read I/O over gap (int)
parm:           zfs_vdev_write_gap_limit:Aggregate write I/O over gap (int)
parm:           zfs_vdev_max_active:Maximum number of active I/Os per vdev (int)
parm:           zfs_vdev_async_write_active_max_dirty_percent:Async write concurrency max threshold (int)
parm:           zfs_vdev_async_write_active_min_dirty_percent:Async write concurrency min threshold (int)
parm:           zfs_vdev_async_read_max_active:Max active async read I/Os per vdev (int)
parm:           zfs_vdev_async_read_min_active:Min active async read I/Os per vdev (int)
parm:           zfs_vdev_async_write_max_active:Max active async write I/Os per vdev (int)
parm:           zfs_vdev_async_write_min_active:Min active async write I/Os per vdev (int)
parm:           zfs_vdev_initializing_max_active:Max active initializing I/Os per vdev (int)
parm:           zfs_vdev_initializing_min_active:Min active initializing I/Os per vdev (int)
parm:           zfs_vdev_removal_max_active:Max active removal I/Os per vdev (int)
parm:           zfs_vdev_removal_min_active:Min active removal I/Os per vdev (int)
parm:           zfs_vdev_scrub_max_active:Max active scrub I/Os per vdev (int)
parm:           zfs_vdev_scrub_min_active:Min active scrub I/Os per vdev (int)
parm:           zfs_vdev_sync_read_max_active:Max active sync read I/Os per vdev (int)
parm:           zfs_vdev_sync_read_min_active:Min active sync read I/Os per vdev (int)
parm:           zfs_vdev_sync_write_max_active:Max active sync write I/Os per vdev (int)
parm:           zfs_vdev_sync_write_min_active:Min active sync write I/Os per vdev (int)
parm:           zfs_vdev_trim_max_active:Max active trim/discard I/Os per vdev (int)
parm:           zfs_vdev_trim_min_active:Min active trim/discard I/Os per vdev (int)
parm:           zfs_vdev_queue_depth_pct:Queue depth percentage for each top-level vdev (int)
parm:           zfs_vdev_mirror_rotating_inc:Rotating media load increment for non-seeking I/O's (int)
parm:           zfs_vdev_mirror_rotating_seek_inc:Rotating media load increment for seeking I/O's (int)
parm:           zfs_vdev_mirror_rotating_seek_offset:Offset in bytes from the last I/O which triggers a reduced rotating media seek increment (int)
parm:           zfs_vdev_mirror_non_rotating_inc:Non-rotating media load increment for non-seeking I/O's (int)
parm:           zfs_vdev_mirror_non_rotating_seek_inc:Non-rotating media load increment for seeking I/O's (int)
parm:           zfs_initialize_value:Value written during zpool initialize (ulong)
parm:           zfs_condense_indirect_vdevs_enable:Whether to attempt condensing indirect vdev mappings (int)
parm:           zfs_condense_min_mapping_bytes:Minimum size of vdev mapping to condense (ulong)
parm:           zfs_condense_max_obsolete_bytes:Minimum size obsolete spacemap to attempt condensing (ulong)
parm:           zfs_condense_indirect_commit_entry_delay_ms:Delay while condensing vdev mapping (int)
parm:           zfs_reconstruct_indirect_combinations_max:Maximum number of combinations when reconstructing split segments (int)
parm:           zfs_vdev_scheduler:I/O scheduler
parm:           zfs_vdev_cache_max:Inflate reads small than max (int)
parm:           zfs_vdev_cache_size:Total size of the per-disk cache (int)
parm:           zfs_vdev_cache_bshift:Shift size to inflate reads too (int)
parm:           zfs_vdev_default_ms_count:Target number of metaslabs per top-level vdev (int)
parm:           zfs_vdev_min_ms_count:Minimum number of metaslabs per top-level vdev (int)
parm:           zfs_vdev_ms_count_limit:Practical upper limit of total metaslabs per top-level vdev (int)
parm:           zfs_slow_io_events_per_second:Rate limit slow IO (delay) events to this many per second (uint)
parm:           zfs_checksum_events_per_second:Rate limit checksum events to this many checksum errors per second (do not set below zedthreshold). (uint)
parm:           zfs_scan_ignore_errors:Ignore errors during resilver/scrub (int)
parm:           vdev_validate_skip:Bypass vdev_validate() (int)
parm:           zfs_nocacheflush:Disable cache flushes (int)
parm:           zfs_txg_timeout:Max seconds worth of delta per txg (int)
parm:           zfs_read_history:Historical statistics for the last N reads (int)
parm:           zfs_read_history_hits:Include cache hits in read history (int)
parm:           zfs_txg_history:Historical statistics for the last N txgs (int)
parm:           zfs_multihost_history:Historical statistics for last N multihost writes (int)
parm:           zfs_flags:Set additional debugging flags (uint)
parm:           zfs_recover:Set to attempt to recover from fatal errors (int)
parm:           zfs_free_leak_on_eio:Set to ignore IO errors during free and permanently leak the space (int)
parm:           zfs_deadman_synctime_ms:Pool sync expiration time in milliseconds
parm:           zfs_deadman_ziotime_ms:IO expiration time in milliseconds
parm:           zfs_deadman_checktime_ms:Dead I/O check interval in milliseconds (ulong)
parm:           zfs_deadman_enabled:Enable deadman timer (int)
parm:           zfs_deadman_failmode:Failmode for deadman timer
parm:           spa_asize_inflation:SPA size estimate multiplication factor (int)
parm:           spa_slop_shift:Reserved free space in pool
parm:           zfs_ddt_data_is_special:Place DDT data into the special class (int)
parm:           zfs_user_indirect_is_special:Place user data indirect blocks into the special class (int)
parm:           zfs_special_class_metadata_reserve_pct:Small file blocks in special vdevs depends on this much free space available (int)
parm:           spa_config_path:SPA config file (/etc/zfs/zpool.cache) (charp)
parm:           zfs_autoimport_disable:Disable pool import at module load (int)
parm:           zfs_spa_discard_memory_limit:Maximum memory for prefetching checkpoint space map per top-level vdev while discarding checkpoint (ulong)
parm:           spa_load_verify_maxinflight:Max concurrent traversal I/Os while verifying pool during import -X (int)
parm:           spa_load_verify_metadata:Set to traverse metadata on pool import (int)
parm:           spa_load_verify_data:Set to traverse data on pool import (int)
parm:           spa_load_print_vdev_tree:Print vdev tree to zfs_dbgmsg during pool import (int)
parm:           zio_taskq_batch_pct:Percentage of CPUs to run an IO worker thread (uint)
parm:           zfs_max_missing_tvds:Allow importing pool with up to this number of missing top-level vdevs (in read-only mode) (ulong)
parm:           zfs_multilist_num_sublists:Number of sublists used in each multilist (int)
parm:           zfs_multihost_fail_intervals:Max allowed period without a successful mmp write (uint)
parm:           zfs_multihost_interval:Milliseconds between mmp writes to each leaf
parm:           zfs_multihost_import_intervals:Number of zfs_multihost_interval periods to wait for activity (uint)
parm:           metaslab_aliquot:allocation granularity (a.k.a. stripe size) (ulong)
parm:           metaslab_debug_load:load all metaslabs when pool is first opened (int)
parm:           metaslab_debug_unload:prevent metaslabs from being unloaded (int)
parm:           metaslab_preload_enabled:preload potential metaslabs during reassessment (int)
parm:           zfs_mg_noalloc_threshold:percentage of free space for metaslab group to allow allocation (int)
parm:           zfs_mg_fragmentation_threshold:fragmentation for metaslab group to allow allocation (int)
parm:           zfs_metaslab_fragmentation_threshold:fragmentation for metaslab to allow allocation (int)
parm:           metaslab_fragmentation_factor_enabled:use the fragmentation metric to prefer less fragmented metaslabs (int)
parm:           metaslab_lba_weighting_enabled:prefer metaslabs with lower LBAs (int)
parm:           metaslab_bias_enabled:enable metaslab group biasing (int)
parm:           zfs_metaslab_segment_weight_enabled:enable segment-based metaslab selection (int)
parm:           zfs_metaslab_switch_threshold:segment-based metaslab selection maximum buckets before switching (int)
parm:           metaslab_force_ganging:blocks larger than this size are forced to be gang blocks (ulong)
parm:           zfs_zevent_len_max:Max event queue length (int)
parm:           zfs_zevent_cols:Max event column width (int)
parm:           zfs_zevent_console:Log events to the console (int)
parm:           zfs_scan_vdev_limit:Max bytes in flight per leaf vdev for scrubs and resilvers (ulong)
parm:           zfs_scrub_min_time_ms:Min millisecs to scrub per txg (int)
parm:           zfs_obsolete_min_time_ms:Min millisecs to obsolete per txg (int)
parm:           zfs_free_min_time_ms:Min millisecs to free per txg (int)
parm:           zfs_resilver_min_time_ms:Min millisecs to resilver per txg (int)
parm:           zfs_scan_suspend_progress:Set to prevent scans from progressing (int)
parm:           zfs_no_scrub_io:Set to disable scrub I/O (int)
parm:           zfs_no_scrub_prefetch:Set to disable scrub prefetching (int)
parm:           zfs_async_block_max_blocks:Max number of blocks freed in one txg (ulong)
parm:           zfs_free_bpobj_enabled:Enable processing of the free_bpobj (int)
parm:           zfs_scan_mem_lim_fact:Fraction of RAM for scan hard limit (int)
parm:           zfs_scan_issue_strategy:IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size (int)
parm:           zfs_scan_legacy:Scrub using legacy non-sequential method (int)
parm:           zfs_scan_checkpoint_intval:Scan progress on-disk checkpointing interval (int)
parm:           zfs_scan_max_ext_gap:Max gap in bytes between sequential scrub / resilver I/Os (ulong)
parm:           zfs_scan_mem_lim_soft_fact:Fraction of hard limit used as soft limit (int)
parm:           zfs_scan_strict_mem_lim:Tunable to attempt to reduce lock contention (int)
parm:           zfs_scan_fill_weight:Tunable to adjust bias towards more filled segments during scans (int)
parm:           zfs_resilver_disable_defer:Process all resilvers immediately (int)
parm:           zfs_dirty_data_max_percent:percent of ram can be dirty (int)
parm:           zfs_dirty_data_max_max_percent:zfs_dirty_data_max upper bound as % of RAM (int)
parm:           zfs_delay_min_dirty_percent:transaction delay threshold (int)
parm:           zfs_dirty_data_max:determines the dirty space limit (ulong)
parm:           zfs_dirty_data_max_max:zfs_dirty_data_max upper bound in bytes (ulong)
parm:           zfs_dirty_data_sync_percent:dirty data txg sync threshold as a percentage of zfs_dirty_data_max (int)
parm:           zfs_delay_scale:how quickly delay approaches infinity (ulong)
parm:           zfs_sync_taskq_batch_pct:max percent of CPUs that are used to sync dirty data (int)
parm:           zfs_zil_clean_taskq_nthr_pct:max percent of CPUs that are used per dp_sync_taskq (int)
parm:           zfs_zil_clean_taskq_minalloc:number of taskq entries that are pre-populated (int)
parm:           zfs_zil_clean_taskq_maxalloc:max number of taskq entries that are cached (int)
parm:           zfs_disable_ivset_guid_check:Set to allow raw receives without IVset guids (int)
parm:           zfs_max_recordsize:Max allowed record size (int)
parm:           zfs_prefetch_disable:Disable all ZFS prefetching (int)
parm:           zfetch_max_streams:Max number of streams per zfetch (uint)
parm:           zfetch_min_sec_reap:Min time before stream reclaim (uint)
parm:           zfetch_max_distance:Max bytes to prefetch per stream (default 8MB) (uint)
parm:           zfetch_array_rd_sz:Number of bytes in a array_read (ulong)
parm:           zfs_pd_bytes_max:Max number of bytes to prefetch (int)
parm:           ignore_hole_birth:Alias for send_holes_without_birth_time (int)
parm:           send_holes_without_birth_time:Ignore hole_birth txg for zfs send (int)
parm:           zfs_override_estimate_recordsize:Record size calculation override for zfs send estimates (ulong)
parm:           zfs_send_corrupt_data:Allow sending corrupt data (int)
parm:           zfs_send_queue_length:Maximum send queue length (int)
parm:           zfs_send_unmodified_spill_blocks:Send unmodified spill blocks (int)
parm:           zfs_recv_queue_length:Maximum receive queue length (int)
parm:           dmu_object_alloc_chunk_shift:CPU-specific allocator grabs 2^N objects at once (int)
parm:           zfs_nopwrite_enabled:Enable NOP writes (int)
parm:           zfs_per_txg_dirty_frees_percent:percentage of dirtied blocks from frees in one TXG (ulong)
parm:           zfs_dmu_offset_next_sync:Enable forcing txg sync to find holes (int)
parm:           dmu_prefetch_max:Limit one prefetch call to this size (int)
parm:           zfs_dedup_prefetch:Enable prefetching dedup-ed blks (int)
parm:           zfs_dbuf_state_index:Calculate arc header index (int)
parm:           dbuf_cache_max_bytes:Maximum size in bytes of the dbuf cache. (ulong)
parm:           dbuf_cache_hiwater_pct:Percentage over dbuf_cache_max_bytes when dbufs must be evicted directly. (uint)
parm:           dbuf_cache_lowater_pct:Percentage below dbuf_cache_max_bytes when the evict thread stops evicting dbufs. (uint)
parm:           dbuf_metadata_cache_max_bytes:Maximum size in bytes of the dbuf metadata cache. (ulong)
parm:           dbuf_metadata_cache_shift:int
parm:           dbuf_cache_shift:Set the size of the dbuf cache to a log2 fraction of arc size. (int)
parm:           zfs_arc_min:Min arc size (ulong)
parm:           zfs_arc_max:Max arc size (ulong)
parm:           zfs_arc_meta_limit:Meta limit for arc size (ulong)
parm:           zfs_arc_meta_limit_percent:Percent of arc size for arc meta limit (ulong)
parm:           zfs_arc_meta_min:Min arc metadata (ulong)
parm:           zfs_arc_meta_prune:Meta objects to scan for prune (int)
parm:           zfs_arc_meta_adjust_restarts:Limit number of restarts in arc_adjust_meta (int)
parm:           zfs_arc_meta_strategy:Meta reclaim strategy (int)
parm:           zfs_arc_grow_retry:Seconds before growing arc size (int)
parm:           zfs_arc_p_dampener_disable:disable arc_p adapt dampener (int)
parm:           zfs_arc_shrink_shift:log2(fraction of arc to reclaim) (int)
parm:           zfs_arc_pc_percent:Percent of pagecache to reclaim arc to (uint)
parm:           zfs_arc_p_min_shift:arc_c shift to calc min/max arc_p (int)
parm:           zfs_arc_average_blocksize:Target average block size (int)
parm:           zfs_compressed_arc_enabled:Disable compressed arc buffers (int)
parm:           zfs_arc_min_prefetch_ms:Min life of prefetch block in ms (int)
parm:           zfs_arc_min_prescient_prefetch_ms:Min life of prescient prefetched block in ms (int)
parm:           l2arc_write_max:Max write bytes per interval (ulong)
parm:           l2arc_write_boost:Extra write bytes during device warmup (ulong)
parm:           l2arc_headroom:Number of max device writes to precache (ulong)
parm:           l2arc_headroom_boost:Compressed l2arc_headroom multiplier (ulong)
parm:           l2arc_feed_secs:Seconds between L2ARC writing (ulong)
parm:           l2arc_feed_min_ms:Min feed interval in milliseconds (ulong)
parm:           l2arc_noprefetch:Skip caching prefetched buffers (int)
parm:           l2arc_feed_again:Turbo L2ARC warmup (int)
parm:           l2arc_norw:No reads during writes (int)
parm:           zfs_arc_lotsfree_percent:System free memory I/O throttle in bytes (int)
parm:           zfs_arc_sys_free:System free memory target size in bytes (ulong)
parm:           zfs_arc_dnode_limit:Minimum bytes of dnodes in arc (ulong)
parm:           zfs_arc_dnode_limit_percent:Percent of ARC meta buffers for dnodes (ulong)
parm:           zfs_arc_dnode_reduce_percent:Percentage of excess dnodes to try to unpin (ulong)
parm:           zfs_abd_scatter_enabled:Toggle whether ABD allocations must be linear. (int)
parm:           zfs_abd_scatter_min_size:Minimum size of scatter allocations. (int)
parm:           zfs_abd_scatter_max_order:Maximum order allocation used for a scatter ABD. (uint)

Package version:


firepro

Enable AMDGPU driver for AMD Saturn XT FirePro M6100

AMD Radeon chipsets: BONAIRE

RESULT FROM DMESG

[    2.843621] [drm] amdgpu kernel modesetting enabled.
[    2.851441] fb0: switching to amdgpudrmfb from EFI VGA
[    2.852469] [drm] initializing kernel modesetting (BONAIRE 0x1002:0x6640 0x1028:0x04A4 0x00).
[    2.856182] [drm] register mmio base: 0xE4100000
[    2.856185] [drm] register mmio size: 262144
[    2.856188] [drm] PCIE atomic ops is not supported
[    2.856194] [drm] add ip block number 0 <cik_common>
[    2.856196] [drm] add ip block number 1 <gmc_v7_0>
[    2.856198] [drm] add ip block number 2 <cik_ih>
[    2.856200] [drm] add ip block number 3 <gfx_v7_0>
[    2.856201] [drm] add ip block number 4 <cik_sdma>
[    2.856203] [drm] add ip block number 5 <powerplay>
[    2.856205] [drm] add ip block number 6 <dce_v8_0>
[    2.856207] [drm] add ip block number 7 <uvd_v4_2>
[    2.856209] [drm] add ip block number 8 <vce_v2_0>
[    2.857054] [drm] vm size is 64 GB, 2 levels, block size is 10-bit, fragment size is 9-bit
[    2.857112] [drm] Detected VRAM RAM=2048M, BAR=256M
[    2.857114] [drm] RAM width 128bits GDDR5
[    2.857224] [drm] amdgpu: 2048M of VRAM memory ready
[    2.857230] [drm] amdgpu: 3072M of GTT memory ready.
[    2.857252] [drm] GART: num cpu pages 262144, num gpu pages 262144
[    2.857716] [drm] PCIE GART of 1024M enabled (table at 0x000000F4007E9000).
[    2.857789] [drm] Supports vblank timestamp caching Rev 2 (21.10.2013).
[    2.857791] [drm] Driver supports precise vblank timestamp query.
[    3.348657] [drm] amdgpu atom DIG backlight initialized
[    3.348666] [drm] AMDGPU Display Connectors
[    3.348668] [drm] Connector 0:
[    3.348670] [drm]   LVDS-1
[    3.348672] [drm]   DDC: 0x195c 0x195c 0x195d 0x195d 0x195e 0x195e 0x195f 0x195f
[    3.348674] [drm]   Encoders:
[    3.348675] [drm]     LCD1: INTERNAL_UNIPHY
[    3.348677] [drm] Connector 1:
[    3.348678] [drm]   DP-1
[    3.348679] [drm]   HPD1
[    3.348681] [drm]   DDC: 0x194c 0x194c 0x194d 0x194d 0x194e 0x194e 0x194f 0x194f
[    3.348683] [drm]   Encoders:
[    3.348684] [drm]     DFP1: INTERNAL_UNIPHY1
[    3.348685] [drm] Connector 2:
[    3.348686] [drm]   DP-2
[    3.348687] [drm]   HPD2
[    3.348689] [drm]   DDC: 0x1950 0x1950 0x1951 0x1951 0x1952 0x1952 0x1953 0x1953
[    3.348691] [drm]   Encoders:
[    3.348692] [drm]     DFP2: INTERNAL_UNIPHY1
[    3.348693] [drm] Connector 3:
[    3.348695] [drm]   DP-3
[    3.348696] [drm]   HPD3
[    3.348697] [drm]   DDC: 0x1954 0x1954 0x1955 0x1955 0x1956 0x1956 0x1957 0x1957
[    3.348699] [drm]   Encoders:
[    3.348700] [drm]     DFP3: INTERNAL_UNIPHY2
[    3.348702] [drm] Connector 4:
[    3.348703] [drm]   VGA-1
[    3.348705] [drm]   DDC: 0x1970 0x1970 0x1971 0x1971 0x1972 0x1972 0x1973 0x1973
[    3.348707] [drm]   Encoders:
[    3.348708] [drm]     CRT1: INTERNAL_KLDSCP_DAC1
[    3.348809] [drm] Found UVD firmware Version: 1.64 Family ID: 9
[    3.349209] [drm] Found VCE firmware Version: 50.10 Binary ID: 2
[    3.349542] [drm] PCIE gen 2 link speeds already enabled
[    3.394258] [drm] UVD initialized successfully.
[    3.514290] [drm] VCE initialized successfully.
[    4.915807] [drm] fb mappable at 0xD0BD4000
[    4.915820] [drm] vram apper at 0xD0000000
[    4.915822] [drm] size 8294400
[    4.915823] [drm] fb depth is 24
[    4.915825] [drm]    pitch is 7680
[    4.915963] fbcon: amdgpudrmfb (fb0) is primary device
[    5.514510] amdgpu 0000:01:00.0: fb0: amdgpudrmfb frame buffer device
[    5.522396] [drm] Initialized amdgpu 3.34.0 20150101 for 0000:01:00.0 on minor 0

# lspci | grep -i VGA
01:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Saturn XT [FirePro M6100]

# lspci -knn | grep -iA2 vga
01:00.0 VGA compatible controller [0300]: Advanced Micro Devices, Inc. [AMD/ATI] Saturn XT [FirePro M6100] [1002:6640]
        Subsystem: Dell Saturn XT [FirePro M6100] [1028:04a4]
        Kernel driver in use: amdgpu
GRUB_CMDLINE_LINUX="modprobe.blacklist=radeon amdgpu.si_support=1 amdgpu.cik_support=1 amdgpu.vm_fragment_size=9 amdgpu.ppfeaturemask=0"
chip "amdgpu-pci-*"
    ignore fan1

    # powerplay
    ignore in0
    ignore power1
INTERVAL=3
DEVPATH=hwmon3=devices/pci0000:00/0000:00:01.0/0000:01:00.0
DEVNAME=hwmon3=amdgpu
FCTEMPS=hwmon3/pwm1=hwmon3/temp1_input
FCFANS=hwmon3/pwm1=hwmon3/pwm1
MINTEMP=hwmon3/pwm1_min=0
MAXTEMP=hwmon3/pwm1_max=255
MINSTART=hwmon3/pwm1_min=30
MINSTOP=hwmon3/pwm1_min=0
MAXPWM=hwmon3/pwm1=255

I recommend using the following utilities to monitor the device:

Daemon for radeon-profile GUI

Application to read current clocks of ATi Radeon cards


btrfs

BTRFS Tools for kernel v5

Package version (download from Dragon support site):


mok

Kernel verification can be done without IMA security subsystem enabled.

On x86, a signature is embedded into a PE file (MS format) header of the binary. Since arm64’s “Image” can also be seen as a PE file as long as CONFIG_EFI is enabled, we adopt this format for kernel signing.

You can create a signed kernel image with:

$ sbsign --key ${KEY} --cert ${CERT} Image

For signing Dragon Kernel for EFI Boot with your MOK

$ openssl x509 -in /var/lib/shim-signed/mok/MOK.der -inform DER -outform PEM -out ~/MOK.pem
$ sudo add-apt-repository ppa:wip-kernel/shv5
$ sudo apt-get update
$ sudo apt install linux-headers-5.4.20-dragon linux-headers-5.4.20-dragon-generic linux-image-unsigned-5.4.20-dragon-generic linux-modules-5.4.20-dragon-generic linux-modules-extra-5.4.20-dragon-generic
$ sudo sbsign --key /var/lib/shim-signed/mok/MOK.priv --cert ~/MOK.pem /boot/vmlinuz-5.4.20-dragon-generic --output /boot/vmlinuz-5.4.20-dragon-generic.signed
$ sudo cp /boot/initrd.img-5.4.20-dragon-generic{,.signed}
$ sudo update-grub
$ reboot
$ sudo rm /boot/vmlinuz-5.4.20-dragon-generic
$ sudo rm /boot/initrd.img-5.4.20-dragon-generic
$ sudo update-grub
$ reboot

Thanks Edgard Pineda “epineda”


NVIDIA

Fixes NVIDIA binary driver - version 390.129/430.50/435.21

For kernel 5.4+

Installation instructions for NVIDIA 390.129 (430.50/435.21 are installed the same way):

#!/bin/bash
#
sudo apt purge libnvidia-cfg1-390 libnvidia-common-390 libnvidia-compute-390 \
    libnvidia-decode-390 libnvidia-encode-390 libnvidia-fbc1-390 \
    libnvidia-gl-390 libnvidia-ifr1-390 libxnvctrl0 nvidia-compute-utils-390 \
    nvidia-dkms-390 nvidia-driver-390 nvidia-kernel-common-390 \
    nvidia-kernel-source-390 nvidia-prime nvidia-settings nvidia-utils-390 \
    screen-resolution-extra xserver-xorg-video-nvidia-390
#
exit 0

Driver version - nvidia-kernel-source-390 (390.129-0ubuntu1)

# sudo apt install nvidia-kernel-source-390

Download patch for 390.129:

Download

Download patch for 430.50:

Download

Download patch for 435.21:

Download

# cd /usr/src

put a patch here

(there should already be a source folder - nvidia-390.129)

# patch -p1 < nvidia-390.129_kernel-5.4+.patch
# rm nvidia-390.129_kernel-5.4+.patch
#!/bin/bash
#
sudo apt install libnvidia-cfg1-390 libnvidia-common-390 libnvidia-compute-390 \
    libnvidia-decode-390 libnvidia-encode-390 libnvidia-fbc1-390 \
    libnvidia-gl-390 libnvidia-ifr1-390 libxnvctrl0 nvidia-compute-utils-390 \
    nvidia-dkms-390 nvidia-driver-390 nvidia-kernel-common-390 nvidia-prime \
    nvidia-settings nvidia-utils-390 screen-resolution-extra \
    xserver-xorg-video-nvidia-390
#
exit 0

If it takes a long time to log in to the system, then:

Solution by Lauge Jensen

If you use the default Ubuntu login manager, install LightDM and use it instead.

$ sudo apt install lightdm
$ sudo nano /etc/modprobe.d/nvidia-drm.conf
options nvidia-drm modeset=1
options nvidia NVreg_UsePageAttributeTable=1

iptables

nftables is a framework by the Netfilter Project that provides packet filtering, network address translation (NAT) and other packet mangling. Two of the most common uses of nftables are to provide firewall support and NAT; nftables replaces the iptables framework.

WiKi nftables

$ sudo systemctl stop nftables.service && sudo systemctl disable nftables.service
$ sudo nft flush ruleset
$ sudo apt purge nftables
$ sudo update-alternatives --set iptables /usr/sbin/iptables-legacy
$ sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
$ sudo update-alternatives --set arptables /usr/sbin/arptables-legacy
$ sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy
$ sudo apt purge netfilter-persistent iptables-persistent
$ sudo apt install ufw gufw
$ sudo ufw disable
$ sudo touch /etc/rc.local
$ sudo chmod 755 /etc/rc.local
$ sudo chown root:root /etc/rc.local

Add to file /etc/rc.local:

#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
#
ufw enable
#
exit 0

Change string in file /etc/default/ufw IPT_MODULES="nf_conntrack_ftp nf_nat_ftp nf_conntrack_netbios_ns" to IPT_MODULES="".

$ sudo systemctl stop ufw.service && sudo systemctl disable ufw.service
$ sudo systemctl enable rc.local.service && sudo systemctl start rc.local.service
$ sudo ufw status verbose

Use GUFW from your desktop to add and edit firewall rules


VIRTUALBOX

Fixed VirtualBox packages 6.0.12

For kernel 5.4+

Download

Download packages or install:

Download

$ sudo dpkg -i virtualbox-source_6.0.12-dfsg-1_all.deb
$ sudo dpkg -i virtualbox-dkms_6.0.12-dfsg-1_all.deb

Ignore the failed compile/install of the dkms module at this point — it will be rebuilt after the patch is applied...

$ cp ./virtualbox-6.0.12.patch /usr/src
$ cd /usr/src
$ patch -p0 < virtualbox-6.0.12.patch
$ sudo /usr/lib/dkms/dkms_autoinstaller start
$ apt install -f

$ modprobe -v vboxdrv

$ lsmod | egrep vbox
vboxpci                <size>  0
vboxnetflt             <size>  0
vboxnetadp             <size>  0
vboxdrv                <size>  3 vboxpci,vboxnetadp,vboxnetflt

$ sudo dpkg -i virtualbox_6.0.12-dfsg-1_amd64.deb virtualbox-qt_6.0.12-dfsg-1_amd64.deb
$ reboot
BUILD AND INSTALL LOG

$ sudo /usr/lib/dkms/dkms_autoinstaller start
 * dkms: running auto installation service for kernel 5.4.20-dragon-sandybridge
Kernel preparation unnecessary for this kernel.  Skipping...

Building module:
cleaning build area...
make -j8 KERNELRELEASE=5.4.20-dragon-sandybridge \
-C /lib/modules/5.4.20-dragon-sandybridge/build M=/var/lib/dkms/virtualbox/6.0.12/build.......
Signing module:
 - /var/lib/dkms/virtualbox/6.0.12/5.4.20-dragon-sandybridge/x86_64/module/vboxdrv.ko
 - /var/lib/dkms/virtualbox/6.0.12/5.4.20-dragon-sandybridge/x86_64/module/vboxnetadp.ko
 - /var/lib/dkms/virtualbox/6.0.12/5.4.20-dragon-sandybridge/x86_64/module/vboxnetflt.ko
 - /var/lib/dkms/virtualbox/6.0.12/5.4.20-dragon-sandybridge/x86_64/module/vboxpci.ko
This system doesn't support Secure Boot
Secure Boot not enabled on this system.
cleaning build area...

DKMS: build completed.

vboxdrv.ko:
Running module version sanity check.
modinfo: ERROR: missing module or filename.
 - Original module
   - No original module exists within this kernel
 - Installation
   - Installing to /lib/modules/5.4.20-dragon-sandybridge/updates/dkms/

vboxnetadp.ko:
Running module version sanity check.
modinfo: ERROR: missing module or filename.
 - Original module
   - No original module exists within this kernel
 - Installation
   - Installing to /lib/modules/5.4.20-dragon-sandybridge/updates/dkms/

vboxnetflt.ko:
Running module version sanity check.
modinfo: ERROR: missing module or filename.
 - Original module
   - No original module exists within this kernel
 - Installation
   - Installing to /lib/modules/5.4.20-dragon-sandybridge/updates/dkms/

vboxpci.ko:
Running module version sanity check.
modinfo: ERROR: missing module or filename.
 - Original module
   - No original module exists within this kernel
 - Installation
   - Installing to /lib/modules/5.4.20-dragon-sandybridge/updates/dkms/

depmod...

DKMS: install completed.

[47.231565] vboxdrv: Found 8 processor cores
[47.250048] vboxdrv: TSC mode is Invariant, tentative frequency 3591661955 Hz
[47.250050] vboxdrv: Successfully loaded version 6.0.12_Ubuntu (interface 0x********)
[47.335503] VBoxNetFlt: Successfully started.
[47.348157] VBoxNetAdp: Successfully started.
[47.392283] VBoxPciLinuxInit

Package version:


i915

Fixes booting kernel for i915 video chipset

If, while the kernel is starting, the boot screen stays black or the boot process stops, then:

From the GRUB boot menu, press the “e” key, edit the kernel boot line, then press F10 to boot.


Example config files for Intel i5 Westmere, 4Gb RAM

GRUB_CMDLINE_LINUX_DEFAULT="noresume elevator=bfq mds=full psi=1 acpi_serialize acpi_osi=Linux acpi_backlight=vendor intel_iommu=on swiotlb=32768 apparmor=0 net.ifnames=0 biosdevname=0"
GRUB_CMDLINE_LINUX="systemd.gpt_auto=0 zswap.enabled=1 zswap.compressor=lz4 zswap.max_pool_percent=15"

Enable ZSWAP

Zswap is a kernel feature that provides a compressed RAM cache for swap pages

  • Add to grub.cfg
GRUB_CMDLINE_LINUX="zswap.compressor=lz4 zswap.max_pool_percent=15"
  • Add to /etc/initramfs-tools/modules
lz4
lz4_compress
  • Run command
$ sudo update-grub && sudo update-initramfs -u

Tune IO scheduler

To tune the I/O scheduler, create the file /etc/udev/rules.d/60-ssd-scheduler.rules.

You can also add this to file 60-ssd-scheduler.rules:

# Set bfq scheduler for non-rotating disks
ACTION=="add|change", KERNEL=="sd[a-z]", TEST!="queue/rotational", ATTR{queue/scheduler}="bfq"
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="bfq"
# Set bfq scheduler for rotating disks
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="1", ATTR{queue/scheduler}="bfq"

and run a command:

# sudo udevadm control --reload && sudo udevadm trigger

Add to .bashrc file:

# Tune glibc memory allocation, optimize for low fragmentation
# limit the number of arenas
#
export MALLOC_ARENA_MAX=4

# Disable dynamic mmap threshold, see M_MMAP_THRESHOLD in "man mallopt"
#
export MALLOC_MMAP_THRESHOLD_=131072
export MALLOC_TRIM_THRESHOLD_=131072
export MALLOC_TOP_PAD_=131072
export MALLOC_MMAP_MAX_=65536

  • /etc/sysctl.conf
vm.laptop_mode = 0

vm.overcommit_ratio = 200 
vm.overcommit_memory = 2

# Core dump suidsafe
kernel.core_uses_pid = 1
kernel.core_pattern = /tmp/core-%e-%s-%u-%g-%p-%t
fs.suid_dumpable = 2

kernel.printk = 4 4 1 7
kernel.sysrq = 0

# Network
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.tcp_max_orphans = 65536
net.ipv4.tcp_fin_timeout = 10
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_mem = 50576   64768   98152
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_orphan_retries = 0
net.ipv4.tcp_syncookies = 1
net.netfilter.nf_conntrack_max = 16777216
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_sack = 1
net.ipv4.tcp_congestion_control = yeah
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.route.flush = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.lo.rp_filter = 1
net.ipv4.conf.eth0.rp_filter = 1
net.ipv4.conf.wlan0.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.lo.accept_source_route = 0
net.ipv4.conf.eth0.accept_source_route = 0
net.ipv4.conf.wlan0.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rfc1337 = 1
net.ipv4.ip_forward = 0
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_echo_ignore_all = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.core.somaxconn = 65535
net.core.netdev_max_backlog = 4096
net.core.rmem_default = 65536
net.core.wmem_default = 65536
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
fs.inotify.max_user_watches = 16777216
#
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.ip_default_ttl = 63
#
net.ipv4.tcp_ecn = 1
net.core.default_qdisc = cake
#
kernel.perf_cpu_time_max_percent = 100
#
# WriteBack cache (For SSD disk)
vm.dirty_background_bytes=67108864
vm.dirty_bytes=201326592
vm.dirty_expire_centisecs=1500
vm.dirty_writeback_centisecs=500
vm.dirtytime_expire_seconds=3000
#
# Huge Page
vm.nr_hugepages = 16
vm.nr_overcommit_hugepages = 16
vm.hugetlb_shm_group = 1001
#
kernel.yama.ptrace_scope = 2
#
# For Chromium sandbox use!
kernel.unprivileged_userns_clone = 0

  • /etc/network/interfaces
wireless-power off

  • /etc/NetworkManager/conf.d/default-wifi-powersave-on.conf
[connection]
wifi.powersave = 2

  • /etc/NetworkManager/NetworkManager.conf
[connection]
wifi.powersave = 2

[device]
wifi.scan-rand-mac-address=no

Copyright © 2019 Dragon Team All rights reserved.