#
# tuned configuration
#
[main]
summary=Optimized for CloudLinux hosting servers
include=throughput-performance
[bootloader]
cmdline = systemd.unified_cgroup_hierarchy=0 systemd.legacy_systemd_cgroup_controller cgroup.memory=nokmem
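# tuned's bootloader plugin appends these options to the kernel command line
# (via the GRUB configuration); a reboot is required for them to take effect.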
[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
# The alternation of CPU-bound load and disk I/O on a PostgreSQL database
# server can prompt the CPU to enter power-save states.
#
# Explicitly disable deep C-states to reduce latency on OLTP workloads.
force_latency=1
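#
# Illustrative runtime checks (shell commands, not applied by tuned; the
# sysfs paths assume the cpufreq/cpuidle drivers are loaded):
#   cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
#   grep . /sys/devices/system/cpu/cpu0/cpuidle/state*/latency
# force_latency=1 makes tuned hold /dev/cpu_dma_latency open with a value of
# 1 us, so the kernel avoids C-states whose exit latency exceeds that bound.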
[vm]
transparent_hugepages=never
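# Illustrative check that THP is off (the active value is bracketed):
#   cat /sys/kernel/mm/transparent_hugepage/enabled
#   -> always madvise [never]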
[sysctl]
kernel.numa_balancing = 1
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
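# Worked example (hypothetical 64 GiB host): dirty_ratio=40 lets dirty pages
# reach ~25.6 GiB before writers are throttled, and dirty_background_ratio=10
# starts background writeback at ~6.4 GiB.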
vm.swappiness=10
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
# Increase the kernel's maximum socket buffer sizes. Currently this appears
# necessary only at 40GbE speeds; each commented triple below is
# min/default/max in bytes.
#
# The buffer tuning values below do not account for any potential hugepage
# allocation. Ensure that you do not oversubscribe system memory.
#net.ipv4.tcp_rmem="4096 87380 16777216"
#net.ipv4.tcp_wmem="4096 16384 16777216"
##
# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# A net.core.busy_read value greater than 0 enables busy polling on socket
# reads; the recommended value is 50.
# A net.core.busy_poll value greater than 0 enables busy polling globally
# (for poll() and select()); the recommended value is 50.
net.core.busy_read=50
net.core.busy_poll=50
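# Illustrative check of the live values:
#   sysctl net.core.busy_read net.core.busy_poll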
# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3
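# The value is a bitmask: bit 0 (0x1) enables Fast Open on outgoing (client)
# connections, bit 1 (0x2) on listening (server) sockets; 3 sets both.
# Illustrative check:
#   sysctl net.ipv4.tcp_fastopen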
####
vm.zone_reclaim_mode=0
[scheduler]
sched_min_granularity_ns = 10000000
sched_wakeup_granularity_ns = 15000000
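# Note: on kernels 5.13 and later these knobs moved from /proc/sys/kernel/
# to /sys/kernel/debug/sched/; newer tuned releases write to the debugfs
# location when the running kernel provides it.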
[disk-vm]
type=disk
devices = vd*
elevator = mq-deadline
[disk-sas]
type=disk
devices = sd*
elevator = mq-deadline
[disk-nvme]
type=disk
devices = nvme*
elevator = none
readahead = 0
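# Illustrative check that the scheduler was applied (device name assumed):
#   cat /sys/block/nvme0n1/queue/scheduler
#   -> [none] mq-deadline kyber bfq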