# @ユメイ 2 月前
# 12/17
# 00:54
# Write optimize.sh into the current directory. The quoted 'EOF' delimiter
# keeps every $var and $(cmd) inside the heredoc literal, so expansion only
# happens later, when optimize.sh itself is executed.
cat > optimize.sh << 'EOF'
#!/bin/bash
# ==================================================
# Linux Network & System Performance Optimization
# Copyright © sola.moe (Optimized Format v3 - RPS/XPS Enhanced)
# ==================================================
# --- ANSI color codes for terminal output ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;36m'
PLAIN='\033[0m'

# --- Logging helpers: tagged, color-coded messages ---
log_info() {
    echo -e "${GREEN}[Info]${PLAIN} $1"
}
log_warn() {
    echo -e "${YELLOW}[Warn]${PLAIN} $1"
}
log_err() {
    echo -e "${RED}[Error]${PLAIN} $1"
}
log_task() {
    echo -e "${BLUE}[Task]${PLAIN} $1"
}
# 0. Abort early unless running as root (the script edits /etc and sysctl).
if [[ $EUID -ne 0 ]]; then
    log_err "请使用 root 权限运行此脚本 (sudo su -)"
    exit 1
fi

printf '%s\n' \
    "================================================" \
    ">> 正在启动系统全栈优化 (Kernel + Network)..." \
    "================================================"
# --- 1. Environment check & BBR module setup ---
log_task ">> [1/6] 正在配置 BBR 模块..."

# Return 0 (true) when a kernel release string (e.g. "5.15.0-91-generic")
# is older than 4.9, the first kernel with native BBR support.
# The previous check concatenated major+minor ("3.10" -> "310") and compared
# against "49", wrongly accepting old kernels such as 2.6.x and 3.x.
kernel_older_than_4_9() {
    local release=$1 major minor rest
    major=${release%%.*}
    rest=${release#*.}
    minor=${rest%%.*}
    major=${major%%[!0-9]*}    # defensive: keep leading digits only
    minor=${minor%%[!0-9]*}
    [ "${major:-0}" -lt 4 ] || { [ "${major:-0}" -eq 4 ] && [ "${minor:-0}" -lt 9 ]; }
}

# 1.1 Kernel version check
if kernel_older_than_4_9 "$(uname -r)"; then
    log_warn "当前内核版本 < 4.9,原生 BBR 可能不支持。建议升级内核。"
fi

# 1.2 Persist the module for boot and try to load it now.
mkdir -p /etc/modules-load.d    # directory may be absent on minimal images
if ! grep -q "tcp_bbr" /etc/modules-load.d/modules.conf 2>/dev/null; then
    echo "tcp_bbr" | tee -a /etc/modules-load.d/modules.conf > /dev/null
fi
if ! modprobe tcp_bbr 2>/dev/null; then
    log_warn "modprobe tcp_bbr 失败。如果是 OpenVZ/LXC 容器,这很正常(由宿主机控制)。"
else
    log_info "BBR 模块加载指令执行成功。"
fi
# --- 2. Size TCP/UDP buffers to the available RAM (avoids OOM on small VPS) ---
log_task ">> [2/6] 检测内存并调整缓冲区..."

# Physical memory in KB, straight from /proc/meminfo.
TOTAL_MEM=$(awk '/^MemTotal/ {print $2}' /proc/meminfo)

if [ "$TOTAL_MEM" -ge 4000000 ]; then
    # Plenty of RAM: favour throughput.
    log_info "配置模式: 高性能 (>4GB) - 侧重吞吐量"
    TCP_MEM_MAX=67108864
    UDP_MEM_MIN=16384
else
    # Small instance: favour stability.
    log_info "配置模式: 低内存 (<4GB) - 侧重稳定性"
    TCP_MEM_MAX=33554432
    UDP_MEM_MIN=4096
fi
# --- 3. Write the sysctl tuning profile ---
# The unquoted EOC delimiter lets ${TCP_MEM_MAX}/${UDP_MEM_MIN} expand to the
# values chosen by the memory check above; everything else is written literally.
log_task ">> [3/6] 正在写入 /etc/sysctl.d/99-sysctl.conf ..."
cat > /etc/sysctl.d/99-sysctl.conf <<EOC
# --- 系统级文件限制 ---
fs.file-max = 2000000
fs.inotify.max_user_instances = 524288
fs.inotify.max_user_watches = 524288
# --- 虚拟内存 (Swap) 策略 ---
vm.swappiness = 10
vm.vfs_cache_pressure = 50
# --- 拥塞控制 ---
net.core.default_qdisc = fq
net.ipv4.tcp_congestion_control = bbr
# --- TCP 缓冲区优化 (动态调整) ---
net.core.rmem_max = ${TCP_MEM_MAX}
net.core.wmem_max = ${TCP_MEM_MAX}
net.ipv4.tcp_rmem = 4096 87380 ${TCP_MEM_MAX}
net.ipv4.tcp_wmem = 4096 65536 ${TCP_MEM_MAX}
net.ipv4.udp_rmem_min = ${UDP_MEM_MIN}
net.ipv4.udp_wmem_min = ${UDP_MEM_MIN}
# 开启 TCP 窗口缩放 (支持大流量传输)
net.ipv4.tcp_window_scaling = 1
# --- 丢包恢复与重传 ---
net.ipv4.tcp_sack = 1
net.ipv4.tcp_dsack = 1
# --- IP 转发 (网关/代理必备) ---
net.ipv4.ip_forward = 1
net.ipv4.conf.all.forwarding = 1
net.ipv4.conf.default.forwarding = 1
net.ipv6.conf.all.forwarding = 1
net.ipv6.conf.default.forwarding = 1
# --- 队列与并发 ---
net.core.somaxconn = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.core.netdev_max_backlog = 300000
net.ipv4.tcp_max_orphans = 65536
net.ipv4.tcp_moderate_rcvbuf = 1
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_notsent_lowat = 16384
EOC
# Apply the new profile now. Output and errors are suppressed intentionally:
# some keys may be unsupported on older kernels or read-only inside containers.
sysctl -p /etc/sysctl.d/99-sysctl.conf > /dev/null 2>&1
log_info "Sysctl 配置已应用。"
# --- 4. NIC tuning (TxQueueLen & RPS/XPS), persisted as a helper script ---
log_task ">> [4/6] 正在配置网卡高级调优 (持久化脚本)..."

# The quoted 'EOS' delimiter writes the helper verbatim (no expansion here).
cat > /usr/local/bin/optimize_net.sh <<'EOS'
#!/bin/bash
# NIC tuning helper - generated by optimize.sh
# Sets txqueuelen and RPS/XPS for the default-route interface at boot.

# 1. Locate the primary interface by scanning for the "dev" keyword.
#    A fixed $5 breaks on directly-attached routes: without a "via <gw>"
#    token the fields shift and $5 holds the source IP, not the interface.
MAIN_IFACE=$(ip route get 8.8.8.8 2>/dev/null | awk 'NR==1 {for (i=1; i<NF; i++) if ($i == "dev") {print $(i+1); exit}}')
if [ -z "$MAIN_IFACE" ]; then
    echo "未找到主网卡,跳过优化。"
    exit 0
fi
echo "正在优化网卡: $MAIN_IFACE"

# 2. Raise the transmit queue length (kernel default is usually 1000);
#    a deeper queue absorbs bursty traffic more smoothly.
ip link set dev "$MAIN_IFACE" txqueuelen 10000
echo "TxQueueLen set to 10000"

# 3. RPS/XPS: spread packet processing across CPU cores.
CPU_COUNT=$(grep -c processor /proc/cpuinfo)
# Build the CPU bitmask in hex: 4 cores -> f, 8 cores -> ff.
# Cap at 32 bits: (1 << n) overflows 64-bit shell arithmetic for n >= 63,
# and sysfs expects comma-separated 32-bit groups for wider masks anyway.
MASK_BITS=$CPU_COUNT
[ "$MASK_BITS" -gt 32 ] && MASK_BITS=32
HEX_MASK=$(printf '%x' $(( (1 << MASK_BITS) - 1 )))

# RPS (receive packet steering)
for file in /sys/class/net/"$MAIN_IFACE"/queues/rx-*/rps_cpus; do
    if [ -f "$file" ]; then
        echo "$HEX_MASK" > "$file"
    fi
done
# XPS (transmit packet steering)
for file in /sys/class/net/"$MAIN_IFACE"/queues/tx-*/xps_cpus; do
    if [ -f "$file" ]; then
        echo "$HEX_MASK" > "$file"
    fi
done

# Enlarge the flow table on multi-core machines so RFS can track flows.
if [ "$CPU_COUNT" -gt 1 ]; then
    sysctl -w net.core.rps_sock_flow_entries=32768 > /dev/null 2>&1
    for file in /sys/class/net/"$MAIN_IFACE"/queues/rx-*/rps_flow_cnt; do
        if [ -f "$file" ]; then
            echo 2048 > "$file"
        fi
    done
fi
echo "网卡优化完成。"
EOS

chmod +x /usr/local/bin/optimize_net.sh
# Apply once immediately; the systemd unit installed below re-applies on boot.
/usr/local/bin/optimize_net.sh
# Install a systemd oneshot unit so the NIC tuning survives reboots.
UNIT_FILE=/etc/systemd/system/network-tuning.service
cat > "$UNIT_FILE" <<'UNIT'
[Unit]
Description=Network Performance Tuning (RPS/XPS/TxQueue)
After=network.target network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/optimize_net.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
UNIT

# Register the unit for boot-time execution.
systemctl daemon-reload
systemctl enable network-tuning.service > /dev/null 2>&1
log_info "网卡优化脚本已安装并设置为开机自启。"
# --- 5. Per-user open-file limits (limits.conf) ---
log_task ">> [5/6] 正在更新用户级限制 (limits.conf)..."

# Keep a one-time backup of the original file.
if [ ! -f /etc/security/limits.conf.bak ]; then
    cp /etc/security/limits.conf /etc/security/limits.conf.bak
fi

# Append the nofile entries only once, so re-running stays idempotent.
if ! grep -q "soft nofile 1000000" /etc/security/limits.conf; then
    cat >> /etc/security/limits.conf <<EOC
* soft nofile 1000000
* hard nofile 1000000
root soft nofile 1000000
root hard nofile 1000000
EOC
fi

# Make sure PAM actually enforces limits.conf on login sessions.
if [ -f /etc/pam.d/common-session ] && ! grep -q "pam_limits.so" /etc/pam.d/common-session; then
    echo "session required pam_limits.so" | tee -a /etc/pam.d/common-session > /dev/null
fi
# --- 6. Systemd global limits (NOFILE/NPROC for all managed services) ---
log_task ">> [6/6] 正在更新 Systemd 全局限制..."
# NOTE: the old guard "[ -d /etc/systemd/system.conf.d ]" was almost never
# true — the drop-in directory does not exist by default — so its mkdir -p was
# dead code and the script always fell back to editing system.conf, even on
# modern systemd. Creating the directory first restores the intended drop-in
# mechanism; the legacy in-place edit is kept only as a fallback.
if mkdir -p /etc/systemd/system.conf.d 2>/dev/null; then
    cat > /etc/systemd/system.conf.d/limit.conf <<EOC
[Manager]
DefaultLimitNOFILE=1000000
DefaultLimitNPROC=65535
EOC
elif [ -f /etc/systemd/system.conf ]; then
    # Legacy fallback: rewrite the keys in the main configuration file.
    log_info "未找到 system.conf.d,尝试修改主配置文件..."
    sed -i '/^DefaultLimitNOFILE=/d' /etc/systemd/system.conf
    sed -i '/^DefaultLimitNPROC=/d' /etc/systemd/system.conf
    echo "DefaultLimitNOFILE=1000000" | tee -a /etc/systemd/system.conf > /dev/null
    echo "DefaultLimitNPROC=65535" | tee -a /etc/systemd/system.conf > /dev/null
fi
# Re-exec systemd so the new limits take effect without a full reboot.
systemctl daemon-reexec
echo "================================================"
# --- Final BBR status report ---
# sysctl -n prints just the value, replacing the old "awk -F= | xargs" cleanup.
AVAILABLE_CC=$(sysctl -n net.ipv4.tcp_available_congestion_control)
CURRENT_CC=$(sysctl -n net.ipv4.tcp_congestion_control)

case "$CURRENT_CC" in
*bbr*)
    echo -e "✅ ${GREEN}优化完成!BBR 已成功开启生效。${PLAIN}"
    echo -e " - 当前 TCP 拥塞控制算法: ${GREEN}$CURRENT_CC${PLAIN}"
    echo -e " - 内核支持的算法: $AVAILABLE_CC"
    echo -e " - 网卡优化 (RPS/XPS/TxQueue): ${GREEN}已启用 (服务名: network-tuning)${PLAIN}"
    ;;
*)
    case "$AVAILABLE_CC" in
    *bbr*)
        echo -e "⚠️ ${YELLOW}优化完成,内核支持 BBR 但尚未生效。${PLAIN}"
        echo -e " - 当前算法: $CURRENT_CC (请重启后再次检查)"
        ;;
    *)
        echo -e "❌ ${RED}BBR 开启失败。${PLAIN}"
        echo -e " - 当前内核可能不支持 BBR,或者在虚拟化环境(如OpenVZ)中受限。"
        echo -e " - 当前算法: $CURRENT_CC"
        ;;
    esac
    ;;
esac
echo "================================================"
echo "建议重启服务器以确保所有服务(特别是 Systemd 托管服务)应用新配置。"
echo "Linux Network & System Performance Optimization"
echo "Copyright © sola.moe (Optimized Format v3 - RPS/XPS Enhanced)"
echo -e "重启命令: ${GREEN}reboot${PLAIN}"
echo "检查服务状态: systemctl status network-tuning.service"
echo "================================================"
EOF
# Make the generated script executable and tell the user how to run it.
chmod +x optimize.sh
echo ">> optimize.sh 文件生成完毕!"
echo ">> 请输入 ./optimize.sh 运行脚本。"
