Batch Deploying and Configuring Nginx with SaltStack
Resource List
Role | Hostname | IP | Node Group | CPUs | Memory | Nginx Root Directory |
---|---|---|---|---|---|---|
master | master.saltstack.com | 192.168.93.101 | | | | |
minion | web01.saltstack.com | 192.168.93.102 | web01group | 2C | 4G | /data |
minion | web02.saltstack.com | 192.168.93.103 | web02group | 2C | 4G | /www |
Basic Environment
- Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
- Disable SELinux (the kernel security mechanism)
setenforce 0
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config
- Set the hostnames (run the matching command on each host)
hostnamectl set-hostname master.saltstack.com
hostnamectl set-hostname web01.saltstack.com
hostnamectl set-hostname web02.saltstack.com
- Add hosts entries for name resolution
cat >> /etc/hosts << EOF
192.168.93.101 master.saltstack.com
192.168.93.102 web01.saltstack.com
192.168.93.103 web02.saltstack.com
EOF
- Synchronize time
yum -y install ntpdate
ntpdate ntp1.aliyun.com
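- Optionally, the one-off sync can be turned into a periodic job so the clocks stay aligned; this is just a sketch using cron with the same ntpdate command.
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1") | crontab -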
1. SaltStack Installation
- Installing the SaltStack Master and Minion packages is straightforward.
1.1 Install the YUM Repository
- Perform the following steps on all hosts.
# Import the GPG key
sudo rpm --import https://repo.saltproject.io/py3/redhat/7/x86_64/latest/SALTSTACK-GPG-KEY.pub
# Download the SaltStack repository file
curl -fsSL https://repo.saltproject.io/py3/redhat/7/x86_64/latest.repo | sudo tee /etc/yum.repos.d/salt.repo
# The two minions will install Nginx via YUM, so the EPEL repository must be installed as well
yum -y install epel-release
1.2 Install the Master and Minion
- Master node
yum -y install salt-master.noarch
- Minion nodes (both)
yum -y install salt-minion.noarch
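- To confirm the packages installed correctly, the versions can be checked (a quick sanity check; the exact version depends on the repository used):
[root@master ~]# salt-master --version
[root@web01 ~]# salt-minion --version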
1.3 Configure the Master
- Every line in the default configuration file is commented out.
[root@master ~]# vim /etc/salt/master
# Listen address; binding a private address is safer
interface: 192.168.93.101
# Allow minions to be accepted automatically: once a minion is configured with the master's IP and its service starts, the master accepts its key automatically, so you never need to run salt-key to approve certificates
auto_accept: True
# Set the SaltStack file root; uncomment these lines
file_roots:
  base:
    - /srv/salt
# Define node groups; append this at the end of the file
nodegroups:
  web01group: 'web01.saltstack.com'
  web02group: 'web02.saltstack.com'
# Enable pillar_opts (include the master configuration options in pillar data)
pillar_opts: True
# Set the pillar root directory; uncomment these lines
pillar_roots:
  base:
    - /srv/pillar
# The master's effective configuration changes are as follows
[root@master ~]# cat /etc/salt/master | grep -v '^$' | grep -v '^#'
interface: 192.168.93.101
auto_accept: True
file_roots:
  base:
    - /srv/salt
pillar_roots:
  base:
    - /srv/pillar
pillar_opts: True
nodegroups:
  web01group: 'web01.saltstack.com'
  web02group: 'web02.saltstack.com'
# Start the service
[root@master ~]# systemctl start salt-master.service
[root@master ~]# systemctl enable salt-master.service
# After startup, the master listens on TCP 4505 and TCP 4506
[root@master ~]# netstat -anpt | grep 4505
tcp 0 0 192.168.93.101:4505 0.0.0.0:* LISTEN 8753/python3
[root@master ~]# netstat -anpt | grep 4506
tcp 0 0 192.168.93.101:4506 0.0.0.0:* LISTEN 8759/python3
# Create the salt file root and pillar directories
[root@master ~]# mkdir /srv/salt
[root@master ~]# mkdir /srv/pillar
1.4 Configure the Minions
1.4.1 Edit the Minion01 Configuration File
- Every line in the default configuration file is commented out.
[root@web01 ~]# vim /etc/salt/minion
# Set the master's IP
master: 192.168.93.101
# Set this minion's ID (hostname)
id: web01.saltstack.com
[root@web01 ~]# systemctl start salt-minion.service
[root@web01 ~]# systemctl enable salt-minion.service
1.4.2 Edit the Minion02 Configuration File
- Every line in the default configuration file is commented out.
[root@web02 ~]# vim /etc/salt/minion
# Set the master's IP
master: 192.168.93.101
# Set this minion's ID (hostname)
id: web02.saltstack.com
[root@web02 ~]# systemctl start salt-minion.service
[root@web02 ~]# systemctl enable salt-minion.service
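- Because auto_accept: True is set on the master, both minion keys should be accepted automatically once the salt-minion services start. A quick check on the master (both minion IDs should appear under Accepted Keys):
[root@master ~]# salt-key -L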
1.5 Verify Communication
- On the master, run a simple test of communication between the master and the minions. If every minion returns True, communication is working.
- Note: this ping is not the familiar ICMP ping command; it is simply a method of the test module that verifies master-minion communication.
- The target '*' matches every authenticated minion; many other glob and regular-expression match patterns are also supported.
[root@master ~]# salt '*' test.ping
web02.saltstack.com:
True
web01.saltstack.com:
True
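- The node groups defined in /etc/salt/master can also be used as targets with the -N option, for example:
[root@master ~]# salt -N web01group test.ping
[root@master ~]# salt -N web02group test.ping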
2. Batch Deployment of Nginx with SaltStack
- The following uses three important SaltStack components, grains, pillar, and state, to complete the Nginx installation.
- Both grains and pillar collect data from the minions. Grains are reported each time the minion starts, while pillar data can be changed at any time and a change made on the master generally takes effect immediately. Grains therefore suit static attributes such as a host's role or number of disks, whereas pillar suits values that change frequently.
- Before configuring, analyze the Nginx main configuration file: values such as the number of worker processes and the open file handle limit must be set according to the current number of CPUs.
- There are two ways to define grains data: write a custom file that is distributed to the minions, or implement it through the master's extension module API; the module approach is more flexible. A custom grains module is used below.
- The following command displays all grains values of the minion web01:
[root@master ~]# salt 'web01.saltstack.com' grains.items
web01.saltstack.com:
----------
biosreleasedate:
11/12/2020
biosversion:
6.00
cpu_flags:
- fpu
- vme
- de
- pse
- tsc
- msr
- pae
- mce
- cx8
- apic
- sep
- mtrr
- pge
- mca
- cmov
- pat
- pse36
- clflush
- mmx
- fxsr
- sse
- sse2
- syscall
- nx
- mmxext
- fxsr_opt
- pdpe1gb
- rdtscp
- lm
- constant_tsc
- art
- rep_good
- nopl
- tsc_reliable
- nonstop_tsc
- extd_apicid
- eagerfpu
- pni
- pclmulqdq
- ssse3
- fma
- cx16
- sse4_1
- sse4_2
- x2apic
- movbe
- popcnt
- aes
- xsave
- avx
- f16c
- rdrand
- hypervisor
- lahf_lm
- extapic
- cr8_legacy
- abm
- sse4a
- misalignsse
- 3dnowprefetch
- osvw
- topoext
- retpoline_amd
- ibpb
- vmmcall
- fsgsbase
- bmi1
- avx2
- smep
- bmi2
- erms
- invpcid
- rdseed
- adx
- smap
- clflushopt
- clwb
- sha_ni
- xsaveopt
- xsavec
- xgetbv1
- clzero
- arat
- umip
- pku
- ospke
- vaes
- vpclmulqdq
- overflow_recov
- succor
cpu_model:
AMD Ryzen 7 5800H with Radeon Graphics
cpuarch:
x86_64
cwd:
/
disks:
- sda
- sr0
dns:
----------
domain:
ip4_nameservers:
- 114.114.114.114
- 8.8.8.8
ip6_nameservers:
nameservers:
- 114.114.114.114
- 8.8.8.8
options:
search:
- saltstack.com
sortlist:
domain:
saltstack.com
efi:
False
efi-secure-boot:
False
fqdn:
web01.saltstack.com
fqdn_ip4:
- 192.168.93.102
fqdn_ip6:
- fe80::8777:f1a8:c3dd:6a2b
fqdns:
- web01.saltstack.com
gid:
0
gpus:
|_
----------
model:
SVGA II Adapter
vendor:
vmware
groupname:
root
host:
web01
hwaddr_interfaces:
----------
ens33:
00:0c:29:c2:6c:57
lo:
00:00:00:00:00:00
id:
web01.saltstack.com
init:
systemd
ip4_gw:
192.168.93.2
ip4_interfaces:
----------
ens33:
- 192.168.93.102
lo:
- 127.0.0.1
ip6_gw:
False
ip6_interfaces:
----------
ens33:
- fe80::8777:f1a8:c3dd:6a2b
lo:
- ::1
ip_gw:
True
ip_interfaces:
----------
ens33:
- 192.168.93.102
- fe80::8777:f1a8:c3dd:6a2b
lo:
- 127.0.0.1
- ::1
ipv4:
- 127.0.0.1
- 192.168.93.102
ipv6:
- ::1
- fe80::8777:f1a8:c3dd:6a2b
kernel:
Linux
kernelparams:
|_
- BOOT_IMAGE
- /vmlinuz-3.10.0-1160.71.1.el7.x86_64
|_
- root
- /dev/mapper/centos-root
|_
- ro
- None
|_
- crashkernel
- auto
|_
- rd.lvm.lv
- centos/root
|_
- rd.lvm.lv
- centos/swap
|_
- rhgb
- None
|_
- quiet
- None
|_
- LANG
- zh_CN.UTF-8
kernelrelease:
3.10.0-1160.71.1.el7.x86_64
kernelversion:
#1 SMP Tue Jun 28 15:37:28 UTC 2022
locale_info:
----------
defaultencoding:
UTF-8
defaultlanguage:
zh_CN
detectedencoding:
UTF-8
timezone:
unknown
localhost:
web01.saltstack.com
lsb_distrib_codename:
CentOS Linux 7 (Core)
lsb_distrib_id:
CentOS Linux
lvm:
----------
centos:
- home
- root
- swap
machine_id:
5237691b432a4b3889e1d6d816e33374
manufacturer:
VMware, Inc.
master:
192.168.93.101
mdadm:
mem_total:
3770
nodename:
web01.saltstack.com
num_cpus:
2
num_gpus:
1
os:
CentOS
os_family:
RedHat
osarch:
x86_64
oscodename:
CentOS Linux 7 (Core)
osfinger:
CentOS Linux-7
osfullname:
CentOS Linux
osmajorrelease:
7
osrelease:
7.9.2009
osrelease_info:
- 7
- 9
- 2009
path:
/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
pid:
8819
productname:
VMware Virtual Platform
ps:
ps -efHww
pythonexecutable:
/usr/bin/python3
pythonpath:
- /usr/bin
- /usr/lib64/python36.zip
- /usr/lib64/python3.6
- /usr/lib64/python3.6/lib-dynload
- /usr/lib64/python3.6/site-packages
- /usr/lib/python3.6/site-packages
pythonversion:
- 3
- 6
- 8
- final
- 0
saltpath:
/usr/lib/python3.6/site-packages/salt
saltversion:
3005.5
saltversioninfo:
- 3005
- 5
selinux:
----------
enabled:
False
enforced:
Disabled
serialnumber:
VMware-56 4d b3 e7 5c c1 80 b6-00 cd 81 a5 8f c2 6c 57
server_id:
618231344
shell:
/bin/sh
ssds:
swap_total:
2047
systemd:
----------
features:
+PAM +AUDIT +SELINUX +IMA -APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 -SECCOMP +BLKID +ELFUTILS +KMOD +IDN
version:
219
systempath:
- /usr/local/sbin
- /usr/local/bin
- /usr/sbin
- /usr/bin
transactional:
False
uid:
0
username:
root
uuid:
e7b34d56-c15c-b680-00cd-81a58fc26c57
virtual:
VMware
zfs_feature_flags:
False
zfs_support:
False
zmqversion:
4.1.4
- Grains is one of SaltStack's important components; it collects basic, static information about the minions, such as CPU, memory, operating system, and virtualization. Some data cannot be collected by default, for example the open file handle limit, which must be obtained through a custom Python script. The master then uses this information to configure each minion individually.
- First, create the _grains directory; the custom file placed in it will be synchronized to the minions and executed there, after which the minions' open file handle limit can be collected.
[root@master ~]# mkdir /srv/salt/_grains
[root@master ~]# vim /srv/salt/_grains/nginx_config.py
#!/usr/bin/python
# This version is for Python 3
import subprocess


def NginxGrains():
    '''
    Return Nginx config grains values
    '''
    grains = {}
    max_open_file = 65535
    getulimit = (1, '')
    try:
        # Query the minion's current open file handle limit
        getulimit = subprocess.getstatusoutput('source /etc/profile;ulimit -n')
    except Exception:
        pass
    if getulimit[0] == 0:
        max_open_file = int(getulimit[1])
    grains['max_open_file'] = max_open_file
    return grains
- The script above makes each minion report its current open file handle limit.
- Before synchronizing it to the minions, run the following command to confirm whether the master can already obtain the minions' max_open_file value:
[root@master ~]# salt '*' grains.item max_open_file
web02.saltstack.com:
----------
max_open_file:
web01.saltstack.com:
----------
max_open_file:
- The result shows that the max_open_file value cannot be obtained yet, so the grains module must be synchronized by running the following command on the master.
- Note: if the command reports an error, just run it a few more times.
[root@master ~]# salt '*' saltutil.sync_all
web02.saltstack.com:
----------
beacons:
clouds:
engines:
executors:
grains:
- grains.nginx_config
log_handlers:
matchers:
modules:
output:
proxymodules:
renderers:
returners:
sdb:
serializers:
states:
thorium:
utils:
web01.saltstack.com:
----------
beacons:
clouds:
engines:
executors:
grains:
- grains.nginx_config
log_handlers:
matchers:
modules:
output:
proxymodules:
renderers:
returners:
sdb:
serializers:
states:
thorium:
utils:
- The synchronized files are stored on the minions under /var/cache/salt:
[root@web01 ~]# find /var/cache/salt/
/var/cache/salt/
/var/cache/salt/minion
/var/cache/salt/minion/extmods
/var/cache/salt/minion/extmods/grains
/var/cache/salt/minion/extmods/grains/nginx_config.py
/var/cache/salt/minion/extmods/grains/__pycache__
/var/cache/salt/minion/extmods/grains/__pycache__/nginx_config.cpython-36.pyc
/var/cache/salt/minion/proc
/var/cache/salt/minion/module_refresh
/var/cache/salt/minion/files
/var/cache/salt/minion/files/base
/var/cache/salt/minion/files/base/_grains
/var/cache/salt/minion/files/base/_grains/nginx_config.py
[root@web02 ~]# find /var/cache/salt/
/var/cache/salt/
/var/cache/salt/minion
/var/cache/salt/minion/extmods
/var/cache/salt/minion/extmods/grains
/var/cache/salt/minion/extmods/grains/nginx_config.py
/var/cache/salt/minion/extmods/grains/__pycache__
/var/cache/salt/minion/extmods/grains/__pycache__/nginx_config.cpython-36.pyc
/var/cache/salt/minion/proc
/var/cache/salt/minion/module_refresh
/var/cache/salt/minion/files
/var/cache/salt/minion/files/base
/var/cache/salt/minion/files/base/_grains
/var/cache/salt/minion/files/base/_grains/nginx_config.py
- Obtain the max_open_file value again:
[root@master ~]# salt '*' grains.item max_open_file
web02.saltstack.com:
----------
max_open_file:
8192
web01.saltstack.com:
----------
max_open_file:
8192
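- Once the custom grain is available, it can also be used for targeting with the -G option; a small sketch, assuming the minions report 8192 as shown above:
[root@master ~]# salt -G 'max_open_file:8192' test.ping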
- Configure pillar: on the master, create the top file top.sls, which defines which minions each set of pillar data applies to. Its content is as follows:
[root@master ~]# cat /srv/pillar/top.sls
base:
  web01group:          # group name
    - match: nodegroup
    - web01server      # include web01server.sls
  web02group:          # group name
    - match: nodegroup
    - web02server      # include web02server.sls
- web01group and web02group are the node groups defined in /etc/salt/master. For each group a corresponding file, here web01server and web02server, specifies its configuration; these files then define a different Nginx root directory for each group's hosts, as shown below.
[root@master ~]# cat /srv/pillar/web01server.sls
nginx:
  root: /data
[root@master ~]# cat /srv/pillar/web02server.sls
nginx:
  root: /www
- Use the following command to check the pillar configuration:
[root@master ~]# salt '*' pillar.data nginx
web01.saltstack.com:
----------
nginx:
----------
root:
/data
web02.saltstack.com:
----------
nginx:
----------
root:
/www
# If the output of the command above differs, run the following command first to refresh the pillar
[root@master ~]# salt '*' saltutil.refresh_pillar
- The output clearly shows that each minion's Nginx root directory has been configured successfully.
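- Pillar data can likewise be used for targeting with the -I option; for example, the following sketch (run after the pillar has been refreshed) addresses only the minion whose Nginx root is /data:
[root@master ~]# salt -I 'nginx:root:/data' test.ping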
- Define the state top file top.sls; note that its name is the same as the pillar top file, but it lives under /srv/salt.
[root@master ~]# cat /srv/salt/top.sls
base:
  '*':
    - nginx
- Next, define the minion state: install the Nginx package, manage its configuration file, and run the service.
[root@master ~]# cat /srv/salt/nginx.sls
nginx:
  pkg.installed:
    - name: nginx
  file.managed:
    - source: salt://nginx/nginx.conf
    - name: /etc/nginx/nginx.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
  service.running:
    - enable: True
    - reload: True
    - watch:
      - file: /etc/nginx/nginx.conf
      - pkg: nginx
# Explanation of the file contents
nginx:
  pkg.installed:                        # 1. Package management
    - name: nginx                       # install nginx
  file.managed:                         # 2. File management
    - source: salt://nginx/nginx.conf   # path of the configuration file on the master
    - name: /etc/nginx/nginx.conf       # path of the configuration file on the minion
    - user: root                        # file owner
    - group: root                       # file group
    - mode: 644                         # permissions
    - template: jinja                   # render the configuration file as a Jinja template
  service.running:                      # 3. Service management
    - enable: True                      # start on boot
    - reload: True                      # reloading is allowed
    - watch:
      - file: /etc/nginx/nginx.conf
      - pkg: nginx
# salt://nginx/nginx.conf is the location of the configuration template on the master
# enable: True checks whether the service is in the boot-time startup list and adds it if not, equivalent to chkconfig nginx on
# reload: True means the service supports reload; without it, a restart is performed by default
# watch both monitors /etc/nginx/nginx.conf for changes, reloading nginx when the file changes, and also ensures that the nginx package was installed successfully
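- Before applying anything, the top file mapping and the rendered state can be previewed from the master; the following standard commands are a convenient sanity check:
# Show which states the top file assigns to each minion
[root@master ~]# salt '*' state.show_top
# Show the fully rendered nginx state for one minion
[root@master ~]# salt 'web01.saltstack.com' state.show_sls nginx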
- The configuration above is essentially the same as installing Nginx directly on a single server: first install the package, then adjust the configuration file, and finally start the service.
- Use a Jinja template to define the Nginx configuration file nginx.conf. First create an nginx directory, since the source path of the configuration file was defined above as salt://nginx/nginx.conf.
[root@master ~]# mkdir /srv/salt/nginx
- Then create the Nginx configuration file nginx.conf in that directory; it can be written to suit your own requirements.
[root@master ~]# cat /srv/salt/nginx/nginx.conf
user nginx;
worker_processes {{grains['num_cpus']}};
{% if grains['num_cpus'] ==1 %}
worker_cpu_affinity 10;
{% elif grains['num_cpus'] ==2 %}
worker_cpu_affinity 01 10;
{% elif grains['num_cpus'] == 4 %}
worker_cpu_affinity 0001 0010 0100 1000;
{% elif grains['num_cpus'] == 8 %}
worker_cpu_affinity 00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000;
{% else %}
worker_cpu_affinity 0001 0010 0100 1000;
{% endif %}
worker_rlimit_nofile {{ grains['max_open_file'] }};
error_log /var/log/nginx_error.log;
pid /var/run/nginx.pid;
events
{
worker_connections {{ grains['max_open_file'] }};
}
http
{
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 60;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"' ;
server{
listen 80 default_server;
server_name _;
location / {
root {{ pillar['nginx']['root'] }};
index index.html index.htm;
}
error_page 404 /404.html;
location = /404.html {
root /usr/share/nginx/html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
}
- Now trigger the state run from the master so that both minions install and configure Nginx.
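- A dry run first shows what would change without touching the minions (test=True is a standard option):
[root@master ~]# salt '*' state.highstate test=True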
[root@master ~]# salt '*' state.highstate
# Command output
web01.saltstack.com:
----------
ID: nginx
Function: pkg.installed
Result: True
Comment: The following packages were installed/updated: nginx
Started: 21:22:58.889050
Duration: 47000.074 ms
Changes:
----------
centos-indexhtml:
----------
new:
7-9.el7.centos
old:
gperftools-libs:
----------
new:
2.6.1-1.el7
old:
nginx:
----------
new:
1:1.20.1-10.el7
old:
nginx-filesystem:
----------
new:
1:1.20.1-10.el7
old:
openssl11-libs:
----------
new:
1:1.1.1k-7.el7
old:
----------
ID: nginx
Function: file.managed
Name: /etc/nginx/nginx.conf
Result: True
Comment: File /etc/nginx/nginx.conf updated
Started: 21:23:45.891199
Duration: 30.837 ms
Changes:
----------
diff:
---
+++
@@ -1,84 +1,38 @@
-# For more information on configuration, see:
-# * Official English Documentation: http://nginx.org/en/docs/
-# * Official Russian Documentation: http://nginx.org/ru/docs/
+user nginx;
+worker_processes 2;
-user nginx;
-worker_processes auto;
-error_log /var/log/nginx/error.log;
-pid /run/nginx.pid;
+worker_cpu_affinity 01 10;
-# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
-include /usr/share/nginx/modules/*.conf;
-
-events {
- worker_connections 1024;
+worker_rlimit_nofile 8192;
+error_log /var/log/nginx_error.log;
+pid /var/run/nginx.pid;
+events
+{
+worker_connections 8192;
}
-
-http {
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- tcp_nopush on;
- tcp_nodelay on;
- keepalive_timeout 65;
- types_hash_max_size 4096;
-
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- # Load modular configuration files from the /etc/nginx/conf.d directory.
- # See http://nginx.org/en/docs/ngx_core_module.html#include
- # for more information.
- include /etc/nginx/conf.d/*.conf;
-
- server {
- listen 80;
- listen [::]:80;
- server_name _;
- root /usr/share/nginx/html;
-
- # Load configuration files for the default server block.
- include /etc/nginx/default.d/*.conf;
-
- error_page 404 /404.html;
- location = /404.html {
- }
-
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- }
- }
-
-# Settings for a TLS enabled server.
-#
-# server {
-# listen 443 ssl http2;
-# listen [::]:443 ssl http2;
-# server_name _;
-# root /usr/share/nginx/html;
-#
-# ssl_certificate "/etc/pki/nginx/server.crt";
-# ssl_certificate_key "/etc/pki/nginx/private/server.key";
-# ssl_session_cache shared:SSL:1m;
-# ssl_session_timeout 10m;
-# ssl_ciphers HIGH:!aNULL:!MD5;
-# ssl_prefer_server_ciphers on;
-#
-# # Load configuration files for the default server block.
-# include /etc/nginx/default.d/*.conf;
-#
-# error_page 404 /404.html;
-# location = /40x.html {
-# }
-#
-# error_page 500 502 503 504 /50x.html;
-# location = /50x.html {
-# }
-# }
-
+http
+{
+include /etc/nginx/mime.types;
+default_type application/octet-stream;
+sendfile on;
+keepalive_timeout 60;
+log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+'$status $body_bytes_sent "$http_referer" '
+'"$http_user_agent" "$http_x_forwarded_for"' ;
+server{
+listen 80 default_server;
+server_name _;
+location / {
+root /data;
+index index.html index.htm;
}
-
+error_page 404 /404.html;
+location = /404.html {
+root /usr/share/nginx/html;
+}
+error_page 500 502 503 504 /50x.html;
+location = /50x.html {
+root /usr/share/nginx/html;
+}
+}
+}
----------
ID: nginx
Function: service.running
Result: True
Comment: Service nginx has been enabled, and is running
Started: 21:23:45.930632
Duration: 93.236 ms
Changes:
----------
nginx:
True
Summary for web01.saltstack.com
------------
Succeeded: 3 (changed=3)
Failed: 0
------------
Total states run: 3
Total run time: 47.124 s
web02.saltstack.com:
----------
ID: nginx
Function: pkg.installed
Result: True
Comment: The following packages were installed/updated: nginx
Started: 21:22:58.874407
Duration: 43775.334 ms
Changes:
----------
centos-indexhtml:
----------
new:
7-9.el7.centos
old:
gperftools-libs:
----------
new:
2.6.1-1.el7
old:
nginx:
----------
new:
1:1.20.1-10.el7
old:
nginx-filesystem:
----------
new:
1:1.20.1-10.el7
old:
openssl11-libs:
----------
new:
1:1.1.1k-7.el7
old:
----------
ID: nginx
Function: file.managed
Name: /etc/nginx/nginx.conf
Result: True
Comment: File /etc/nginx/nginx.conf updated
Started: 21:23:42.652020
Duration: 30.833 ms
Changes:
----------
diff:
---
+++
@@ -1,84 +1,38 @@
-# For more information on configuration, see:
-# * Official English Documentation: http://nginx.org/en/docs/
-# * Official Russian Documentation: http://nginx.org/ru/docs/
+user nginx;
+worker_processes 2;
-user nginx;
-worker_processes auto;
-error_log /var/log/nginx/error.log;
-pid /run/nginx.pid;
+worker_cpu_affinity 01 10;
-# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
-include /usr/share/nginx/modules/*.conf;
-
-events {
- worker_connections 1024;
+worker_rlimit_nofile 8192;
+error_log /var/log/nginx_error.log;
+pid /var/run/nginx.pid;
+events
+{
+worker_connections 8192;
}
-
-http {
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /var/log/nginx/access.log main;
-
- sendfile on;
- tcp_nopush on;
- tcp_nodelay on;
- keepalive_timeout 65;
- types_hash_max_size 4096;
-
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- # Load modular configuration files from the /etc/nginx/conf.d directory.
- # See http://nginx.org/en/docs/ngx_core_module.html#include
- # for more information.
- include /etc/nginx/conf.d/*.conf;
-
- server {
- listen 80;
- listen [::]:80;
- server_name _;
- root /usr/share/nginx/html;
-
- # Load configuration files for the default server block.
- include /etc/nginx/default.d/*.conf;
-
- error_page 404 /404.html;
- location = /404.html {
- }
-
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- }
- }
-
-# Settings for a TLS enabled server.
-#
-# server {
-# listen 443 ssl http2;
-# listen [::]:443 ssl http2;
-# server_name _;
-# root /usr/share/nginx/html;
-#
-# ssl_certificate "/etc/pki/nginx/server.crt";
-# ssl_certificate_key "/etc/pki/nginx/private/server.key";
-# ssl_session_cache shared:SSL:1m;
-# ssl_session_timeout 10m;
-# ssl_ciphers HIGH:!aNULL:!MD5;
-# ssl_prefer_server_ciphers on;
-#
-# # Load configuration files for the default server block.
-# include /etc/nginx/default.d/*.conf;
-#
-# error_page 404 /404.html;
-# location = /40x.html {
-# }
-#
-# error_page 500 502 503 504 /50x.html;
-# location = /50x.html {
-# }
-# }
-
+http
+{
+include /etc/nginx/mime.types;
+default_type application/octet-stream;
+sendfile on;
+keepalive_timeout 60;
+log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+'$status $body_bytes_sent "$http_referer" '
+'"$http_user_agent" "$http_x_forwarded_for"' ;
+server{
+listen 80 default_server;
+server_name _;
+location / {
+root /www;
+index index.html index.htm;
}
-
+error_page 404 /404.html;
+location = /404.html {
+root /usr/share/nginx/html;
+}
+error_page 500 502 503 504 /50x.html;
+location = /50x.html {
+root /usr/share/nginx/html;
+}
+}
+}
----------
ID: nginx
Function: service.running
Result: True
Comment: Service nginx has been enabled, and is running
Started: 21:23:42.691178
Duration: 85.452 ms
Changes:
----------
nginx:
True
Summary for web02.saltstack.com
------------
Succeeded: 3 (changed=3)
Failed: 0
------------
Total states run: 3
Total run time: 43.892 s
- The output shows three IDs, equivalent to three tasks: the first installs, the second configures, and the third starts the service. All three succeeded and none failed. To double-check, you can also verify on the minions that Nginx is running:
[root@web01 ~]# ps -ef | grep nginx
root 10507 1 0 21:23 ? 00:00:00 nginx: master process /usr/sbin/nginx
nginx 10508 10507 0 21:23 ? 00:00:00 nginx: worker process
nginx 10509 10507 0 21:23 ? 00:00:00 nginx: worker process
root 10603 8208 0 21:25 pts/0 00:00:00 grep --color=auto nginx
[root@web01 ~]# netstat -anpt | grep 80
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 10507/nginx: master
[root@web02 ~]# ps -ef | grep nginx
root 20435 1 0 21:23 ? 00:00:00 nginx: master process /usr/sbin/nginx
nginx 20436 20435 0 21:23 ? 00:00:00 nginx: worker process
nginx 20437 20435 0 21:23 ? 00:00:00 nginx: worker process
root 20535 8197 0 21:25 pts/0 00:00:00 grep --color=auto nginx
[root@web02 ~]# netstat -anpt | grep 80
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 20435/nginx: master
- View the Nginx main configuration file nginx.conf on the web01.saltstack.com node:
[root@web01 ~]# more /etc/nginx/nginx.conf
user nginx;
worker_processes 2;
worker_cpu_affinity 01 10;
worker_rlimit_nofile 8192;
error_log /var/log/nginx_error.log;
pid /var/run/nginx.pid;
events
{
worker_connections 8192;
}
http
{
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 60;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"' ;
server{
listen 80 default_server;
server_name _;
location / {
root /data;
index index.html index.htm;
}
error_page 404 /404.html;
location = /404.html {
root /usr/share/nginx/html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
}
- View the Nginx main configuration file nginx.conf on the web02.saltstack.com node:
[root@web02 ~]# more /etc/nginx/nginx.conf
user nginx;
worker_processes 2;
worker_cpu_affinity 01 10;
worker_rlimit_nofile 8192;
error_log /var/log/nginx_error.log;
pid /var/run/nginx.pid;
events
{
worker_connections 8192;
}
http
{
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 60;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"' ;
server{
listen 80 default_server;
server_name _;
location / {
root /www;
index index.html index.htm;
}
error_page 404 /404.html;
location = /404.html {
root /usr/share/nginx/html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
}
3. Access Verification
3.1 web01
[root@web01 ~]# mkdir /data
[root@web01 ~]# echo "web01" > /data/index.html
[root@web01 ~]# curl 127.0.0.1
web01
3.2 web02
[root@web02 ~]# mkdir /www
[root@web02 ~]# echo "web02" > /www/index.html
[root@web02 ~]# curl 127.0.0.1
web02
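- The same verification can also be run centrally from the master using the cmd.run execution module, for example:
[root@master ~]# salt '*' cmd.run 'curl -s http://127.0.0.1'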