自动化五Saltstack生产案例Haproxy与keepalived集群

1.Saltstack部署Haproxy及keepalived集群

1.1Salt目录结构

[root@linux-node1 ~]# tree /srv/salt/prod
/srv/salt/prod
├── modules   #功能目录,通用目录
│   ├── apache
│   │   ├── files
│   │   │   ├── httpd.conf
│   │   │   └── php.ini
│   │   ├── init.sls
│   │   └── php.sls
│   ├── mysql
│   │   ├── files
│   │   │   ├── mariadb-server-master.cnf
│   │   │   ├── mariadb-server-slave.cnf
│   │   │   └── my.cnf
│   │   ├── install.sls
│   │   ├── master.sls
│   │   └── slave.sls
│   └── redis
│   ├── files
│   │   └── redis.conf
│   ├── init.sls
│   ├── master.sls
│   └── slave.sls
└── shop-user  #业务目录
 ├── files
 │   ├── php.conf
 │   ├── php.ini
 │   ├── shop-user.conf
 │   └── start_slave.sh
 ├── mysql-master.sls
 ├── mysql-slave.sls
 └── web.sls

9 directories, 21 files

1.2各个配置文件的状态内容

#apache状态的配置文件
[root@linux-node1 ~]# cat /srv/salt/prod/modules/apache/init.sls 
# Apache state: install httpd, manage httpd.conf as a Jinja template and
# keep the service running. Pulls in the php sub-state as well.
include:
  - modules.apache.php

apache-install:
  pkg.installed:
    - name: httpd

apache-config:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://modules/apache/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    # Template variables must be nested one level BELOW "defaults:" so
    # they form the defaults dict handed to the Jinja renderer; at the
    # same indent level they are stray kwargs on file.managed instead.
    - defaults:
        PORT: 80
        IPADDR: {{ grains['fqdn_ip4'][0] }}
    - watch_in:
      - service: apache-service

apache-service:
  service.running:
    - name: httpd
    - enable: True
    - reload: True

#php状态文件的配置内容
[root@linux-node1 ~]# cat /srv/salt/prod/modules/apache/php.sls 
# Install PHP plus the PDO/MySQL extensions needed by the web app.
php-install:
  pkg.installed:
    - pkgs:
      - php
      - php-pdo
      - php-mysql

#MySQL状态文件配置内容
[root@linux-node1 ~]# more /srv/salt/prod/modules/mysql/*
::::::::::::::
/srv/salt/prod/modules/mysql/install.sls
::::::::::::::
# Install the MariaDB client and server and manage the global my.cnf.
mysql-install:
  pkg.installed:
    - pkgs:
      # package names must be nested one level below "pkgs:"; at the
      # same indent they parse as sibling list items, not the pkgs list
      - mariadb
      - mariadb-server

mysql-config:
  file.managed:
    - name: /etc/my.cnf
    - source: salt://modules/mysql/files/my.cnf
    - user: root
    - group: root
    - mode: 644
::::::::::::::
/srv/salt/prod/modules/mysql/master.sls
::::::::::::::
# MySQL master role: base install plus the master-specific server config.
include:
  - modules.mysql.install

master-config:
  file.managed:
    - name: /etc/my.cnf.d/mariadb-server.cnf
    - source: salt://modules/mysql/files/mariadb-server-master.cnf
    - user: root
    - group: root
    - mode: 644

# NOTE(review): there is no watch on master-config here, so a config
# change will not restart mariadb automatically - confirm this is intended.
master-service:
  service.running:
    - name: mariadb
    - enable: True
::::::::::::::
/srv/salt/prod/modules/mysql/slave.sls
::::::::::::::
# MySQL slave role: base install plus the slave-specific server config.
include:
  - modules.mysql.install

slave-config:
  file.managed:
    - name: /etc/my.cnf.d/mariadb-server.cnf
    - source: salt://modules/mysql/files/mariadb-server-slave.cnf
    - user: root
    - group: root
    - mode: 644

# NOTE(review): there is no watch on slave-config here, so a config
# change will not restart mariadb automatically - confirm this is intended.
slave-service:
  service.running:
    - name: mariadb
    - enable: True

#redis配置及主从状态配置文件内容
[root@linux-node1 redis]# more init.sls master.sls slave.sls 
::::::::::::::
init.sls
::::::::::::::
# Redis base state: install, templated config, running service.
redis-install:
  pkg.installed:
    - name: redis

redis-config:
  file.managed:
    - name: /etc/redis.conf
    # the redis module lives under modules/redis (see the tree at the
    # top of this document), so the fileserver path needs that prefix
    - source: salt://modules/redis/files/redis.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    # template variables nested under "defaults:" so they reach Jinja
    - defaults:
        PORT: 6379
        IPADDR: {{ grains['fqdn_ip4'][0] }}

redis-service:
  service.running:
    - name: redis
    - enable: True
    - reload: True
::::::::::::::
master.sls
::::::::::::::
# Redis master role just reuses the shared redis state; the module lives
# under modules/redis, so the include path needs the modules prefix.
include:
  - modules.redis.init
::::::::::::::
slave.sls
::::::::::::::
include:
  - modules.redis.init

# Make this node replicate from the master unless it already reports
# role:slave. NOTE(review): master/slave IPs are hard-coded; pillar data
# would be cleaner.
slave_config:
  cmd.run:
    - name: redis-cli -h 118.190.201.12 slaveof 118.190.201.11 6379
    - unless: redis-cli -h 118.190.201.12 info |grep role:slave
    - require:
      - service: redis-service

1.3手动执行Salt命令进行验证

#关闭mysql缓存
vim /etc/salt/master
#master_job_cache: mysql
systemctl restart salt-master
#手动执行Salt命令进行验证
salt '*' state.sls modules.apache.init saltenv=prod
Summary
------------
Succeeded: 5
Failed: 0
------------
Total states run: 5

1.4业务正常运行需要的条件

1.php的session需要存放到redis里面,需要改php.ini

#把php.ini的配置管理单独拿出来
[root@linux-node1 shop-user]# vim web.sls 
# shop-user web role: apache+php from the shared module, plus the
# project-specific php.ini, redis session handler and vhost config.
include:
  - modules.apache.init   # this project is served by apache, so pull it in here

php-config:
  file.managed:
    - name: /etc/php.ini
    - source: salt://modules/apache/files/php.ini
    - user: root
    - group: root
    - mode: 644

php-redis-config:   # store PHP sessions in redis
  file.managed:
    - name: /etc/httpd/conf.d/php.conf
    - source: salt://shop-user/files/php.conf
    - user: root
    - group: root
    - mode: 644

web-config:    # apache virtual-host configuration for this site
  file.managed:
    - name: /etc/httpd/conf.d/shop-user.conf   # the vhost definition
    - source: salt://shop-user/files/shop-user.conf
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - service: apache-service

2.MySQL主配置文件的状态

[root@linux-node1 shop-user]# cat /srv/salt/prod/shop-user/mysql-master.sls 
# shop-user MySQL master: base master state plus the replication grant.
include:
  - modules.mysql.master

master-grant:
  cmd.run:
    # create the replication account; "unless" probes whether the account
    # can already log in, so the GRANT only runs once.
    # NOTE(review): the password appears on the command line and in job
    # output - consider moving credentials into pillar.
    - name: mysql -e "GRANT replication slave,super on *.* to 'repl_user'@'118.190.201.0/255.255.255.0' identified by 'repl_user@pass'"
    - unless: mysql -h 118.190.201.11 -u repl_user -prepl_user@pass -e "exit"

3.MySQL从配置文件的状态

[root@linux-node1 shop-user]# cat /srv/salt/prod/shop-user/mysql-slave.sls 
# shop-user MySQL slave: base slave state plus a one-shot helper script
# that starts replication.
include:
  - modules.mysql.slave

slave-start:
  file.managed:
    - name: /tmp/start_slave.sh
    - source: salt://shop-user/files/start_slave.sh
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    # run the helper once; the "unless" guard assumes start_slave.sh
    # creates /etc/my.cnf.d/slave.lock on success - TODO confirm the
    # script actually writes that lock file.
    - name: /bin/bash /tmp/start_slave.sh
    - unless: test -f /etc/my.cnf.d/slave.lock

4.各个服务的配置文件

#链接地址如下:
链接:https://pan.baidu.com/s/1in_8uyX8UEqIkrFfZ8YLZw 
提取码:w9pm

5.手动执行测试验证

[root@linux-node1 ~]# salt '*' state.sls shop-user.web saltenv=prod
......省略部分.........
Summary
------------
Succeeded: 7
Failed: 0
------------
Total states run: 7

5.这台服务器需要安装Haproxy与keepalived,所以不起80端口

[root@linux-node1 apache]# pwd
/srv/salt/prod/modules/apache
[root@linux-node1 apache]# vim init.sls
......省略部分......
      PORT: 8080
......省略部分......
[root@linux-node1 files]# pwd
/srv/salt/prod/modules/apache/files
[root@linux-node1 files]# vim httpd.conf
......省略部分......
Listen {{ IPADDR }}:{{ PORT }}
......省略部分......
[root@linux-node1 ~]# salt '*' state.sls shop-user.web saltenv=prod
[root@linux-node1 ~]# netstat -lntup|grep 8080
tcp    0     0      118.190.201.11:8080     0.0.0.0:*     LISTEN     57593/httpd 
[root@linux-node2 ~]# netstat -lntup|grep 8080
tcp    0     0      118.190.201.12:8080     0.0.0.0:*     LISTEN     7042/httpd

6.手动编写php.info文件进行测试

#node1,node2节点
[root@linux-node1 html]# vim /var/www/html/info.php
<?php
phpinfo();
?>
###浏览器访问:http://118.190.201.11:8080/info.php

7.加入到base环境的top_file里面

[root@linux-node1 ~]# vim /srv/salt/base/top.sls 
# previous base-environment assignment, kept here disabled for reference
#base:
#  'os:CentOS':
#    - match: grain
#    - web.apache

# prod environment: per-node role assignment (minion IDs as targets)
prod:
  'linux-node1.example.com':
    - shop-user.mysql-master
    - shop-user.web
  'linux-node2.example.com':
    - shop-user.mysql-slave
    - shop-user.web

8.执行验证

[root@linux-node1 ~]# salt '*' state.highstate
......省略部分......
Summary
-------------
Succeeded: 13 (changed=4)
Failed: 0
-------------
Total states run: 13

2.部署Haproxy服务

2.1Haproxy状态文件

[root@linux-node1 haproxy]# pwd
/srv/salt/prod/modules/haproxy
[root@linux-node1 haproxy]# cat init.sls 
include:
  - modules.pkg.init
# Ship the haproxy 1.5.3 source tarball and build it once; the "unless"
# guard keys off the install prefix, and the build requires the compiler
# toolchain installed by pkg-init.
haproxy-install:
  file.managed:
    - name: /usr/local/src/haproxy-1.5.3.tar.gz
    - source: salt://modules/haproxy/files/haproxy-1.5.3.tar.gz
    - mode: 755
    - user: root
    - group: root
  cmd.run:
    - name: cd /usr/local/src && tar zxf haproxy-1.5.3.tar.gz && cd haproxy-1.5.3 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy
    - unless: test -d /usr/local/haproxy
    - require:
      - pkg: pkg-init
      - file: haproxy-install
# SysV init script so the service can be managed via chkconfig/service.
/etc/init.d/haproxy:
  file.managed:
    - source: salt://modules/haproxy/files/haproxy.init
    - mode: 755
    - user: root
    - group: root
    - require:
      - cmd: haproxy-install
# Allow haproxy to bind the keepalived VIP even when it is not local yet.
net.ipv4.ip_nonlocal_bind:
  sysctl.present:
    - value: 1
haproxy-config-dir:
  file.directory:
    - name: /etc/haproxy
    - mode: 755
    - user: root
    - group: root
# Register the init script; "unless" keeps chkconfig --add idempotent.
haproxy-init:
  cmd.run:
    - name: chkconfig --add haproxy
    - unless: chkconfig --list | grep haproxy
    - require:
      - file: /etc/init.d/haproxy

2.2Haproxy所依赖的安装包状态文件

[root@linux-node1 pkg]# pwd
/srv/salt/prod/modules/pkg
[root@linux-node1 pkg]# cat init.sls 
# Build toolchain and libraries needed to compile haproxy/keepalived
# from source on the minions.
pkg-init:
  pkg.installed:
    - names:
      - gcc
      - gcc-c++
      - glibc
      - make
      - autoconf
      - openssl
      - openssl-devel

2.3手动执行验证

[root@linux-node1 ~]# salt '*' state.sls modules.haproxy.init saltenv=prod
.......省略部分.......
Summary
-------------
Succeeded: 13 (changed=9)
Failed: 0
-------------
Total states run: 13

2.4如果是每个业务一个负载均衡需要放到shop-user下进行管理

[root@linux-node1 ~]# cd /srv/salt/prod
[root@linux-node1 prod]# mkdir lb-outside
[root@linux-node1 prod]# cd lb-outside/
[root@linux-node1 lb-outside]# mkdir files

3.部署keepalived服务

3.1Keepalived安装状态文件

[root@linux-node1 keepalived]# cat init.sls 
include:
  - modules.pkg.init

# Build keepalived 1.2.17 from source; the tarball is shipped from the
# master and only configured/compiled when the install prefix is absent.
keepalived-install:
  file.managed:
    - name: /usr/local/src/keepalived-1.2.17.tar.gz
    - source: salt://modules/keepalived/files/keepalived-1.2.17.tar.gz
    - mode: 755
    - user: root
    - group: root
  cmd.run:
    - name: cd /usr/local/src && tar zxf keepalived-1.2.17.tar.gz && cd keepalived-1.2.17 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install
    - unless: test -d /usr/local/keepalived
    - require:
      # the source build needs the toolchain from pkg-init, same as the
      # haproxy state (was missing here)
      - pkg: pkg-init
      - file: keepalived-install

/etc/sysconfig/keepalived:
  file.managed:
    - source: salt://modules/keepalived/files/keepalived.sysconfig
    - mode: 644
    - user: root
    - group: root

/etc/init.d/keepalived:
  file.managed:
    - source: salt://modules/keepalived/files/keepalived.init
    - mode: 755
    - user: root
    - group: root

# Register the SysV init script; "unless" keeps chkconfig --add idempotent.
keepalived-init:
  cmd.run:
    - name: chkconfig --add keepalived
    - unless: chkconfig --list | grep keepalived
    - require:
      - file: /etc/init.d/keepalived

/etc/keepalived:
  file.directory:
    - user: root
    - group: root

3.2手动执行验证

 [root@linux-node1 ~]# salt '*' state.sls modules.keepalived.init saltenv=prod
Summary
-------------
Succeeded: 13 (changed=6)
Failed: 0
-------------
Total states run: 13

4.keepalived与haproxy的业务配置不能放在基础模块里面,哪个业务使用就放到哪个业务的目录下进行管理

 [root@linux-node1 /srv/salt/prod/lb-outside]# tree
.
├── files
└── haproxy-keepalived.sls

1 directory, 1 file
###haproxy的配置与keepalive的配置进行关联
 [root@linux-node1 /srv/salt/prod/lb-outside]# vim haproxy-outside.sls
# Business-level haproxy state: manage the config file and keep the
# service running, on top of the generic modules.haproxy.init build.
include:
  - modules.haproxy.init

haproxy-service:
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://lb-outside/files/haproxy-outside.cfg
    - user: root
    - group: root
    - mode: 644
  service.running:
    - name: haproxy
    - enable: True
    - reload: True
    - require:
      - cmd: haproxy-init   ## the init script must be registered as a system service first
    - watch:
      - file: haproxy-service    ## config change triggers a reload (requires reload: True)
####haproxy配置文件内容
 [root@linux-node1 /srv/salt/prod/lb-outside/files]# cat haproxy-outside.cfg
global     ### global section - applies to the whole process
maxconn 100000
chroot /usr/local/haproxy
uid 99 
gid 99 
daemon
nbproc 1 
pidfile /usr/local/haproxy/logs/haproxy.pid 
log 127.0.0.1 local3 info

defaults   ### defaults inherited by the proxies below
option http-keep-alive
maxconn 100000
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms

listen stats  ### built-in statistics page
mode http
bind 0.0.0.0:8888
stats enable
stats uri /haproxy-status 
stats auth haproxy:saltstack   ### username:password for the stats page

frontend frontend_www_example_com
    bind 118.190.201.21:80    ## listen on the keepalived VIP
    mode http
    option httplog
    log global
    default_backend backend_www_example_com ### links this frontend to the backend below 

backend backend_www_example_com   ### the backend server pool
   option forwardfor header X-REAL-IP       ### pass the real client IP in a header
   option httpchk HEAD / HTTP/1.0           ### HTTP health check
   balance source                           ### source hashing for session persistence
   server web-node1 118.190.201.11:8080 check inter 2000 rise 30 fall 15
   server web-node2 118.190.201.12:8080 check inter 2000 rise 30 fall 15

4.1手动执行状态

 [root@linux-node1 ~]# salt '*' state.sls lb-outside.haproxy-outside saltenv=prod
.......省略部分.......
Summary
-------------
Succeeded: 15 (changed=2)
Failed: 0
-------------
Total states run: 15

浏览器输入进行查看http://118.190.201.11:8888/haproxy-status

4.2haproxy与keepalived的配置状态

 [root@linux-node1 /srv/salt/prod/lb-outside]# cat haproxy-outside-keepalived.sls 
include:
  - modules.keepalived.init

# Render keepalived.conf with per-node VRRP parameters: node1 is MASTER
# with the higher priority, node2 is BACKUP.
keepalived-server:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://lb-outside/files/haproxy-outside-keepalived.conf
    - mode: 644
    - user: root
    - group: root
    - template: jinja
    # Pass template variables through the documented "context" argument
    # instead of bare kwargs on file.managed.
    {% if grains['fqdn'] == 'linux-node1.example.com' %}
    - context:
        ROUTEID: haproxy_ha
        STATEID: MASTER
        PRIORITYID: 150
    {% elif grains['fqdn'] == 'linux-node2.example.com' %}
    - context:
        ROUTEID: haproxy_ha
        STATEID: BACKUP
        PRIORITYID: 100
    {% endif %}
  service.running:
    - name: keepalived
    - enable: True
    - watch:
      - file: keepalived-server

###keepalived的配置文件内容
 [root@linux-node1 /srv/salt/prod/lb-outside/files]# vim haproxy-outside-keepalived.conf 
! Configuration File for keepalived
! VRRP failover for the haproxy pair; ROUTEID/STATEID/PRIORITYID are
! filled in per node by the Salt Jinja template.
global_defs {
    notification_email {
        saltstack@example.com
    }
    notification_email_from keepalived@example.com
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id {{ROUTEID}}
}

vrrp_instance haproxy_ha {
    state {{STATEID}}
    interface eth0
    virtual_router_id 36
    priority {{PRIORITYID}}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        118.190.201.21
    }
}

4.3手动执行状态

 [root@linux-node1 ~]# salt '*' state.sls lb-outside.haproxy-outside-keepalived saltenv=prod
.......省略部分......
Summary
-------------
Succeeded: 15 (changed=2)
Failed: 0
-------------
Total states run: 15
###监听VIP
 [root@linux-node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
 inet 127.0.0.1/8 scope host lo
 valid_lft forever preferred_lft forever
 inet6 ::1/128 scope host 
 valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
 link/ether 00:0c:29:c1:2f:12 brd ff:ff:ff:ff:ff:ff
 inet 118.190.201.11/24 brd 118.190.201.255 scope global eth0
 valid_lft forever preferred_lft forever
 inet 118.190.201.21/32 scope global eth0
 valid_lft forever preferred_lft forever
 inet6 fe80::20c:29ff:fec1:2f12/64 scope link 
 valid_lft forever preferred_lft forever
###停止node1节点上的keepalived服务
 [root@linux-node1 ~]# /etc/init.d/keepalived stop
###VIP出现在node2上
 [root@linux-node2 ~]# ip add li
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
 inet 127.0.0.1/8 scope host lo
 valid_lft forever preferred_lft forever
 inet6 ::1/128 scope host 
 valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
 link/ether 00:0c:29:2c:94:43 brd ff:ff:ff:ff:ff:ff
 inet 118.190.201.12/24 brd 118.190.201.255 scope global eth0
 valid_lft forever preferred_lft forever
 inet 118.190.201.21/32 scope global eth0
 valid_lft forever preferred_lft forever
 inet6 fe80::20c:29ff:fe2c:9443/64 scope link 
 valid_lft forever preferred_lft forever

4.4redis服务状态文件

 [root@linux-node1 /srv/salt/prod/redis-cluster]# tree
.
├── master.sls
└── slave.sls

0 directories, 2 files
 [root@linux-node1 /srv/salt/prod/redis-cluster]# cat master.sls 
# redis-cluster master role: just the shared redis install/config state.
include:
  - modules.redis.init
 [root@linux-node1 /srv/salt/prod/redis-cluster]# cat slave.sls 
include:
  - modules.redis.init

# Make this node replicate from the master unless it already reports
# role:slave. NOTE(review): IPs are hard-coded; pillar would be cleaner.
slave_config:
  cmd.run:
    - name: redis-cli -h 118.190.201.12 slaveof 118.190.201.11 6379
    - unless: redis-cli -h 118.190.201.12 info |grep role:slave
    - require:
      - service: redis-service

4.5编辑top_file文件

 [root@linux-node1 /srv/salt/base]# vim top.sls 
# base environment: common initialisation applied to every minion
base:
  '*':
    - init.init-all

# prod environment: full per-node role assignment (db, redis, web, LB+VIP)
prod:
  'linux-node1.example.com':
    - shop-user.mysql-master
    - redis-cluster.master
    - shop-user.web
    - lb-outside.haproxy-outside
    - lb-outside.haproxy-outside-keepalived

  'linux-node2.example.com':
    - shop-user.mysql-slave
    - redis-cluster.slave
    - shop-user.web
    - lb-outside.haproxy-outside
    - lb-outside.haproxy-outside-keepalived
0
如无特殊说明,文章均为本站原创,转载请注明出处

该文章由 发布

这货来去如风,什么鬼都没留下!!!
发表我的评论

Hi,请填写昵称和邮箱!

取消评论
代码 贴图 加粗 链接 删除线 签到