[TOC]
软件安装
执行安装命令
apt install ansible
# 查看版本
ansible --version
# 结果
ansible [core 2.16.6]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0] (/usr/bin/python3)
jinja version = 3.0.3
libyaml = True
# 温馨提示:若安装失败,提示`python3-crypto : Depends: python3 (< 3.9) but 3.10.6-1~22.04 is to be installed`,可使用下述命令先更新仓库地址,再执行安装命令
apt update
apt install software-properties-common
apt-add-repository --yes --update ppa:ansible/ansible
apt install ansible
目录说明
目录 | 备注 |
---|---|
/etc/ansible/ | 主目录 |
/etc/ansible/ansible.cfg | 配置文件,如公共配置,格式: [defaults] log_path=/var/log/ansible.log |
/etc/ansible/hosts | 默认的资源访问清单,即可通信主机列表,可通过ansible.cfg配置文件中的inventory属性更改 |
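下面是一个最小化的 ansible.cfg 配置示意(路径与取值均为示例,按需调整):
[defaults]
# 默认资源清单文件
inventory = /etc/ansible/hosts
# 日志输出路径
log_path = /var/log/ansible.log
# 关闭主机密钥检测
host_key_checking = False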
配置免密登录(可选)
下述步骤是使用root用户操作时的记录
# 如,在A机器上使用 ssh-keygen 命令生成id_rsa、id_rsa.pub
ssh-keygen
# 直接使用默认的,下一步即可
# 查看公钥
cat ~/.ssh/id_rsa.pub
# 在B机器上配置A机器生成的公钥
# 在需要免密登录的机器(即B机器)上,将上述步骤生成的id_rsa.pub的内容追加到authorized_keys文件中即可
vim ~/.ssh/authorized_keys
# 配置完毕后,即可在A机器上直接使用ssh命令免密登录B机器
ssh B
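若目标机器(B)允许密码登录,也可直接使用 ssh-copy-id 命令完成上述公钥分发(主机与用户名为示例,按实际替换):
# 将本机(A)的公钥追加到B机器的authorized_keys中
ssh-copy-id root@B机器IP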
修改默认配置
# 新增日志存储文件
touch /etc/ansible/ansible.log
chmod 600 /etc/ansible/ansible.log
# 添加配置
vim /etc/ansible/ansible.cfg
# 在[defaults]下添加如下配置
# 配置日志路径
log_path=/etc/ansible/ansible.log
# 不检测主机密钥,默认为True。开启主机密钥检测时,使用密码方式连接会提示:Using a SSH password instead of a key is not possible because Host Key checking is enabled
host_key_checking = False
# 控制执行ansible-playbook时是否默认提示输入密码,默认为no
# 配置了免密登录,此选项可不用配置
ask_pass=True
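修改完成后,可用 Ansible 自带的 ansible-config 命令确认配置是否生效:
# 仅输出与默认值不同的配置项
ansible-config dump --only-changed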
配置Inventory
清单配置
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:common_hostname
[common_hostname]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
配置参数说明
参数名称 | 备注 |
---|---|
ansible_ssh_host | 将要连接的远程主机名.与你想要设定的主机的别名不同的话,可通过此变量设置. |
ansible_ssh_port | ssh端口号.如果不是默认的端口号,通过此变量设置. |
ansible_ssh_user | 默认的 ssh 用户名 |
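清单配置完成后,可先用下述命令检查分组与主机是否解析正确:
# 列出指定分组下的主机
ansible common_hostname --list-hosts
# 以JSON形式查看完整的清单结构
ansible-inventory --list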
简单案例
若未配置免密登录,执行下述命令时需要添加--ask-pass参数并输入密码
查看资源主机是否可以ping通
# -m 用于指定模块,默认值为command;此处使用ping模块检测主机连通性
ansible all -m ping
# 结果:
192.168.13.247 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
192.168.13.51 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
192.168.13.16 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
输出当前时间到目标主机
ansible all -m shell -a "date -R > /root/date.txt"
重启目标主机的redis服务
# state可选值:reloaded, restarted, started, stopped
ansible ip_group_test -m service -a "name=redis-7011 state=restarted"
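若只想确认服务当前状态而不做变更,也可以临时用 shell 模块查看(服务名以实际为准;命令返回非0时 ansible 会将该主机标记为失败,仅作查看用):
ansible ip_group_test -m shell -a "systemctl is-active redis-7011"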
playbook的简单使用
服务器初始化
修改主机名称、新增用户、配置IP和域名映射
编写playbook
mkdir -p /etc/ansible/playbook/
vim /etc/ansible/playbook/server-init.yaml
# begin
---
- hosts: common_hostname
vars:
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
- ip: 192.168.13.16
hostname: k8s-worker-01
- ip: 192.168.13.51
hostname: k8s-worker-02
tasks:
- name: 设置主机名称
# inventory_hostname 为固定的变量名,即当前主机在资源清单(/etc/ansible/hosts)中定义的主机别名
shell: hostnamectl set-hostname "{{ inventory_hostname }}"
- name: 新建 test 用户
user:
name: test
home: /home/test
shell: /bin/bash
# 设置密码,此处需要使用加密函数
password: "{{ 'test' | password_hash }}"
- name: 判断用户是否存在 sudo 权限
shell: grep -c "test ALL=(ALL) ALL" /etc/sudoers
# 将结果存储在变量中
register: sudo_exist_flag
failed_when: sudo_exist_flag.rc == -1
- name: 不存在sudo权限时设置sudo权限
shell: sed -i '44 a test ALL=(ALL) ALL' /etc/sudoers
when: sudo_exist_flag.stdout == '0'
- name: 判断/etc/hosts中是否已经存在ip、域名配置
shell: grep -c "{{item.ip}} {{item.hostname}}" /etc/hosts
with_items: "{{hostnames}}"
register: hosts_exist_flag
failed_when: hosts_exist_flag.rc == -1
- name: 存储ip、域名配置是否存在判断结果
set_fact:
match_count: "{{hosts_exist_flag.results | json_query(query_ip_match_condition) | join('') }}"
vars:
query_ip_match_condition: "[?item.hostname=='{{inventory_hostname}}'].stdout"
- name: 【开始】添加IP、域名配置
shell: echo '\n# ip、域名配置 begin' >> /etc/hosts
when: match_count == "0"
- name: 正在配置ip、域名
shell: echo '{{item.ip}} {{item.hostname}}' >> /etc/hosts
when: match_count == "0"
with_items: "{{hostnames}}"
- name: 【结束】添加IP、域名配置
shell: echo '# ip、域名配置 end' >> /etc/hosts
when: match_count == "0"
# end
执行playbook
ansible-playbook /etc/ansible/playbook/server-init.yaml
其他
密码加密参考:https://docs.ansible.com/ansible/latest/collections/ansible/builtin/password_hash_filter.html
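下面是 password_hash 过滤器的一个最小使用示意(示例playbook,'test' 为演示用明文,实际请替换):
---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: 生成 sha512 密码哈希示例
      debug:
        msg: "{{ 'test' | password_hash('sha512') }}"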
playbook中import语句的使用
导入task
# 需要导入的 import-task-demo.yaml
---
- name: import task demo
shell: echo hello import task demo
# 需要引入task的文件 all-task.yaml,2个文件存放在平级目录
---
- hosts: all
tasks:
- name: demo
service:
name: httpd
state: started
- import_tasks: import-task-demo.yaml
导入playbook
# 需要导入的 import-playbook-demo.yaml,被导入的文件本身必须是一个完整的playbook
---
- hosts: all
tasks:
- name: import playbook demo
shell: echo hello import playbook demo
# 需要引入 playbook 的文件 all-playbook.yaml,2个文件存放在平级目录
# 注意:import_playbook 只能写在play级别,不能作为task使用
---
- hosts: all
tasks:
- name: demo
service:
name: httpd
state: started
- import_playbook: import-playbook-demo.yaml
playbook的使用
角色(Roles)说明
Roles 基于一个已知的文件结构,去自动的加载某些 vars_files,tasks 以及 handlers
详情见:Role详解
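可以用 ansible-galaxy 命令生成一个标准的角色目录骨架,大致结构如下(以实际版本生成结果为准):
ansible-galaxy init demo-role
# 生成的目录结构示意
# demo-role/
# ├── README.md
# ├── defaults/main.yml
# ├── files/
# ├── handlers/main.yml
# ├── meta/main.yml
# ├── tasks/main.yml
# ├── templates/
# ├── tests/
# └── vars/main.yml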
服务器初始化(设置主机名、分区、磁盘挂载、新建用户、配置hosts)
创建资源清单
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:server_init,根据实际情况调整
[server_init]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
创建角色目录
mkdir -p /etc/ansible/roles/server-init/
cd /etc/ansible/roles/server-init/
mkdir tasks vars
创建变量
cat > /etc/ansible/roles/server-init/vars/main.yaml << \EOF
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
# 是否使用默认配置,为false时将使用自定义的fstab配置
use_default_fstab: true
# 使用自定义文件系统属性(use_default_fstab=false)时下列值必须定义
fstab:
file_system: ''
mount_point: ''
type: ''
- ip: 192.168.13.16
hostname: k8s-worker-01
- ip: 192.168.13.51
hostname: k8s-worker-02
# 新建用户时用到的变量
add_user:
name: test
password: test
home: /home/test
shell: /bin/bash
# 是否允许使用sudo命令
enable_sudo: true
default:
# 默认的文件系统配置
fstab:
# 设备或文件系统源:可以是设备文件(如/dev/sda1)、卷标(如LABEL=root)、UUID(如UUID=123e4567-e89b-12d3-a456-426655440000)、网络文件系统路径(如nfs://server/export/path)等
file_system: /dev/vdb
# 挂载点,如:/home
mount_point: /mnt/home
# 设备上文件系统的类型,如ext4, xfs, ntfs, swap, nfs, vfat等
type: 'ext4'
EOF
创建磁盘分区任务
使用默认文件系统配置
cat > /etc/ansible/roles/server-init/tasks/disk-partition-default.yaml << \EOF
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
# 自定义系统文件配置时需要在指定hostnames下配置fstab属性
- fail:
msg: "使用默认文件系统配置时【default.fstab.file_system、default.fstab.mount_point、default.fstab.type】不能为空"
when: current_host.use_default_fstab == true and ((default.fstab.file_system | default('') | length == 0) or (default.fstab.mount_point | default('') | length == 0) or (default.fstab.type | default('') | length == 0))
- name: 查找挂载的磁盘目录
find:
paths: "{{default.fstab.mount_point}}"
register: valid_mount_point_result
when: current_host.use_default_fstab == true
- fail:
# 校验挂载的磁盘目录是否为空
msg: '需要挂载的目录:{{default.fstab.mount_point}} 不为空'
when: current_host.use_default_fstab == true and valid_mount_point_result.matched > 0
- name: 校验虚拟磁盘是否存在
stat:
path: "{{default.fstab.file_system}}"
register: valid_fstab_result
when: current_host.use_default_fstab == true
- fail:
msg: "需要挂载的文件系统:{{default.fstab.file_system}} 不存在"
when: current_host.use_default_fstab == true and valid_fstab_result.stat.exists == false
- name: 新建gpt分区
parted:
device: "{{default.fstab.file_system}}"
label: gpt
number: 1
state: present
part_type: primary
when: current_host.use_default_fstab == true
- name: 格式化分区
filesystem:
dev: "{{default.fstab.file_system + '1'}}"
fstype: "{{default.fstab.type}}"
when: current_host.use_default_fstab == true
- name: 挂载分区
mount:
# 默认取第一个分区的设备路径,和 新建gpt分区 中的number属性保持一致
src: "{{default.fstab.file_system + '1'}}"
path: "{{default.fstab.mount_point}}"
fstype: "{{default.fstab.type}}"
state: mounted
when: current_host.use_default_fstab == true
EOF
使用自定义文件系统配置
cat > /etc/ansible/roles/server-init/tasks/disk-partition-custom.yaml << \EOF
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
# 自定义系统文件配置时需要在指定hostnames下配置fstab属性
- fail:
msg: "使用自定义文件系统配置时【{{current_host.hostname}}】主机变量中【fstab.file_system、fstab.mount_point、fstab.type】不能为空"
when: current_host.use_default_fstab == false and ((current_host.fstab.file_system | default('') | length == 0) or (current_host.fstab.mount_point | default('') | length == 0) or (current_host.fstab.type | default('') | length == 0))
- name: 查找挂载的磁盘目录
find:
paths: "{{current_host.fstab.mount_point}}"
register: valid_mount_point_result
when: current_host.use_default_fstab == false
- fail:
# 校验挂载的磁盘目录是否为空
msg: '需要挂载的目录:{{current_host.fstab.mount_point}} 不为空'
when: current_host.use_default_fstab == false and valid_mount_point_result.matched > 0
- name: 校验虚拟磁盘是否存在
stat:
path: "{{current_host.fstab.file_system}}"
register: valid_fstab_result
when: current_host.use_default_fstab == false
- fail:
msg: "需要挂载的文件系统:{{current_host.fstab.file_system}} 不存在"
when: current_host.use_default_fstab == false and valid_fstab_result.stat.exists == false
- name: 新建gpt分区
parted:
device: "{{current_host.fstab.file_system}}"
label: gpt
number: 1
state: present
part_type: primary
when: current_host.use_default_fstab == false
- name: 格式化分区
filesystem:
dev: "{{current_host.fstab.file_system + '1'}}"
fstype: "{{current_host.fstab.type}}"
when: current_host.use_default_fstab == false
- name: 挂载分区
mount:
# 默认取第一个分区的设备路径,和 新建gpt分区 中的number属性保持一致
src: "{{current_host.fstab.file_system + '1'}}"
path: "{{current_host.fstab.mount_point}}"
fstype: "{{current_host.fstab.type}}"
state: mounted
when: current_host.use_default_fstab == false
EOF
创建设置主机名称任务
cat > /etc/ansible/roles/server-init/tasks/set-hostname.yaml << \EOF
- name: 设置主机名称
# inventory_hostname 为固定的变量名,即当前主机在资源清单(/etc/ansible/hosts)中定义的别名,即每行第1列的值
shell: hostnamectl set-hostname "{{ inventory_hostname }}"
EOF
创建新建用户任务
cat > /etc/ansible/roles/server-init/tasks/add-user.yaml << \EOF
- name: 查询用户信息
getent:
database: passwd
register: user_facts
- name: 设置用户是否存在标识
vars:
query_user_exists_condition: "{{add_user.name}}"
set_fact:
user_exists_flag: "{{(ansible_facts.getent_passwd | json_query(add_user.name)) != None}}"
- name: 用户不存在时新增用户
user:
name: "{{add_user.name}}"
home: "{{add_user.home}}"
shell: "{{add_user.shell}}"
# 设置密码,此处需要使用加密函数
password: "{{ add_user.password | password_hash }}"
when: user_exists_flag == false
- name: 判断用户是否存在 sudo 权限
shell: grep -c "{{add_user.name}} ALL=(ALL) ALL" /etc/sudoers
# 将结果存储在变量中
register: sudo_exist_flag
failed_when: sudo_exist_flag.rc == -1
when: user_exists_flag == false
- name: 新建用户后设置sudo权限
shell: sed -i '44 a {{add_user.name}} ALL=(ALL) ALL' /etc/sudoers
when: user_exists_flag == false and add_user.enable_sudo == true and sudo_exist_flag.stdout == '0'
EOF
创建设置hosts任务
cat > /etc/ansible/roles/server-init/tasks/set-hosts.yaml << \EOF
- name: 判断/etc/hosts中是否已经存在ip、域名配置
shell: grep -c "{{item.ip}} {{item.hostname}}" /etc/hosts
with_items: "{{hostnames}}"
register: hosts_exist_flag
failed_when: hosts_exist_flag.rc == -1
- name: 存储ip、域名配置是否存在判断结果
set_fact:
match_count: "{{hosts_exist_flag.results | json_query(query_hostname_match_condition) | join('') }}"
vars:
query_hostname_match_condition: "[?item.hostname=='{{inventory_hostname}}'].stdout"
- name: 【开始】添加IP、域名配置
shell: echo '\n# ip、域名配置 begin' >> /etc/hosts
when: match_count == "0"
- name: 正在配置ip、域名
shell: echo '{{item.ip}} {{item.hostname}}' >> /etc/hosts
when: match_count == "0"
with_items: "{{hostnames}}"
- name: 【结束】添加IP、域名配置
shell: echo '# ip、域名配置 end' >> /etc/hosts
when: match_count == "0"
EOF
汇总任务
将上述任务添加至main.yaml中
cat > /etc/ansible/roles/server-init/tasks/main.yaml << \EOF
- import_tasks: disk-partition-default.yaml
- import_tasks: disk-partition-custom.yaml
- import_tasks: set-hostname.yaml
- import_tasks: add-user.yaml
- import_tasks: set-hosts.yaml
EOF
新建执行文件
cat > /etc/ansible/server-init.yaml << \EOF
---
- hosts: server_init
roles:
- server-init
EOF
执行脚本
ansible-playbook /etc/ansible/server-init.yaml
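正式执行前,可先做语法检查或试运行(--check 为预演模式,不会真正修改目标主机;部分shell任务在预演模式下可能被跳过):
# 语法检查
ansible-playbook /etc/ansible/server-init.yaml --syntax-check
# 试运行(check模式)
ansible-playbook /etc/ansible/server-init.yaml --check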
安装docker、docker compose
创建资源清单
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:docker,根据实际情况调整
[docker]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
创建角色目录
mkdir -p /etc/ansible/roles/docker/
cd /etc/ansible/roles/docker/
mkdir tasks vars files files/x86_64 files/aarch64
创建docker相关文件
docker 配置文件
cat > /etc/ansible/roles/docker/files/daemon.json << \EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries":["113.57.95.26:7030"],
"registry-mirrors": ["https://kn0t2bca.mirror.aliyuncs.com"],
"data-root":"/home/zhoujibin/docker/root-data/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "30m",
"max-file": "2"
}
}
EOF
docker 服务自启文件
cat > /etc/ansible/roles/docker/files/docker.service << \EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
#Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
docker相关下载地址
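下述为常用的官方下载入口,仅供参考,具体版本与架构(x86_64/aarch64)按实际需求选择:
# docker 静态二进制包
# https://download.docker.com/linux/static/stable/
# docker-compose 二进制发布页
# https://github.com/docker/compose/releases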
创建变量
cat > /etc/ansible/roles/docker/vars/main.yaml << \EOF
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
# 安装docker同时是否安装docker-compose
install_docker_compose: true
- ip: 192.168.13.16
hostname: k8s-worker-01
# 安装docker同时是否安装docker-compose
install_docker_compose: false
- ip: 192.168.13.51
hostname: k8s-worker-02
# 安装docker同时是否安装docker-compose
install_docker_compose: false
# docker安装方式:本地/网络获取,优先级:本地>网络获取
install_docker_from:
# 从本地安装,ansible_facts.architecture变量示例:x86_64、aarch64
local: "/etc/ansible/roles/docker/files/{{ansible_facts.architecture}}/docker-20.10.9.tgz"
# docker-compose安装方式:本地/网络获取,优先级:本地>网络获取
install_docker_compose_from:
# 从本地安装,ansible_facts.architecture变量示例:x86_64、aarch64
local: "/etc/ansible/roles/docker/files/{{ansible_facts.architecture}}/docker-compose-linux-x86_64"
# docker 自启文件的路径
docker_service_path: /etc/ansible/roles/docker/files/docker.service
# docker daemon.json 配置文件的路径
docker_daemon_path: /etc/ansible/roles/docker/files/daemon.json
EOF
创建安装任务
cat > /etc/ansible/roles/docker/tasks/main.yaml << \EOF
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
# 自定义系统文件配置时需要在指定hostnames下配置fstab属性
# 安装方式校验
- fail:
msg: "安装docker时需要指定安装方式,【install_docker_from.local】不能为空"
when:
- install_docker_from.local is undefined or install_docker_from.local == None or install_docker_from.local | length == 0
- name: 查看docker版本
shell: "docker --version"
register: docker_version_result
failed_when: docker_version_result.rc == -1
- name: 校验docker是否已经安装
fail:
msg: "docker已安装"
when: docker_version_result.rc == 0
- name: docker安装包目录是否存在,不存在自动创建
file:
path: "{{install_docker_from.local | dirname}}"
state: directory
when: not (install_docker_from.local is undefined or install_docker_from.local == None or install_docker_from.local | length == 0)
- name: 复制docker安装包到目标主机
copy:
src: "{{install_docker_from.local}}"
dest: "{{install_docker_from.local}}"
backup: yes
when: not (install_docker_from.local is undefined or install_docker_from.local == None or install_docker_from.local | length == 0)
- name: 解压docker安装包
shell:
cd {{install_docker_from.local | dirname }}
&& rm -rf docker
&& tar -zxvf {{install_docker_from.local}}
&& cp docker/* /usr/bin/
when: not (install_docker_from.local is undefined or install_docker_from.local == None or install_docker_from.local | length == 0)
- name: 复制自启文件到/etc/systemd/system目录
copy:
src: "{{docker_service_path}}"
dest: "/etc/systemd/system/{{docker_service_path | basename}}"
backup: yes
when: not (docker_service_path is undefined or docker_service_path == None or docker_service_path | length == 0)
- name: docker配置文件目录是否存在,不存在自动创建
file:
path: "/etc/docker/"
state: directory
when: not (docker_daemon_path is undefined or docker_daemon_path == None or docker_daemon_path | length == 0)
- name: 复制docker配置文件daemon.json至/etc/docker/目录
copy:
src: "{{docker_daemon_path}}"
dest: "/etc/docker/{{docker_daemon_path | basename}}"
backup: yes
when: not (docker_daemon_path is undefined or docker_daemon_path == None or docker_daemon_path | length == 0)
- name: 重载systemd并设置docker开机自启
shell: |
systemctl daemon-reload
systemctl enable docker
systemctl start docker
when: not (docker_service_path is undefined or docker_service_path == None or docker_service_path | length == 0)
- name: 查看安装成功的 docker 版本
shell: docker --version
register: installed_docker_version
changed_when: false
ignore_errors: true
# 确保在Docker容器内运行时跳过此任务
when: ansible_virtualization_type != "docker"
- name: 查看docker-compose版本
shell: "docker-compose"
register: docker_compose_version_result
failed_when: docker_compose_version_result.rc == -1
when: current_host.install_docker_compose == true
- name: 校验 docker-compose 是否已经安装
fail:
msg: "docker-compose 已安装"
when:
- current_host.install_docker_compose == true
- docker_compose_version_result.rc == 0
- name: docker-compose 安装包目录是否存在,不存在自动创建
file:
path: "{{install_docker_compose_from.local | dirname}}"
state: directory
when:
- current_host.install_docker_compose == true
- not (install_docker_compose_from.local is undefined or install_docker_compose_from.local == None or install_docker_compose_from.local | length == 0)
- name: 复制 docker-compose 安装包到目标主机
copy:
src: "{{install_docker_compose_from.local}}"
dest: "{{install_docker_compose_from.local}}"
backup: yes
when:
- current_host.install_docker_compose == true
- not (install_docker_compose_from.local is undefined or install_docker_compose_from.local == None or install_docker_compose_from.local | length == 0)
- name: 安装 docker-compose
shell: |
cp {{install_docker_compose_from.local}} /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
when:
- current_host.install_docker_compose == true
- not (install_docker_compose_from.local is undefined or install_docker_compose_from.local == None or install_docker_compose_from.local | length == 0)
- name: 查看安装成功的 docker-compose 版本
shell: docker-compose version
register: installed_docker_compose_version
changed_when: false
ignore_errors: true
when: current_host.install_docker_compose == true
- name: 查看安装的 docker 版本
debug:
msg: "{{installed_docker_version.stdout}}"
- name: 查看安装的 docker-compose 版本
debug:
msg: "{{installed_docker_compose_version.stdout}}"
when: current_host.install_docker_compose == true
EOF
创建执行文件
cat > /etc/ansible/install-docker.yaml << \EOF
---
- hosts: docker
roles:
- docker
EOF
执行脚本
ansible-playbook /etc/ansible/install-docker.yaml
卸载docker、docker-compose命令(下述命令仅在测试时使用)
# 卸载docker参考:https://blog.csdn.net/qq_45495857/article/details/113743109
service docker stop
apt-get remove docker docker-engine docker.io containerd runc
apt-get autoremove docker*
rm -rf /usr/bin/docker
rm -rf /run/docker
# 卸载docker-compose
rm -rf /usr/local/bin/docker-compose
rm -rf /usr/bin/docker-compose
docker-compose安装nginx
创建资源清单
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:nginx,根据实际情况调整
[nginx]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
EOF
创建角色目录
mkdir -p /etc/ansible/roles/nginx/
cd /etc/ansible/roles/nginx/
mkdir tasks vars templates files
创建nginx启动时依赖的文件
mime.types
cat > /etc/ansible/roles/nginx/files/mime.types << \EOF
types {
text/html html htm shtml;
text/css css;
text/xml xml;
image/gif gif;
image/jpeg jpeg jpg;
application/javascript js;
application/atom+xml atom;
application/rss+xml rss;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/png png;
image/svg+xml svg svgz;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/webp webp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
font/woff woff;
font/woff2 woff2;
application/java-archive jar war ear;
application/json json;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.apple.mpegurl m3u8;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/vnd.ms-excel xls;
application/vnd.ms-fontobject eot;
application/vnd.ms-powerpoint ppt;
application/vnd.oasis.opendocument.graphics odg;
application/vnd.oasis.opendocument.presentation odp;
application/vnd.oasis.opendocument.spreadsheet ods;
application/vnd.oasis.opendocument.text odt;
application/vnd.openxmlformats-officedocument.presentationml.presentation
pptx;
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
xlsx;
application/vnd.openxmlformats-officedocument.wordprocessingml.document
docx;
application/vnd.wap.wmlc wmlc;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/xspf+xml xspf;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream iso img;
application/octet-stream msi msp msm;
audio/midi mid midi kar;
audio/mpeg mp3;
audio/ogg ogg;
audio/x-m4a m4a;
audio/x-realaudio ra;
video/3gpp 3gpp 3gp;
video/mp2t ts;
video/mp4 mp4;
video/mpeg mpeg mpg;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-m4v m4v;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
EOF
创建nginx相关变量
cat > /etc/ansible/roles/nginx/vars/main.yaml << \EOF
nginx:
image: 113.57.95.26:7030/components/nginx
# 此处填写nginx配置文件中用到的host,格式:域名:ip,一般为:网关域名+ip
extra_hosts:
- "local.test:192.168.13.247"
# 下列版本在运行过程中只会选择一个,结果会存储在: target_compoment_version 变量中
component_version:
# amd架构下的nginx版本
x86_64: 1.18
# arm架构下的版本
aarch64: 1.18-arm64
# 存放nginx compose 模板文件的目录
template_compose_file_path: /etc/ansible/roles/nginx/templates/compose.yaml
# 存放渲染完毕的 nginx compose文件的目录
compose_file_dir: /home/zhoujibin/nginx
# nginx安装目录,默认:/etc/nginx
install_dir:
# 运行nginx的服务名称,默认为当前nginx所在的机器ip
server_name:
# nginx监听的端口,默认8080
listen: 9023
# 日志目录,默认:/etc/nginx/log
log_dir:
# 服务配置文件目录,默认:/etc/nginx/server
server_dir:
# stream 模块路径,默认:/etc/nginx/stream
stream_dir:
# 项目配置名称,默认:default-server.conf
project_config_file_name:
# 前端配置
frontend:
# 前端项目路径,默认:/etc/nginx/frontend
project_dir:
# 前端项目资源路径,默认:/etc/nginx/frontend/resource
project_resource_dir:
# 前端请求url前缀,默认/web/
url_prefix:
# 后端api请求前缀
backend:
# gateway网关域名
gateway: local.test
# 后端api请求前缀
api_prefix:
# 高德地图相关配置
web_map:
jscode: b770bd7bfd352f641450ad377bb1e434
url: https://restapi.amap.com/
# 政务地址配置
gov_map:
url: http://58.48.28.181:28888/ServiceAdapter/MAP/
# docker相关账户配置信息,用于登录镜像仓库
docker:
registry_url: 113.57.95.26:7030
username: docker
password: RzpwKwB1Fk
EOF
创建模板文件
nginx compose
cat > /etc/ansible/roles/nginx/templates/compose.yaml << \EOF
services:
nginx:
container_name: nginx-service
image: "{{nginx.image}}:{{target_compoment_version}}"
volumes:
- {{nginx.install_dir}}:/etc/nginx/
- {{nginx.log_dir}}:/var/log/nginx/
- {{nginx.frontend.project_dir}}:/home/project/frontend
- {{nginx.frontend.project_resource_dir}}:/home/project/frontend/resource
extra_hosts:
{% for host in nginx.extra_hosts %}
- "{{host}}"
{% endfor %}
restart: always
deploy:
resources:
limits:
memory: 1536M
reservations:
memory: 768M
network_mode: "host"
EOF
nginx.conf
cat > /etc/ansible/roles/nginx/templates/nginx.conf << \EOF
#user nobody;
worker_processes 4;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 10000;
}
http {
include mime.types;
default_type application/octet-stream;
set_real_ip_from 0.0.0.0/0;
real_ip_header x-forwarded-for;
real_ip_recursive on;
include {{ nginx.server_dir}}/*.conf;
client_max_body_size 50m;
client_body_buffer_size 50m;
# 自定义请求头
underscores_in_headers on;
# 其他配置xxx
server_tokens off;
# 关闭文件索引
autoindex off;
}
stream {
include {{ nginx.stream_dir}}/*.stream;
}
EOF
default-server.conf
# 默认的服务配置
cat > /etc/ansible/roles/nginx/templates/default-server.conf << \EOF
server {
listen {{nginx.listen}};
server_name {{nginx.server_name}};
charset utf-8;
index index.html;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_connect_timeout 180;
proxy_send_timeout 180;
proxy_read_timeout 180;
send_timeout 180;
gzip on;
gzip_types text/plain application/javascript application/json application/x-javascript text/css application/xml text/javascript application/x-httpd-php;
gzip_disable "MSIE [1-6]\.";
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_comp_level 5;
gzip_vary on;
# web前端工程访问代理
location ^~/{{nginx.frontend.url_prefix}} {
proxy_pass http://{{nginx.backend.gateway}}/{{nginx.frontend.url_prefix}};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# 后端API 接口代理
location ^~ /{{nginx.backend.api_prefix}}/ {
proxy_pass http://{{nginx.backend.gateway}}/{{nginx.backend.api_prefix}};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
# 前端静态项目地址
location / {
root {{nginx.frontend.project_dir}};
index index.html;
}
{% if nginx.web_map.jscode is defined and nginx.web_map.jscode
and nginx.web_map.url is defined and nginx.web_map.url %}
# Web服务API 代理
location /_AMapService/ {
set $args "$args&jscode={{nginx.web_map.jscode}}";
proxy_pass {{nginx.web_map.url}};
}
{% endif %}
{% if nginx.gov_map.url is defined and nginx.gov_map.url %}
# 电子政务地图
location ^~ /ServiceAdapter/MAP/ {
proxy_pass {{nginx.gov_map.url}};
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
{% endif %}
}
EOF
创建变量初始化任务
cat > /etc/ansible/roles/nginx/tasks/default-var-init.yaml << \EOF
# nginx安装目录,默认:/etc/nginx
- name: nginx 安装目录 为空时设置默认值
set_fact:
nginx: "{{ nginx | combine({'install_dir':'/etc/nginx'}) }}"
when: nginx.install_dir is undefined or nginx.install_dir == None or nginx.install_dir | length == 0
- name: 运行nginx的服务名称,默认为当前nginx所在的机器ip
set_fact:
nginx: "{{ nginx | combine({'server_name': ansible_default_ipv4['address']}) }}"
when: nginx.server_name is undefined or nginx.server_name == None or nginx.server_name | length == 0
- name: nginx监听的端口,默认8080
set_fact:
nginx: "{{ nginx | combine({'listen':'8080'}) }}"
when: nginx.listen is undefined or nginx.listen == None
- name: 日志目录,默认:/etc/nginx/log
set_fact:
nginx: "{{ nginx | combine({'log_dir':'/etc/nginx/log'}) }}"
when: nginx.log_dir is undefined or nginx.log_dir == None or nginx.log_dir | length == 0
- name: 服务配置文件目录,默认:/etc/nginx/server
set_fact:
nginx: "{{ nginx | combine({'server_dir':'/etc/nginx/server'}) }}"
when: nginx.server_dir is undefined or nginx.server_dir == None or nginx.server_dir | length == 0
- name: stream 模块路径,默认:/etc/nginx/stream
set_fact:
nginx: "{{ nginx | combine({'stream_dir':'/etc/nginx/stream'}) }}"
when: nginx.stream_dir is undefined or nginx.stream_dir == None or nginx.stream_dir | length == 0
- name: 前端项目路径,默认:/etc/nginx/frontend
set_fact:
nginx: "{{ nginx | combine({'frontend': {'project_dir': '/etc/nginx/frontend'}},recursive=True) }}"
when: nginx.frontend.project_dir is undefined or nginx.frontend.project_dir == None or nginx.frontend.project_dir | length == 0
- name: 前端项目资源路径,默认:/etc/nginx/frontend/resource
set_fact:
nginx: "{{ nginx | combine({'frontend': {'project_resource_dir': '/etc/nginx/frontend/resource'}},recursive=True) }}"
when: nginx.frontend.project_resource_dir is undefined or nginx.frontend.project_resource_dir == None or nginx.frontend.project_resource_dir | length == 0
- name: 前端请求url前缀,默认/web/
set_fact:
nginx: "{{ nginx | combine({'frontend': {'url_prefix': 'web'}} ,recursive=True) }}"
when: nginx.frontend.url_prefix is undefined or nginx.frontend.url_prefix == None or nginx.frontend.url_prefix | length == 0
- name: 后端api请求前缀,默认/api/
set_fact:
nginx: "{{ nginx | combine({'backend': {'api_prefix': 'api'}} ,recursive=True) }}"
when: nginx.backend.api_prefix is undefined or nginx.backend.api_prefix == None or nginx.backend.api_prefix | length == 0
- name: 项目配置名称,默认:default-server.conf
set_fact:
nginx: "{{ nginx | combine({'project_config_file_name': 'default-server.conf'}) }}"
when: nginx.project_config_file_name is undefined or nginx.project_config_file_name == None or nginx.project_config_file_name | length == 0
EOF
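上述任务大量使用 combine 过滤器为嵌套变量补默认值,下面是一个最小示意(示例playbook,变量内容仅为演示):
---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: combine 过滤器合并嵌套字典示意
      debug:
        msg: "{{ {'a': 1, 'b': {'c': 2}} | combine({'b': {'d': 3}}, recursive=True) }}"
      # 输出 {'a': 1, 'b': {'c': 2, 'd': 3}};不加 recursive=True 时 'b' 会被整体替换为 {'d': 3}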
创建nginx安装任务
cat > /etc/ansible/roles/nginx/tasks/main.yaml << \EOF
# 安装方式校验
- fail:
msg: "安装nginx时需要指定镜像地址【nginx.image】及镜像版本【nginx.component_version】"
when:
- nginx.image is undefined or nginx.image == None or nginx.image | length == 0
- nginx.component_version.x86_64 is undefined or nginx.component_version.x86_64 == None or nginx.component_version.x86_64 | length == 0
- nginx.component_version.aarch64 is undefined or nginx.component_version.aarch64 == None or nginx.component_version.aarch64 | length == 0
- name: 查看 nginx 进程是否存在
# 此处使用docker ps的方式判断
shell: "docker ps | grep nginx"
register: nginx_process_result
failed_when: nginx_process_result.rc == -1
- name: 校验 nginx 是否已经安装
fail:
msg: "nginx已安装"
when: nginx_process_result.stdout_lines | length > 0
# 导入默认变量处理任务
- import_tasks: default-var-init.yaml
- name: nginx compose文件存放目录是否存在,不存在自动创建
file:
path: "{{nginx.compose_file_dir}}"
state: directory
when: not (nginx.compose_file_dir is undefined or nginx.compose_file_dir == None or nginx.compose_file_dir | length == 0)
- name: 根据 nginx compose 模板生成文件并分发到目标主机
template:
src: "{{nginx.template_compose_file_path}}"
dest: "{{nginx.compose_file_dir}}/{{nginx.template_compose_file_path | basename}}"
backup: yes
vars:
target_compoment_version: "{{nginx.component_version[ansible_facts.architecture]}}"
when: not (nginx.template_compose_file_path is undefined or nginx.template_compose_file_path == None or nginx.template_compose_file_path | length == 0)
- name: nginx 安装目录是否存在,不存在自动创建
file:
path: "{{nginx.install_dir}}"
state: directory
when: not (nginx.install_dir is undefined or nginx.install_dir == None or nginx.install_dir | length == 0)
- name: nginx server目录是否存在,不存在自动创建
file:
path: "{{nginx.server_dir}}"
state: directory
when: not (nginx.server_dir is undefined or nginx.server_dir == None or nginx.server_dir | length == 0)
- name: 根据 nginx server 模板生成nginx.conf并分发到目标主机
template:
src: "nginx.conf"
dest: "{{nginx.install_dir}}/nginx.conf"
backup: yes
- name: 复制 mime.types 到目标机器
copy:
src: "mime.types"
dest: "{{nginx.install_dir}}/mime.types"
backup: yes
- name: 根据 nginx server 模板生成默认项目配置并分发到目标主机
template:
src: "default-server.conf"
dest: "{{nginx.server_dir}}/{{nginx.project_config_file_name}}"
backup: yes
- name: nginx stream 目录是否存在,不存在自动创建
file:
path: "{{nginx.stream_dir}}"
state: directory
when: not (nginx.stream_dir is undefined or nginx.stream_dir == None or nginx.stream_dir | length == 0)
- name: 登录 docker
shell: "docker login -u {{ docker.username }} -p {{ docker.password }} {{ docker.registry_url }}"
when:
- not (docker.registry_url is undefined or docker.registry_url == None or docker.registry_url | length == 0)
- not (docker.username is undefined or docker.username == None or docker.username | length == 0)
- not (docker.password is undefined or docker.password == None or docker.password | length == 0)
- name: 启动 nginx
shell: |
cd "{{nginx.compose_file_dir}}"
docker-compose up -d
EOF
新建nginx安装执行文件
cat > /etc/ansible/install-nginx.yaml << \EOF
---
- hosts: nginx
roles:
- nginx
EOF
执行脚本
ansible-playbook /etc/ansible/install-nginx.yaml
卸载nginx命令(下述命令仅在测试时使用)
docker stop nginx-service
rm -rf /etc/nginx
rm -rf /home/zhoujibin/nginx
安装jdk
创建资源清单
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:jdk,根据实际情况调整
[jdk]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
EOF
创建角色目录
mkdir -p /etc/ansible/roles/jdk/
cd /etc/ansible/roles/jdk/
mkdir tasks vars files files/x86_64 files/aarch64
创建变量
cat > /etc/ansible/roles/jdk/vars/main.yaml << \EOF
# jdk安装方式:本地/网络获取,优先级:本地>网络获取
install_jdk_from:
# 从本地安装,ansible_facts.architecture变量示例:x86_64、aarch64
local: "/etc/ansible/roles/jdk/files/{{ansible_facts.architecture}}/jdk-1.8.tar.gz"
# jdk的安装目录
java_home: /usr/lib/jvm
# 已存在jdk时是否继续安装
continue_if_jdk_exists: true
# 解压后的jdk文件夹名称,默认和压缩包名称一致
uncompress_jdk_folder_name: jdk-1.8
EOF
创建安装任务
cat > /etc/ansible/roles/jdk/tasks/main.yaml << \EOF
# 安装方式校验
- fail:
msg: "安装jdk时需要指定安装方式,【install_jdk_from.local】不能为空"
when:
- install_jdk_from.local is undefined or install_jdk_from.local == None or install_jdk_from.local | length == 0
- name: 查看jdk版本
shell: "jdk --version"
register: jdk_version_result
failed_when: jdk_version_result.rc == -1
- name: 校验jdk是否已经安装
fail:
msg: "jdk已安装"
when:
- continue_if_jdk_exists == false
- jdk_version_result.rc == 0
- name: jdk 安装包目录是否存在,不存在自动创建
file:
path: "{{install_jdk_from.local | dirname}}"
state: directory
when: not (install_jdk_from.local is undefined or install_jdk_from.local == None or install_jdk_from.local | length == 0)
- name: jdk 安装目录是否存在,不存在自动创建
file:
path: "{{java_home}}"
state: directory
when: not (java_home is undefined or java_home == None or java_home | length == 0)
- name: 复制 jdk 安装包到目标主机
copy:
src: "{{install_jdk_from.local}}"
dest: "{{install_jdk_from.local}}"
backup: yes
when: not (install_jdk_from.local is undefined or install_jdk_from.local == None or install_jdk_from.local | length == 0)
- name: 解压 jdk 到指定目录
shell: |
tar -zxvf "{{install_jdk_from.local}}" -C "{{java_home}}"
when:
- not (install_jdk_from.local is undefined or install_jdk_from.local == None or install_jdk_from.local | length == 0)
- not (java_home is undefined or java_home == None or java_home | length == 0)
- name: 设置 JAVA_HOME
lineinfile:
path: /etc/profile
line: "export JAVA_HOME={{java_home}}/{{uncompress_jdk_folder_name}}"
state: present
insertafter: EOF
when:
- not (java_home is undefined or java_home == None or java_home | length == 0)
- not (uncompress_jdk_folder_name is undefined or uncompress_jdk_folder_name == None or uncompress_jdk_folder_name | length == 0)
- name: 设置 JRE_HOME
lineinfile:
path: /etc/profile
line: "export JRE_HOME=$JAVA_HOME/jre"
state: present
insertafter: EOF
- name: 设置 CLASSPATH
lineinfile:
path: /etc/profile
line: "export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib"
state: present
insertafter: EOF
- name: 设置 PATH
lineinfile:
path: /etc/profile
line: "export PATH=$JAVA_HOME/bin:$PATH"
state: present
insertafter: EOF
- name: 查看安装成功的 jdk 版本
shell:
cmd: |
source /etc/profile
java -version
args:
executable: /bin/bash
register: install_jdk_version_result
failed_when: install_jdk_version_result.rc == -1
- name: 打印安装成功的 jdk 版本
debug:
msg: "{{install_jdk_version_result.stderr_lines}}"
EOF
创建执行文件
cat > /etc/ansible/install-jdk.yaml << \EOF
---
- hosts: jdk
roles:
- jdk
EOF
执行脚本
ansible-playbook /etc/ansible/install-jdk.yaml
安装kafka
创建资源清单
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:kafka,根据实际情况调整
[kafka]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
创建角色目录
mkdir -p /etc/ansible/roles/kafka/
cd /etc/ansible/roles/kafka/
mkdir tasks vars files templates
创建变量
cat > /etc/ansible/roles/kafka/vars/main.yaml << \EOF
# 集群安装时需要配置多台机器
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
zookeeper:
# 集群环境中每台机器上对应的zookeeper机器标识
myid: 1
# zooKeeper监听客户端连接的端口号,默认:2181
client_port:
# 数据同步端口,默认:2888
data_sync_port:
# 选举端口,默认:3888
elect_port:
kafka:
# 缓存代理,Kafka集群中的一台或多台服务器统称broker.
broke_id: 0
# kafka监听的端口,默认:9092
listener_port:
# kafka关联的所有zookeeper的ip:端口配置
zookeeper_list:
# 默认:9999
jmx_port:
- ip: 192.168.13.16
hostname: k8s-worker-01
zookeeper:
# 集群环境中每台机器上对应的zookeeper机器标识
myid: 2
# zooKeeper监听客户端连接的端口号,默认:2181
client_port:
# 数据同步端口,默认:2888
data_sync_port:
# 选举端口,默认:3888
elect_port:
kafka:
# 缓存代理,Kafka集群中的一台或多台服务器统称broker.
broke_id: 1
# kafka监听的端口,默认:9092
listener_port:
# kafka关联的所有zookeeper的ip:端口配置
zookeeper_list:
# 默认:9999
jmx_port:
- ip: 192.168.13.51
hostname: k8s-worker-02
zookeeper:
# 集群环境中每台机器上对应的zookeeper机器标识
myid: 3
# zooKeeper监听客户端连接的端口号,默认:2181
client_port:
# 数据同步端口,默认:2888
data_sync_port:
# 选举端口,默认:3888
elect_port:
kafka:
# 缓存代理,Kafka集群中的一台或多台服务器统称broker.
broke_id: 2
# kafka监听的端口,默认:9092
listener_port:
# kafka关联的所有zookeeper的ip:端口配置
zookeeper_list:
# 默认:9999
jmx_port:
kafka:
# 安装类型:单机-standalone,集群-cluster,集群环境要求至少3台机器
install_type: cluster
install_from:
# 从本地安装,需要指定具体的安装包路径
local: /etc/ansible/roles/kafka/files/kafka_2.13-3.2.1.tgz
# kafka安装目录
install_dir: /home/zhoujibin/
# kafka解压后的文件夹名称,一般和压缩包名称一致
uncompress_folder_name: kafka_2.13-3.2.1
# 存储偏移量数据文件,默认:/tmp/connect.offsets
offset_storage_file_filename: /home/zhoujibin/kafka_2.13-3.2.1/connect.offsets
# 日志存储目录,默认放在安装目录下的logs目录
log_dirs: /home/zhoujibin/kafka_2.13-3.2.1/kafka-logs
# 监听方式:域名-domain(默认),ip-ip
listen_type:
zookeeper:
# 配置后将不再使用kafka内置的zookeeper
install_from:
# 从本地安装,需要指定具体的安装包路径
local: /etc/ansible/roles/kafka/files/apache-zookeeper-3.6.3-bin.tar.gz
# zookeeper安装目录
install_dir: /home/zhoujibin/
# zookeeper 解压后的文件夹名称,一般和压缩包名称一致
uncompress_folder_name: apache-zookeeper-3.6.3-bin
# zookeeper 时间间隔基本单位,即“滴答”(tick)的长度,单位是毫秒,默认:2000
tick_time:
# 初始化同步阶段的限制,默认:10
init_limit:
# 同步操作的限制,同样以滴答数计,默认:5
sync_limit:
# 数据存储目录,默认:/tmp/zookeeper
data_dir: /home/zhoujibin/apache-zookeeper-3.6.3-bin/data
# efak相关配置
efak:
install_from:
# 从本地安装,需要指定具体的安装包路径
local: /etc/ansible/roles/kafka/files/efak-web-3.0.1.tar.gz
# 安装到那台机器上,对应 hostnames 中的ip属性,默认取 hostnames 数组中的第一个ip
install_to:
# efak 安装目录
install_dir: /home/zhoujibin
# efak 解压后的文件夹名称,一般和压缩包名称一致
uncompress_folder_name: efak-web-3.0.1
# 数据存储相关配置,目前官方仅支持 sqlite 和 mysql
driver: org.sqlite.JDBC
url: jdbc:sqlite:/home/zhoujibin/efak-web-3.0.1/db/ke.db
username: root
password: roo@123456
# web ui 的端口,默认:8048
webui:
port:
# web ui执行操作(如:删除totic)使用的token,默认keadmin
topic:
token:
EOF
创建模板文件
使用kafka内置zookeeper时的配置文件
cat > /etc/ansible/roles/kafka/templates/zookeeper-zookeeper.properties << \EOF
# 这个参数指定了ZooKeeper存储快照和事务日志的本地目录。快照是ZooKeeper状态的定期备份,用于快速恢复。dataDir应当指向一个具有足够空间且持久化的存储位置。注意示例中的/tmp仅用于演示,实际生产环境中不建议使用/tmp,因为它可能随系统重启而被清空
dataDir={{zookeeper.data_dir}}
# 这是ZooKeeper监听客户端连接的端口号。客户端,包括ZooKeeper应用和服务,会通过这个端口与ZooKeeper集群通信。在本例中,这个端口是2181。确保这个端口在防火墙规则中被正确开放,且没有其他服务占用
clientPort={{current_host.zookeeper.client_port}}
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
EOF
zookeeper集群配置文件
cat > /etc/ansible/roles/kafka/templates/zookeeper-zoo.cfg << \EOF
# 这个参数定义了zooKeeper中基本的时间单位,即“滴答”(tick)的长度,单位是毫秒。在这个例子中,每个滴答持续2000毫秒。ZooKeeper使用这个时间间隔来衡量事件和执行周期性操作,如心跳检测、选举超时计算等
tickTime={{zookeeper.tick_time}}
# 初始化同步阶段的限制,以滴答数计。这个参数定义了在选举一个新的领导者期间,跟随者连接到领导者并同步初始数据集的最大时间,以滴答为单位。在这个配置中,跟随者有10个滴答(即tickTime乘以initLimit,这里是20000毫秒)的时间来完成与领导者同步
initLimit={{zookeeper.init_limit}}
# 同步操作的限制,同样以滴答数计。这个参数设置了ZooKeeper服务器之间发送请求和收到响应(确认)之间能容忍的最大等待滴答数。如果超过这个限制还未收到确认,请求将被认定为失败。在这个例子中,同步操作最多可以等待5个滴答(10000毫秒)没有响应。
syncLimit={{zookeeper.sync_limit}}
# 这个参数指定了zooKeeper存储快照和事务日志的本地目录。快照是ZooKeeper状态的定期备份,用于快速恢复。dataDir应当指向一个具有足够空间且持久化的存储位置。注意示例中的/tmp仅用于演示,实际生产环境中不建议使用/tmp,因为它可能随系统重启而被清空
dataDir={{zookeeper.data_dir}}
# 这是zooKeeper监听客户端连接的端口号。客户端,包括ZooKeeper应用和服务,会通过这个端口与ZooKeeper集群通信。在本例中,这个端口是2181。确保这个端口在防火墙规则中被正确开放,且没有其他服务占用
clientPort={{current_host.zookeeper.client_port}}
{% if kafka.install_type =='cluster' %}
# 数字1/2/3需要与zookeeper中定义的myid文件一致。右边两个端口,2888表示数据同步和通信端口;3888表示选举端口
{% for host in hostnames -%}
server.{{host.zookeeper.myid}}={{host.ip}}:{{host.zookeeper.data_sync_port | string }}:{{host.zookeeper.elect_port | string }}
{% endfor -%}
{% endif %}
EOF
zookeeper集群标识
cat > /etc/ansible/roles/kafka/templates/zookeeper-myid << \EOF
{{current_host.zookeeper.myid}}
EOF
kafka配置文件
cat > /etc/ansible/roles/kafka/templates/kafka-server.properties << \EOF
broker.id={{current_host.kafka.broke_id}}
# kafka监听的地址
listeners=PLAINTEXT://:{{current_host.kafka.listener_port}}
{% if kafka.listen_type == 'domain' %}
advertised.listeners=PLAINTEXT://{{current_host.hostname}}:{{current_host.kafka.listener_port}}
{% endif %}
{% if kafka.listen_type == 'ip' %}
advertised.listeners=PLAINTEXT://{{current_host.ip}}:{{current_host.kafka.listener_port}}
{% endif %}
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# 日志存储目录
log.dirs={{kafka.log_dirs}}
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# 此处配置所有的zookeeper地址,格式:ip1:port1,ip2:port2
zookeeper.connect={{current_host.kafka.zookeeper_list}}
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
EOF
efak配置文件
cat > /etc/ansible/roles/kafka/templates/efak-system-config.properties << \EOF
######################################
# multi zookeeper & kafka cluster list
# Settings prefixed with 'kafka.eagle.' will be deprecated, use 'efak.' instead
######################################
efak.zk.cluster.alias=cluster1
cluster1.zk.list={{current_host.kafka.zookeeper_list}}
######################################
# broker size online list
######################################
cluster1.efak.broker.size=20
######################################
# zk client thread limit
######################################
kafka.zk.limit.size=16
######################################
# EFAK webui port
######################################
efak.webui.port={{efak.webui.port}}
######################################
# kafka offset storage
######################################
cluster1.efak.offset.storage=kafka
######################################
# kafka jmx uri
######################################
cluster1.efak.jmx.uri=service:jmx:rmi:///jndi/rmi://%s/jmxrmi
######################################
# kafka sql topic records max
######################################
efak.sql.topic.records.max=5000
efak.sql.topic.preview.records.max=10
######################################
# delete kafka topic token
######################################
efak.topic.token={{efak.topic.token}}
######################################
# kafka mysql jdbc driver address
######################################
efak.driver={{efak.driver}}
efak.url={{efak.url}}
efak.username={{efak.username}}
efak.password={{efak.password}}
EOF
创建变量默认值初始化任务
cat > /etc/ansible/roles/kafka/tasks/default-var-init.yaml << \EOF
- name: 初始化 kafka 中 监听类型配置
set_fact:
kafka: "{{ kafka | combine({'listen_type': 'domain'}) }}"
when: kafka.listen_type is undefined or kafka.listen_type == None or kafka.listen_type | length == 0
- name: 初始化 kafka 中 日志目录配置
set_fact:
kafka: "{{ kafka | combine({'log_dirs': '/tmp/kafka-logs'}) }}"
when: kafka.log_dirs is undefined or kafka.log_dirs == None or kafka.log_dirs | length == 0
- name: 初始化 kafka 中 存储偏移量数据文件 配置
set_fact:
kafka: "{{ kafka | combine({'offset_storage_file_filename':'/tmp/connect.offsets'}) }}"
when: kafka.offset_storage_file_filename is undefined or kafka.offset_storage_file_filename == None or kafka.offset_storage_file_filename | length == 0
- name: 初始化 zooKeeper 中 基本的时间单位 配置
set_fact:
zookeeper: "{{ zookeeper | combine({'tick_time':'2000'}) }}"
when: zookeeper.tick_time is undefined or zookeeper.tick_time == None or zookeeper.tick_time | length == 0
- name: 初始化 zooKeeper 中 初始化同步阶段的限制 配置
set_fact:
zookeeper: "{{ zookeeper | combine({'init_limit':'10'}) }}"
when: zookeeper.init_limit is undefined or zookeeper.init_limit == None or zookeeper.init_limit | length == 0
- name: 初始化 zooKeeper 中 同步操作的限制 配置
set_fact:
zookeeper: "{{ zookeeper | combine({'sync_limit':'5'}) }}"
when: zookeeper.sync_limit is undefined or zookeeper.sync_limit == None or zookeeper.sync_limit | length == 0
- name: 初始化 zooKeeper 中 数据目录 配置
set_fact:
zookeeper: "{{ zookeeper | combine({'data_dir':'/tmp/zookeeper'}) }}"
when: zookeeper.data_dir is undefined or zookeeper.data_dir == None or zookeeper.data_dir | length == 0
- name: 设置默认的 zookeeper 监听端口变量
set_fact:
default_zookeeper_port: "2181"
- name: 获取定义的所有zookeeper主机信息
set_fact:
temp_zookeeper: "{{hostnames | json_query('[].{domain:hostname,ip:ip,port:zookeeper.client_port | string}') }}"
- name: 拼接 zookeeper 主机信息:ip:port
set_fact:
temp_zookeeper_list: |-
{% for zookeeper in temp_zookeeper -%}
{% if zookeeper.port == None or zookeeper.port == '' %}
{{ zookeeper.ip }}:{{default_zookeeper_port}}
{% else %}
{{ zookeeper.ip }}:{{ zookeeper.port }}
{% endif %}
{% endfor -%}
loop: "{{temp_zookeeper}}"
when: kafka.listen_type == 'ip'
- name: 拼接 zookeeper 主机信息:域名:port
set_fact:
temp_zookeeper_list: |-
{% for zookeeper in temp_zookeeper -%}
{% if zookeeper.port == None or zookeeper.port == '' %}
{{ zookeeper.domain }}:{{default_zookeeper_port}}
{% else %}
{{ zookeeper.domain }}:{{ zookeeper.port }}
{% endif %}
{% endfor -%}
loop: "{{temp_zookeeper}}"
when: kafka.listen_type == 'domain'
- name: 实例化hostnames 默认属性
set_fact:
temp_hostnames: >-
{{
temp_hostnames | default([]) +
[
item | combine(
{
"kafka":{
"listener_port": "9092" if (item.kafka.listener_port is undefined or item.kafka.listener_port == None or not item.kafka.listener_port) else item.kafka.listener_port | string
,"jmx_port": "9999" if (item.kafka.jmx_port is undefined or item.kafka.jmx_port == None or not item.kafka.jmx_port) else item.kafka.jmx_port | string
,"zookeeper_list": "{{temp_zookeeper_list | trim | regex_replace('\s+',',') }}" if (item.kafka.zookeeper_list is undefined or item.kafka.zookeeper_list == None or not item.kafka.zookeeper_list) else item.kafka.zookeeper_list
}
,"zookeeper":{
"client_port": "{{default_zookeeper_port}}" if item.zookeeper.client_port is undefined or item.zookeeper.client_port == None or not item.zookeeper.client_port else item.zookeeper.client_port | string
,"data_sync_port": "2888" if item.zookeeper.data_sync_port is undefined or item.zookeeper.data_sync_port == None or not item.zookeeper.data_sync_port else item.zookeeper.data_sync_port | string
,"elect_port": "3888" if item.zookeeper.elect_port is undefined or item.zookeeper.elect_port == None or not item.zookeeper.elect_port else item.zookeeper.elect_port | string
}
}
,recursive=True)
]
}}
loop: "{{ hostnames }}"
vars:
temp_hostnames: []
- name: 重新给hostnames属性赋值
set_fact:
hostnames: "{{temp_hostnames}}"
- name: efak 安装机器 ip 指定,默认:hostnames数组中的第一个机器的ip
set_fact:
efak: "{{ efak | combine({'install_to': '{{hostnames[0].ip}}'})}}"
when: efak.install_to is undefined or efak.install_to == None or efak.install_to | length == 0
- name: efak web ui 端口指定,为空时默认:8048
set_fact:
efak: "{{ efak | combine({'webui': {'port': '8048'}},recursive=True) }}"
when: efak.webui.port is undefined or efak.webui.port == None or efak.webui.port | length == 0
- name: 执行操作(如:删除topic)使用的token 指定,为空时默认:keadmin
set_fact:
efak: "{{ efak | combine({'topic': {'token': 'keadmin'}},recursive=True) }}"
when: efak.topic.token is undefined or efak.topic.token == None or efak.topic.token | length == 0
EOF
创建安装任务
主机、域名设置任务
cat > /etc/ansible/roles/kafka/tasks/set-hosts.yaml << \EOF
- name: 判断/etc/hosts中是否已经存在ip、域名配置
shell: grep -c "{{item.ip}} {{item.hostname}}" /etc/hosts
with_items: "{{hostnames}}"
register: hosts_exist_flag
failed_when: hosts_exist_flag.rc == -1
- name: 存储ip、域名配置是否存在判断结果
set_fact:
match_count: "{{hosts_exist_flag.results | json_query(query_hostname_match_condition) | join('') }}"
vars:
query_hostname_match_condition: "[?item.hostname=='{{inventory_hostname}}'].stdout"
- name: 【开始】添加 kafka ip、域名配置
shell: echo '# kafka ip、域名配置 begin' >> /etc/hosts
when:
- match_count == "0"
- kafka.listen_type == 'domain'
- name: 正在配置 kafka ip、域名
shell: echo '{{item.ip}} {{item.hostname}}' >> /etc/hosts
with_items: "{{hostnames}}"
when:
- match_count == "0"
- kafka.listen_type == 'domain'
- name: 【结束】添加 kafka ip、域名配置
shell: echo '# kafka ip、域名配置 end' >> /etc/hosts
when:
- match_count == "0"
- kafka.listen_type == 'domain'
EOF
创建zookeeper安装任务
cat > /etc/ansible/roles/kafka/tasks/install-zookeeper.yaml << \EOF
- name: 判断 zookeeper 安装包目录是否存在,不存在自动创建
file:
path: "{{zookeeper.install_from.local | dirname}}"
state: directory
when: not (zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0)
- name: 复制 zookeeper 安装包到目标主机
copy:
src: "{{zookeeper.install_from.local}}"
dest: "{{zookeeper.install_from.local}}"
backup: yes
when: not (zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0)
- name: 判断 zookeeper 安装目录是否存在,不存在自动创建
file:
path: "{{zookeeper.install_dir}}"
state: directory
when: not (zookeeper.install_dir is undefined or zookeeper.install_dir == None or zookeeper.install_dir | length == 0)
- name: 判断 zookeeper 数据目录是否存在,不存在自动创建
file:
path: "{{zookeeper.data_dir}}"
state: directory
when: not (zookeeper.data_dir is undefined or zookeeper.data_dir == None or zookeeper.data_dir | length == 0)
- name: 集群环境下写入内容至 zookeeper 的 myid 文件
template:
src: "zookeeper-myid"
dest: "{{zookeeper.data_dir}}/myid"
backup: yes
when:
- kafka.install_type =='cluster'
- not (zookeeper.data_dir is undefined or zookeeper.data_dir == None or zookeeper.data_dir | length == 0)
- name: 解压自定义的 zookeeper 文件
shell:
cd {{zookeeper.install_from.local | dirname }}
&& tar -zxvf {{zookeeper.install_from.local}} -C {{zookeeper.install_dir}}
when: not (zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0)
- name: 使用【非内置】的 zookeeper 配置模板生成默认配置并分发到目标主机
template:
src: "zookeeper-zoo.cfg"
dest: "{{zookeeper.install_dir}}/{{zookeeper.uncompress_folder_name}}/conf/zoo.cfg"
backup: yes
when: not (zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0)
- name: 使用【内置】的 zookeeper 配置模板生成默认配置并分发到目标主机
template:
src: "zookeeper-zookeeper.properties"
dest: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/zookeeper.properties"
backup: yes
when: not (kafka.install_dir is undefined or kafka.install_dir == None or kafka.install_dir | length == 0)
EOF
创建端口及启动脚本默认配置修改任务
此任务主要用于修改启动脚本、kafka、zookeeper默认端口配置信息。包含如下修改点:
文件地址 | 改动点 | 备注 |
---|---|---|
kafka安装目录/bin/kafka-run-class.sh | 删除-XX:+UseG1GC、新增jmx配置 | 不修改则kafka无法启动,也无法看到监控的jmx数据 |
kafka安装目录/bin/kafka-server-start.sh | 30行左右添加JMX的端口监听配置 JMX_PORT="9999" | |
connect-distributed.properties | bootstrap.servers改为当前kafka对应的ip和端口 | |
producer.properties | bootstrap.servers改为当前kafka对应的ip和端口 | |
connect-standalone.properties | bootstrap.servers改为当前kafka对应的ip和端口 | |
consumer.properties | bootstrap.servers改为当前kafka对应的ip和端口 |
cat > /etc/ansible/roles/kafka/tasks/update-kafka-port-config.yaml << \EOF
- name: 修改 kafka-run-class.sh 启动脚本配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin/kafka-run-class.sh"
regexp: '-XX:\+UseG1GC'
replace: ""
- name: 修改 kafka-run-class.sh 启动脚本监听方式(ip)配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin/kafka-run-class.sh"
regexp: '-Djava\.awt\.headless=true'
replace: "{{'-Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote.host=0.0.0.0 -Dcom.sun.management.jmxremote.local.only=false -Djava.rmi.server.hostname=' + current_host.ip}}"
when: kafka.listen_type == 'ip'
- name: 修改 kafka-run-class.sh 启动脚本监听方式(domain)配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin/kafka-run-class.sh"
regexp: '-Djava\.awt\.headless=true'
replace: "{{'-Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote.host=0.0.0.0 -Dcom.sun.management.jmxremote.local.only=false -Djava.rmi.server.hostname=' + current_host.hostname}}"
when: kafka.listen_type == 'domain'
- name: 判断 kafka kafka-server-start.sh 脚本中是否设置了 JMX_PORT
shell: "grep -c 'export JMX_PORT=\"{current_host.kafka.jmx_port}}\"' {{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin/kafka-server-start.sh"
# 将结果存储在变量中
register: jxm_port_exist_flag
failed_when: jxm_port_exist_flag.rc == -1
- name: 添加 JMX_PORT 配置
shell: "sed -i '29 a export JMX_PORT=\"{{current_host.kafka.jmx_port}}\"' {{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin/kafka-server-start.sh"
when: jxm_port_exist_flag.stdout == '0'
- name: 修改 connect-distributed.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/connect-distributed.properties"
regexp: 'localhost:9092'
replace: "{{current_host.ip+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'ip'
- name: 修改 connect-distributed.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/connect-distributed.properties"
regexp: 'localhost:9092'
replace: "{{current_host.hostname+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'domain'
- name: 修改 producer.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/producer.properties"
regexp: 'localhost:9092'
replace: "{{current_host.ip+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'ip'
- name: 修改 producer.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/producer.properties"
regexp: 'localhost:9092'
replace: "{{current_host.hostname+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'domain'
- name: 修改 connect-standalone.properties 中的配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/connect-standalone.properties"
regexp: 'localhost:9092'
replace: "{{current_host.ip+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'ip'
- name: 修改 connect-standalone.properties 中的配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/connect-standalone.properties"
regexp: 'localhost:9092'
replace: "{{current_host.hostname+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'domain'
- name: 修改 connect-standalone.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/connect-standalone.properties"
regexp: '/tmp/connect.offsets'
replace: "{{kafka.offset_storage_file_filename}}"
- name: 修改 consumer.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/consumer.properties"
regexp: 'localhost:9092'
replace: "{{current_host.ip+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'ip'
- name: 修改 consumer.properties 配置
replace:
path: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/consumer.properties"
regexp: 'localhost:9092'
replace: "{{current_host.hostname+':'+current_host.kafka.listener_port}}"
when: kafka.listen_type == 'domain'
EOF
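After the role has run, you can spot-check that these replacements actually landed with an ad-hoc command. This is only a sketch: it assumes the kafka inventory group and the default install path used later in this document; adjust the paths to your own install_dir and uncompress_folder_name.
ansible kafka -m shell -a "grep -n 'bootstrap.servers' /home/zhoujibin/kafka_2.13-3.2.1/config/consumer.properties; grep -n 'JMX_PORT' /home/zhoujibin/kafka_2.13-3.2.1/bin/kafka-server-start.sh"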
EFAK installation task
cat > /etc/ansible/roles/kafka/tasks/install-efak.yaml << \EOF
- name: 判断 efak 安装包目录是否存在,不存在自动创建
file:
path: "{{efak.install_from.local | dirname}}"
state: directory
when:
- efak.install_to == current_host.ip
- not (efak.install_from.local is undefined or efak.install_from.local == None or efak.install_from.local | length == 0)
- name: 判断 efak 安装目录是否存在,不存在自动创建
file:
path: "{{efak.install_dir}}"
state: directory
when:
- efak.install_to == current_host.ip
- not (efak.install_dir is undefined or efak.install_dir == None or efak.install_dir | length == 0)
- name: 复制 efak 安装包到目标主机
copy:
src: "{{efak.install_from.local}}"
dest: "{{efak.install_from.local}}"
backup: yes
when:
- efak.install_to == current_host.ip
- not (efak.install_from.local is undefined or efak.install_from.local == None or efak.install_from.local | length == 0)
- name: 解压 efak 安装包
shell:
cd {{efak.install_from.local | dirname }}
&& tar -zxvf {{efak.install_from.local}} -C {{efak.install_dir}}
when:
- efak.install_to == current_host.ip
- not (efak.install_from.local is undefined or efak.install_from.local == None or efak.install_from.local | length == 0)
- name: 判断 efak 是否需要创建 sqlite 数据库文件
file:
path: "{{efak.url | replace('jdbc:sqlite:','') }}"
state: touch
when:
- efak.install_to == current_host.ip
- not (efak.url is undefined or efak.url == None or efak.url | length == 0)
- "{{efak.url.startswith('jdbc:sqlite:')}}"
- name: 从 efak 配置模板生成默认配置并将文件到目标主机
template:
src: "efak-system-config.properties"
dest: "{{efak.install_dir}}/{{efak.uncompress_folder_name}}/conf/system-config.properties"
backup: yes
when:
- efak.install_to == current_host.ip
- not (efak.install_from.local is undefined or efak.install_from.local == None or efak.install_from.local | length == 0)
- name: 修改 efak 启动脚本配置
replace:
path: "{{efak.install_dir}}/{{efak.uncompress_folder_name}}/bin/ke.sh"
regexp: '-XX:\+UseG1GC'
replace: ""
when: efak.install_to == current_host.ip
- name: 设置 efak 环境变量
lineinfile:
path: /etc/profile
line: "export KE_HOME={{efak.install_dir}}/{{efak.uncompress_folder_name}}"
state: present
insertafter: EOF
when: efak.install_to == current_host.ip
- name: 启动 efak(延后5s执行,等待所有的kafka集群启动完毕)
shell:
cmd: |
sleep 5s
source /etc/profile
cd {{efak.install_dir}}/{{efak.uncompress_folder_name}}/bin && ./ke.sh start
executable: /bin/bash
when:
- efak.install_to == current_host.ip
- not (efak.install_from.local is undefined or efak.install_from.local == None or efak.install_from.local | length == 0)
EOF
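To confirm that EFAK actually came up on the host selected by efak.install_to, a quick manual check is possible. This is a sketch that assumes the install path used elsewhere in this document; the EFAK web UI listens on port 8048 by default, so replace the placeholder host with your efak.install_to address.
/home/zhoujibin/efak-web-3.0.1/bin/ke.sh status
curl -I http://<efak-host-ip>:8048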
Kafka installation task
cat > /etc/ansible/roles/kafka/tasks/install-kafka.yaml << \EOF
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
# 安装方式校验
- fail:
msg: "安装kafka时需要指定安装方式,【kafka.install_from.local】不能为空"
when:
- kafka.install_from.local is undefined or kafka.install_from.local == None or kafka.install_from.local | length == 0
- name: 判断 kafka安装包目录是否存在,不存在自动创建
file:
path: "{{kafka.install_from.local | dirname}}"
state: directory
when: not (kafka.install_from.local is undefined or kafka.install_from.local == None or kafka.install_from.local | length == 0)
- name: 判断 kafka 安装目录是否存在,不存在自动创建
file:
path: "{{kafka.install_dir}}"
state: directory
when: not (kafka.install_dir is undefined or kafka.install_dir == None or kafka.install_dir | length == 0)
- name: 判断 kafka 日志存储目录是否存在,不存在自动创建
file:
path: "{{kafka.log_dirs}}"
state: directory
when: not (kafka.log_dirs is undefined or kafka.log_dirs == None or kafka.log_dirs | length == 0)
- name: 复制 kafka 安装包到目标主机
copy:
src: "{{kafka.install_from.local}}"
dest: "{{kafka.install_from.local}}"
backup: yes
when: not (kafka.install_from.local is undefined or kafka.install_from.local == None or kafka.install_from.local | length == 0)
- name: 解压 kafka 安装包
shell:
cd {{kafka.install_from.local | dirname }}
&& tar -zxvf {{kafka.install_from.local}} -C {{kafka.install_dir}}
when: not (kafka.install_from.local is undefined or kafka.install_from.local == None or kafka.install_from.local | length == 0)
- name: 从 kafka 配置模板生成默认配置并将文件到目标主机
template:
src: "kafka-server.properties"
dest: "{{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/config/server.properties"
backup: yes
when: not (kafka.install_from.local is undefined or kafka.install_from.local == None or kafka.install_from.local | length == 0)
- name: 判断 kafka 存储偏移量数据的文件是否存在,不存在自动创建
file:
path: "{{kafka.offset_storage_file_filename}}"
state: touch
when: not (kafka.offset_storage_file_filename is undefined or kafka.offset_storage_file_filename == None or kafka.offset_storage_file_filename | length == 0)
# 导入 zookeeper 安装任务
- import_tasks: install-zookeeper.yaml
# 导入配置文件及端口修改任务
- import_tasks: update-kafka-port-config.yaml
- name: 启动【非内置zookeeper】时的zookeeper、kafka
shell:
cmd: |
source /etc/profile
cd {{zookeeper.install_dir}}/{{zookeeper.uncompress_folder_name}}/bin
./zkServer.sh start ../conf/zoo.cfg
cd {{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin
./kafka-server-start.sh -daemon ../config/server.properties
executable: /bin/bash
when:
- not (zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0)
- name: 启动【内置zookeeper】时的zookeeper、kafka
shell:
cmd: |
source /etc/profile
cd {{kafka.install_dir}}/{{kafka.uncompress_folder_name}}/bin
./zookeeper-server-start.sh -daemon ../config/zookeeper.properties
./kafka-server-start.sh -daemon ../config/server.properties
executable: /bin/bash
when:
- zookeeper.install_from.local is undefined or zookeeper.install_from.local == None or zookeeper.install_from.local | length == 0
EOF
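Once the play has finished, the broker can be verified from any node with the CLI tools bundled with Kafka. This is only a sketch: replace the address and port with the listener_port you configured for that host, and the path with your install_dir.
cd /home/zhoujibin/kafka_2.13-3.2.1/bin
./kafka-topics.sh --bootstrap-server 192.168.13.247:9092 --create --topic ansible-smoke-test --partitions 1 --replication-factor 1
./kafka-topics.sh --bootstrap-server 192.168.13.247:9092 --list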
Aggregate the tasks
Add the tasks above to main.yaml
cat > /etc/ansible/roles/kafka/tasks/main.yaml << \EOF
# 默认变量处理任务
- import_tasks: default-var-init.yaml
# hosts 设置任务
- import_tasks: set-hosts.yaml
# 安装kafka
- import_tasks: install-kafka.yaml
# 安装efak
- import_tasks: install-efak.yaml
EOF
Create the playbook entry file
cat > /etc/ansible/install-kafka.yaml << \EOF
---
- hosts: kafka
roles:
# kafka依赖jdk,需要先安装jdk
- role: jdk
# 设置jdk安装出错时不影响后续流程
ignore_errors: true
- kafka
EOF
Run the playbook
ansible-playbook /etc/ansible/install-kafka.yaml
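While debugging, it can help to validate the playbook first and limit the run to a single host; both flags below are standard ansible-playbook options.
ansible-playbook /etc/ansible/install-kafka.yaml --syntax-check
ansible-playbook /etc/ansible/install-kafka.yaml --limit k8s-master -v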
zookeeper / kafka start and stop scripts
Built-in zookeeper
cd /home/zhoujibin/kafka_2.13-3.2.1/bin
# 启动zookeeper
./zookeeper-server-start.sh -daemon ../config/zookeeper.properties
# 启动kafka
./kafka-server-start.sh -daemon ../config/server.properties
# 查看启动日志
tail -100f ../logs/server.log
# 关闭zookeeper
./zookeeper-server-stop.sh
# 关闭kafka
./kafka-server-stop.sh
External (non-built-in) zookeeper
cd /home/zhoujibin/apache-zookeeper-3.6.3-bin/bin
# 启动zookeeper,不指定配置文件时默认加载conf目录下的zoo.cfg
./zkServer.sh start ../conf/zoo.cfg
# 启动kafka
cd /home/zhoujibin/kafka_2.13-3.2.1/bin
./kafka-server-start.sh -daemon ../config/server.properties
# 查看启动日志
tail -100f ../logs/server.log
# 关闭zookeeper
cd /home/zhoujibin/apache-zookeeper-3.6.3-bin/bin
./zkServer.sh stop
# 关闭kafka
cd /home/zhoujibin/kafka_2.13-3.2.1/bin
./kafka-server-stop.sh
Remove zookeeper / kafka (the commands below are for testing only)
# 卸载删除zookeeper、kafka(下述命令仅在测试时使用)
rm -rf /home/zhoujibin/apache-zookeeper-3.6.3-bin /home/zhoujibin/kafka_2.13-3.2.1 /home/zhoujibin/efak-web-3.0.1
Restart script summary
/home/zhoujibin/apache-zookeeper-3.6.3-bin/bin/zkServer.sh stop && /home/zhoujibin/kafka_2.13-3.2.1/bin/kafka-server-stop.sh && /home/zhoujibin/efak-web-3.0.1/bin/ke.sh stop
/home/zhoujibin/apache-zookeeper-3.6.3-bin/bin/zkServer.sh start /home/zhoujibin/apache-zookeeper-3.6.3-bin/conf/zoo.cfg && /home/zhoujibin/kafka_2.13-3.2.1/bin/kafka-server-start.sh -daemon /home/zhoujibin/kafka_2.13-3.2.1/config/server.properties
# 下述脚本在装有efak的集群上执行
/home/zhoujibin/efak-web-3.0.1/bin/ke.sh start
Install MongoDB
Create the inventory
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:mongo,根据实际情况调整
[mongo]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
Create the role directory structure
mkdir -p /etc/ansible/roles/mongo
cd /etc/ansible/roles/mongo/
mkdir tasks vars files files/x86_64 files/aarch64 templates
Package download links:
CentOS:
x86: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-4.4.4.tgz
arm64: https://fastdl.mongodb.org/linux/mongodb-linux-aarch64-rhel82-4.4.4.tgz
Ubuntu:
x86: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-4.4.4.tgz
arm64: https://fastdl.mongodb.org/linux/mongodb-linux-aarch64-ubuntu2004-4.4.4.tgz
Note: if you use a custom package, make sure the extracted folder name is the same across CPU architectures.
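For example, to prepare an x86_64 package whose top-level folder matches uncompress_folder_name in vars/main.yaml (the rhel70 build and the /tmp paths below are only an illustration):
cd /tmp
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-4.4.4.tgz
tar -zxf mongodb-linux-x86_64-rhel70-4.4.4.tgz
mv mongodb-linux-x86_64-rhel70-4.4.4 mongodb-linux-4.4.4
tar -zcf /etc/ansible/roles/mongo/files/x86_64/mongodb-linux-4.4.4.tar.gz mongodb-linux-4.4.4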
Create variables
cat > /etc/ansible/roles/mongo/vars/main.yaml << \EOF
# 集群安装时需要配置多台机器
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
mongo:
# 运行时的端口,默认-7004
port:
# 权重,权重最高的是主节点 ,数字越大,权重越高,不指定默认在前面的元素权重大
priority:
# 是否作为仲裁节点,默认-false,若都未指定,则默认最后一个节点为仲裁节点
arbiterOnly:
- ip: 192.168.13.16
hostname: k8s-worker-01
mongo:
# 运行时的端口,默认-7004
port:
# 权重,权重最高的是主节点 ,数字越大,权重越高,不指定默认在前面的元素权重大
priority:
# 是否作为仲裁节点,默认-false,若都未指定,则默认最后一个节点为仲裁节点
arbiterOnly:
- ip: 192.168.13.51
hostname: k8s-worker-02
mongo:
# 运行时的端口,默认-7004
port:
# 权重,权重最高的是主节点 ,数字越大,权重越高,不指定默认在前面的元素权重大
priority:
# 是否作为仲裁节点,默认-false,若都未指定,则默认最后一个节点为仲裁节点
arbiterOnly:
mongo:
# 安装类型:单机-standalone(默认),集群-cluster
install_type: cluster
# 集群的名称,默认:mongo_cluster
cluster_name:
install_from:
# 从本地安装,ansible_facts.architecture变量示例:x86_64、aarch64
local: "/etc/ansible/roles/mongo/files/{{ansible_facts.architecture}}/mongodb-linux-4.4.4.tar.gz"
# mongo 安装目录
install_dir: /home/zhoujibin
# mongo 压缩包解压后的文件夹名称,一般和压缩包名称一致
uncompress_folder_name: mongodb-linux-4.4.4
# 是否开启认证,默认-false
auth:
# 配置文件存储目录,默认在安装目录下的 etc 目录中
config_dir:
# 数据存储目录,默认在安装目录下的 data 目录中
db_path:
# 日志文件路径,默认在安装目录下,文件名为:mongodb.log
log_path:
# 进程文件路径,默认在安装目录下,文件名为 mongodb.pid
pid_file_path:
# 程序安装后的相关参数
config:
# 安装后是否立即运行
run_after_install:
# 启动程序时使用的用户信息,默认-root
run_user:
name:
group:
# 是否开机自启,默认-true
start_follow_server:
# 数据初始化相关配置
init:
# 管理员账户,默认:zhoujibin_root/123456
admin_user:
username:
password:
# 允许访问的数据库,默认:admin
db:
# 普通用户,默认-zhoujibin/123456
normal_user:
username:
password:
# 允许访问的数据库,默认:test,一般和项目保持一致
db:
# 需要初始化创建的集合,数据类型为字符串数组
collection:
EOF
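Most per-host values above may be left blank and will be filled in by default-var-init.yaml below; a filled-in entry would look roughly like this (the values are illustrative only):
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
mongo:
port: 7004
priority: 3
arbiterOnly: false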
Create template files
Configuration file
cat > /etc/ansible/roles/mongo/templates/mongodb.conf << \EOF
# 是否需要授权
auth={{mongo.auth | lower}}
# 数据存储目录
dbpath={{mongo.db_path}}
# 日志文件地址
logpath={{mongo.log_path}}
# 进程文件地址
pidfilepath={{mongo.pid_file_path}}
# 以追加的方式记录日志
logappend=true
# 服务绑定的ip
bind_ip={{current_host.ip}}
# 服务运行时的端口
port={{current_host.mongo.port}}
# 是否后台运行
fork=true
{% if mongo.cluster_name is defined and mongo.install_type == 'cluster' %}
# 集群名称
replSet={{mongo.cluster_name}}
{% endif %}
EOF
Service (auto-start) unit file
cat > /etc/ansible/roles/mongo/templates/mongo.service << \EOF
[Unit]
Description=mongo system config
After=network.target
[Service]
Type=forking
User={{config.run_user.name}}
Group={{config.run_user.group}}
ExecStart={{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/bin/mongod -f {{mongo.config_dir}}/mongodb.conf --fork
ExecStop={{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/bin/mongod -f {{mongo.config_dir}}/mongodb.conf --shutdown
# Other directives omitted
# (file size)
LimitFSIZE=infinity
# (cpu time)
LimitCPU=infinity
# (virtual memory size)
LimitAS=infinity
# (locked-in-memory size)
LimitMEMLOCK=infinity
# (open files)
LimitNOFILE=64000
# (processes/threads)
LimitNPROC=64000
[Install]
WantedBy=multi-user.target
EOF
Database initialization script files
Admin user
cat > /etc/ansible/roles/mongo/templates/mongo-init-admin-user.js << \EOF
db.dropUser('{{init.admin_user.username}}')
db.createUser( {user: "{{init.admin_user.username}}",pwd: "{{init.admin_user.password}}",roles: [ { role: "userAdminAnyDatabase", db: "{{init.admin_user.db}}" } ]})
db.auth("{{init.admin_user.username}}","{{init.admin_user.password}}");
EOF
Normal user
cat > /etc/ansible/roles/mongo/templates/mongo-init-normal-user.js << \EOF
{% if init.normal_user.collection is defined and (init.normal_user.collection | type_debug == 'list') and (init.normal_user.collection | count > 0) %}
{% for c in init.normal_user.collection -%}
db.createCollection("{{c}}");
{% endfor -%}
{% endif %}
// 创建不同database到用户,系统通过创建的账号访问
db.dropUser('{{init.normal_user.username}}')
db.createUser( {user: "{{init.normal_user.username}}",pwd: "{{init.normal_user.password}}",roles: [{ role: "readWrite", db: "{{init.normal_user.db}}" }]});
EOF
Cluster initialization parameters file
cat > /etc/ansible/roles/mongo/templates/init-mongo-cluster-params.js << \EOF
cfg=
{% if hostnames is defined and (hostnames | type_debug == 'list') and (hostnames | count > 0) %}
{
"_id": "{{mongo.cluster_name}}"
,"members": [
{% for h in hostnames -%}
{"_id": {{h.mongo._id}}, "host": "{{h.mongo.host}}", "priority": {{h.mongo.priority}}, "arbiterOnly": {{h.mongo.arbiterOnly | lower}} }{% if not loop.last %},
{% endif %}
{% endfor -%}
]
};
{% endif %}
rs.initiate(cfg);
EOF
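With the three hosts above and the defaults computed in default-var-init.yaml (port 7004, priority equal to the reverse loop index, last node as arbiter), the rendered file would look roughly like this:
cfg=
{
"_id": "mongo_cluster"
,"members": [
{"_id": 0, "host": "192.168.13.247:7004", "priority": 3, "arbiterOnly": false },
{"_id": 1, "host": "192.168.13.16:7004", "priority": 2, "arbiterOnly": false },
{"_id": 2, "host": "192.168.13.51:7004", "priority": 1, "arbiterOnly": true }
]
};
rs.initiate(cfg);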
Create installation tasks
Create the default-variable initialization task
cat > /etc/ansible/roles/mongo/tasks/default-var-init.yaml << \EOF
- name: 初始化 mongo 从节点是否可读取数据
set_fact:
mongo: "{{ mongo | combine({'secondary_ok': true }) }}"
when: mongo.secondary_ok is undefined or mongo.secondary_ok == None or not mongo.secondary_ok
- name: 初始化 mongo 是否需要授权 配置,默认-false
set_fact:
mongo: "{{ mongo | combine({'auth': false }) }}"
when: mongo.auth is undefined or mongo.auth == None or not mongo.auth
- name: 初始化 mongo 集群名称
set_fact:
mongo: "{{ mongo | combine({'cluster_name': 'mongo_cluster'}) }}"
when: mongo.cluster_name is undefined or mongo.cluster_name == None or not mongo.cluster_name
- name: 初始化 mongo 配置文件目录,默认在安装目录下的 etc 目录中
set_fact:
mongo: "{{ mongo | combine({'config_dir': mongo.install_dir + '/' + mongo.uncompress_folder_name + '/etc'}) }}"
when: mongo.config_dir is undefined or mongo.config_dir == None or mongo.config_dir | length == 0
- name: 初始化 mongo 数据存储目录,默认在安装目录下的 data 目录中
set_fact:
mongo: "{{ mongo | combine({'db_path': mongo.install_dir + '/' + mongo.uncompress_folder_name + '/data'}) }}"
when: mongo.db_path is undefined or mongo.db_path == None or mongo.db_path | length == 0
- name: 初始化 mongo 日志存储文件路径,默认在安装目录下的 mongodb.log文件
set_fact:
mongo: "{{ mongo | combine({'log_path': mongo.install_dir + '/' + mongo.uncompress_folder_name + '/mongodb.log'}) }}"
when: mongo.log_path is undefined or mongo.log_path == None or mongo.log_path | length == 0
- name: 初始化 mongo 进程文件路径,默认在安装目录下,文件名为 mongodb.pid
set_fact:
mongo: "{{ mongo | combine({'pid_file_path': mongo.install_dir + '/' + mongo.uncompress_folder_name + '/mongodb.pid'}) }}"
when: mongo.pid_file_path is undefined or mongo.pid_file_path == None or mongo.pid_file_path | length == 0
- name: 实例化 hostnames 默认属性
set_fact:
temp_hostnames: >-
{{
temp_hostnames | default([]) +
[
item | combine(
{
"mongo":{
"port": "7004" if (item.mongo.port is undefined or item.mongo.port == None or not item.mongo.port) else item.mongo.port | string
,"priority": ansible_loop.revindex if (item.mongo.priority is undefined or item.mongo.priority == None or not item.mongo.priority) else item.mongo.priority
,"_id": ansible_loop.index0
,"host": item.ip + ':' + ("7004" if (item.mongo.port is undefined or item.mongo.port == None or not item.mongo.port) else item.mongo.port | string)
,"arbiterOnly": false | lower | bool if (item.mongo.arbiterOnly is undefined or item.mongo.arbiterOnly == None) else item.mongo.arbiterOnly
,"arbiterOnly": true if ((item.mongo.arbiterOnly is undefined or item.mongo.arbiterOnly == None) and ansible_loop.last) else false | lower | bool if (item.mongo.arbiterOnly is undefined or item.mongo.arbiterOnly == None) else item.mongo.arbiterOnly
,"is_init_cluster_host": ansible_loop.first
}
}
,recursive=True)
]
}}
loop: "{{ hostnames }}"
loop_control:
extended: true
vars:
temp_hostnames: []
- name: 重新给hostnames属性赋值
set_fact:
hostnames: "{{temp_hostnames}}"
- name: mongo 安装后是否立即运行配置
set_fact:
config: "{{ config | combine({'run_after_install': true },recursive=True) }}"
when: config.run_after_install is undefined or config.run_after_install == None or not config.run_after_install
- name: mongo 是否开机自启
set_fact:
config: "{{ config | combine({'start_follow_server': true },recursive=True) }}"
when: config.start_follow_server is undefined or config.start_follow_server == None or not config.start_follow_server
- name: mongo 安装后运行时使用的用户配置
set_fact:
config: "{{ config | combine({'run_user': {'name': 'root'}},recursive=True) }}"
when: config.run_user.name is undefined or config.run_user.name == None or config.run_user.name | length == 0
- name: mongo 安装后运行时使用的用户组配置
set_fact:
config: "{{ config | combine({'run_user': {'group': 'root'}},recursive=True) }}"
when: config.run_user.group is undefined or config.run_user.group == None or config.run_user.group | length == 0
EOF
Create the mongo installation task
cat > /etc/ansible/roles/mongo/tasks/install-mongo.yaml << \EOF
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
# 安装方式校验
- fail:
msg: "安装 mongo 时需要指定安装方式,【mongo.install_from.local】不能为空"
when:
- mongo.install_from.local is undefined or mongo.install_from.local == None or mongo.install_from.local | length == 0
- name: 判断 mongo 安装包目录是否存在,不存在自动创建
file:
path: "{{mongo.install_from.local | dirname}}"
state: directory
when: not (mongo.install_from.local is undefined or mongo.install_from.local == None or mongo.install_from.local | length == 0)
- name: 判断 mongo 安装目录是否存在,不存在自动创建
file:
path: "{{mongo.install_dir}}"
state: directory
when: not (mongo.install_dir is undefined or mongo.install_dir == None or mongo.install_dir | length == 0)
- name: 复制 mongo 安装包到目标主机
copy:
src: "{{mongo.install_from.local}}"
dest: "{{mongo.install_from.local}}"
backup: yes
when: not (mongo.install_from.local is undefined or mongo.install_from.local == None or mongo.install_from.local | length == 0)
- name: 解压 mongo 安装包
shell:
cd {{mongo.install_from.local | dirname }}
&& tar -zxvf {{mongo.install_from.local}} -C {{mongo.install_dir}}
when: not (mongo.install_from.local is undefined or mongo.install_from.local == None or mongo.install_from.local | length == 0)
- name: 判断 mongo 配置目录是否存在,不存在自动创建
file:
path: "{{mongo.config_dir}}"
state: directory
when: not (mongo.config_dir is undefined or mongo.config_dir == None or mongo.config_dir | length == 0)
- name: 判断 mongo 数据目录是否存在,不存在自动创建
file:
path: "{{mongo.db_path}}"
state: directory
when: not (mongo.db_path is undefined or mongo.db_path == None or mongo.db_path | length == 0)
- name: 判断 mongo 日志文件是否存在,不存在自动创建
file:
path: "{{mongo.log_path}}"
state: touch
when: not (mongo.log_path is undefined or mongo.log_path == None or mongo.log_path | length == 0)
- name: 判断 mongo 进程是否存在,不存在自动创建
file:
path: "{{mongo.pid_file_path}}"
state: touch
when: not (mongo.pid_file_path is undefined or mongo.pid_file_path == None or mongo.pid_file_path | length == 0)
- name: 从 mongo 配置模板生成默认配置并将文件到目标主机
template:
src: "mongodb.conf"
dest: "{{mongo.config_dir}}/mongodb.conf"
backup: yes
- name: 设置 mongo 初始化时默认不需要授权
replace:
path: "{{mongo.config_dir}}/mongodb.conf"
regexp: 'auth=false|auth=False'
replace: "auth=false"
- name: 从 mongo 服务自启模板生成默认配置并将文件到目标主机
template:
src: "mongo.service"
dest: "/etc/systemd/system/mongo.service"
backup: yes
- name: 配置是否开机自启
shell: systemctl enable mongo.service
when: config.start_follow_server
- name: 重载 systemd 配置
shell: systemctl daemon-reload
- name: 启动 mongo
service:
name: mongo
state: started
when: config.run_after_install
- name: 从 mongo 数据库实例化【集群初始化参数】模板生成默认配置并将文件到目标主机
template:
src: "init-mongo-cluster-params.js"
dest: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/init-mongo-cluster-params.js"
when:
- current_host.mongo.is_init_cluster_host
- mongo.install_type == 'cluster'
- name: 初始化集群
shell: |
{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/bin/mongo {{current_host.ip}}:{{current_host.mongo.port}} {{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/init-mongo-cluster-params.js
when:
- current_host.mongo.is_init_cluster_host
- mongo.install_type == 'cluster'
# 导入数据库初始化任务
- import_tasks: mongo-init.yaml
- name: 集群环境下 生成 keyfile
shell: |
openssl rand -base64 756 > {{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/keyfile
chmod 600 {{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/keyfile
when:
- current_host.mongo.is_init_cluster_host
- mongo.install_type == 'cluster'
- name: 复制 keyfile 到 目标主机
copy:
src: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/keyfile"
dest: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/keyfile"
mode: '600'
backup: yes
when: mongo.install_type == 'cluster'
- name: mongo 配置文件新增 集群名称 说明
lineinfile:
path: "{{mongo.config_dir}}/mongodb.conf"
line: "# 集群名称"
state: present
insertafter: EOF
when: mongo.install_type == 'cluster'
- name: mongo 配置文件新增 集群名称
lineinfile:
path: "{{mongo.config_dir}}/mongodb.conf"
line: "replSet={{mongo.cluster_name}}"
state: present
insertafter: EOF
when:
- mongo.install_type == 'cluster'
- name: mongo 配置文件新增 配置说明
lineinfile:
path: "{{mongo.config_dir}}/mongodb.conf"
line: "# 集群日志及keyfile配置"
state: present
insertafter: EOF
when: mongo.install_type == 'cluster'
- name: mongo 配置文件新增 集群日志大小 配置
lineinfile:
path: "{{mongo.config_dir}}/mongodb.conf"
line: "oplogSize=100"
state: present
insertafter: EOF
when: mongo.install_type == 'cluster'
- name: mongo 配置文件新增 keyfile 配置
lineinfile:
path: "{{mongo.config_dir}}/mongodb.conf"
line: "keyFile={{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/keyfile"
state: present
insertafter: EOF
when: mongo.install_type == 'cluster'
- name: 修改 mongo 配置,需要授权才能操作
replace:
path: "{{mongo.config_dir}}/mongodb.conf"
regexp: 'auth=false'
replace: "auth=true"
- name: 重启 mongo
service:
name: mongo
state: restarted
when: mongo.install_type == 'cluster'
EOF
Create the database initialization task
cat > /etc/ansible/roles/mongo/tasks/mongo-init.yaml << \EOF
- name: mongo 数据库实例化管理员账户,默认-zhoujibin_root
set_fact:
init: "{{ init | combine({'admin_user': {'username': 'zhoujibin_root'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.admin_user.username is undefined or init.admin_user.username == None or init.admin_user.username | length == 0
- name: mongo 数据库实例化管理员账户密码,123456
set_fact:
init: "{{ init | combine({'admin_user': {'password': '123456'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.admin_user.password is undefined or init.admin_user.password == None or init.admin_user.password | length == 0
- name: mongo 数据库实例化管理员可访问的数据库,默认-admin
set_fact:
init: "{{ init | combine({'admin_user': {'db': 'admin'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.admin_user.db is undefined or init.admin_user.db == None or init.admin_user.db | length == 0
- name: mongo 数据库实例化普通用户,账户-zhoujibin
set_fact:
init: "{{ init | combine({'normal_user': {'username': 'zhoujibin'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.normal_user.username is undefined or init.normal_user.username == None or init.normal_user.username | length == 0
- name: mongo 数据库实例化普通用户密码,默认-123456
set_fact:
init: "{{ init | combine({'normal_user': {'password': '123456'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.normal_user.password is undefined or init.normal_user.password == None or init.normal_user.password | length == 0
- name: 正在应用集群配置,预计需要10s,请稍后。。。
pause:
seconds: 10
- name: mongo 数据库实例化普通用户可访问的数据库,默认-test
set_fact:
init: "{{ init | combine({'normal_user': {'db': 'test'}},recursive=True) }}"
when:
- current_host.mongo.is_init_cluster_host
- init.normal_user.db is undefined or init.normal_user.db == None or init.normal_user.db | length == 0
- name: 从 mongo 数据库实例化管理员用户数据模板生成默认配置并将文件到目标主机
template:
src: "mongo-init-admin-user.js"
dest: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/mongo-init-admin-user.js"
when:
- current_host.mongo.is_init_cluster_host
- name: 执行 mongo 数据库 初始化脚本
shell: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/bin/mongo {{current_host.ip}}:{{current_host.mongo.port}}/{{init.admin_user.db}} {{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/mongo-init-admin-user.js"
when:
- current_host.mongo.is_init_cluster_host
- name: 从 mongo 数据库实例化【普通用户数据】模板生成默认配置并将文件到目标主机
template:
src: "mongo-init-normal-user.js"
dest: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/mongo-init-normal-user.js"
when:
- current_host.mongo.is_init_cluster_host
- name: 执行 mongo 数据库 普通用户初始化脚本
shell: "{{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/bin/mongo {{current_host.ip}}:{{current_host.mongo.port}}/{{init.normal_user.db}} {{mongo.install_dir}}/{{mongo.uncompress_folder_name}}/mongo-init-normal-user.js"
when:
- current_host.mongo.is_init_cluster_host
EOF
Aggregate the installation tasks
cat > /etc/ansible/roles/mongo/tasks/main.yaml << \EOF
# 默认变量处理任务
- import_tasks: default-var-init.yaml
# mongo安装任务
- import_tasks: install-mongo.yaml
EOF
Create the playbook entry file
cat > /etc/ansible/install-mongo.yaml << \EOF
---
- hosts: mongo
roles:
- mongo
EOF
Run the playbook
ansible-playbook /etc/ansible/install-mongo.yaml
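After the playbook finishes, a quick ad-hoc check of the systemd unit created above can confirm that mongod is running on every node (a sketch):
ansible mongo -m shell -a "systemctl status mongo --no-pager | head -n 5"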
Test scripts
!!! Remove mongo (for testing only)
service mongo stop && rm -rf /home/zhoujibin/mongodb-linux-4.4.4
Mongo cluster test
# 主节点上测试写
/home/zhoujibin/mongodb-linux-4.4.4/bin/mongo 192.168.13.247:7004
db.auth('zhoujibin','123456');
use test;
db.createCollection("col1");
show collections
db.col1.insertOne({
"id":1,
"name":"test"
})
# 从节点上测试数据读取
/home/zhoujibin/mongodb-linux-4.4.4/bin/mongo 192.168.13.16:7004
db.auth('zhoujibin','123456');
use test;
# 执行此命令允许从节点读取数据
rs.secondaryOk()
db.col1.find()
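To confirm which node is currently primary, rs.isMaster() can be run against any member without special privileges (a sketch; adjust the path and address to your environment):
/home/zhoujibin/mongodb-linux-4.4.4/bin/mongo 192.168.13.247:7004 --eval "rs.isMaster()"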
Disable start-on-boot
systemctl disable mongo
Install Redis
Create the inventory
cat >> /etc/ansible/hosts << \EOF
# 指定分组名称,如:redis,根据实际情况调整
[redis]
k8s-master ansible_ssh_host=192.168.13.247 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-01 ansible_ssh_host=192.168.13.16 ansible_ssh_port=22 ansible_ssh_user=root
k8s-worker-02 ansible_ssh_host=192.168.13.51 ansible_ssh_port=22 ansible_ssh_user=root
EOF
Create the role directory structure
mkdir -p /etc/ansible/roles/redis
cd /etc/ansible/roles/redis/
mkdir tasks vars files files/x86_64 files/aarch64 templates
Source download link: http://download.redis.io/releases/redis-5.0.7.tar.gz
Note: if you use a custom package, make sure the extracted folder name is the same across CPU architectures.
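Note that install-standalone-redis.yaml below only unpacks the archive and starts src/redis-server via systemd, so the archive should already contain a tree compiled for the target architecture. One possible way to prepare it (the /tmp paths are examples):
cd /tmp
wget http://download.redis.io/releases/redis-5.0.7.tar.gz
tar -zxf redis-5.0.7.tar.gz
cd redis-5.0.7 && make
cd /tmp && tar -zcf /etc/ansible/roles/redis/files/x86_64/redis-5.0.7.tar.gz redis-5.0.7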
Create variables
cat > /etc/ansible/roles/redis/vars/main.yaml << \EOF
# 集群安装时需要配置多台机器
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
redis:
# 端口,默认7011
port:
# 密码,默认Redis@123456
password:
# 是否是主节点,未指定时默认取hostnames中第一台机器作为主节点,有多个时取满足条件的第一个主机
is_master:
sentinel:
# 端口,默认-27011
port:
- ip: 192.168.13.16
hostname: k8s-worker-01
redis:
# 端口,默认7011
port:
# 密码,默认Redis@123456
password:
sentinel:
# 端口,默认-27011
port:
- ip: 192.168.13.51
hostname: k8s-worker-02
redis:
# 端口,默认7011
port:
# 密码,默认Redis@123456
password:
sentinel:
# 端口,默认-27011
port:
redis:
# 安装类型:单机-standalone(默认),集群-cluster
install_type: cluster
install_from:
# 从本地安装,ansible_facts.architecture变量示例:x86_64、aarch64
local: "/etc/ansible/roles/redis/files/{{ansible_facts.architecture}}/redis-5.0.7.tar.gz"
# redis 安装目录
install_dir: /home/zhoujibin
# redis 压缩包解压后的文件夹名称,一般和压缩包名称一致
uncompress_folder_name: redis-5.0.7
# 数据目录,默认为安装目录下的 data 目录
data_dir:
# 进程文件路径,默认放在安装目录下的 pid 目录下,文件名格式:redis-{{port}}.pid
pid_file:
# 日志文件,默认放在安装目录下的 logs 目录下,文件名格式:redis-{{port}}.log
log_file:
# 配置文件目录路径,默认放在安装目录下的 conf 目录下,文件名格式:redis-{{port}}.conf
config_dir:
# 设置同一时间最大客户连接数,默认无限制。redis可以同时连接的客户端数为redis程序可以打开的最大文件描述符,当客户端连接数到达限制时,Redis会关闭新的连接并向客户端返回 max number of clients reached 错误信息,默认-10000
max_clients:
# 最大内存,默认为0,表示不做限制,合法的格式:1048576(不写单位标识字节)、1048576B、1000KB、100MB、1GB、1000K、100M、1G(64位系统不限制内存,32位系统最多使用3GB内存)
max_memory:
# 指定更新日志的条件,有三个可选参数 - no:表示等操作系统进行数据缓存同步到磁盘(快),always:表示每次更新操作后手动调用fsync()将数据写到磁盘(慢,安全), everysec:表示每秒同步一次(折衷,默认值);
appendfsync:
# 淘汰策略:
# noeviction(默认策略):对于写请求不再提供服务,直接返回错误(DEL请求和部分特殊请求除外)
# allkeys-lru:从所有key中使用LRU算法进行淘汰(LRU算法:即最近最少使用算法)
# volatile-lru:从设置了过期时间的key中使用LRU算法进行淘汰
# allkeys-random:从所有key中随机淘汰数据
# volatile-random:从设置了过期时间的key中随机淘汰
# volatile-ttl:在设置了过期时间的key中,淘汰过期时间剩余最短的
# 当使用volatile-lru、volatile-random、volatile-ttl这三种策略时,如果没有key可以被淘汰,则和noeviction一样返回错误
max_memory_policy:
# 是否开启保护模式,开启后外部可以访问,默认-no,关闭
protected_mode:
# 集群中主节点名称,默认:redis_master
master_name:
# 哨兵模式相关配置
sentinel:
# 工作目录,默认放在安装目录下的 sentinel 目录下
dir:
# 日志文件,默认放在安装目录下的 sentinel 目录下,文件名称:sentinel.log
log_file:
# 指定主节点应答哨兵sentinel的最大时间间隔,超过这个时间,哨兵主观上认为主节点下线,默认30秒 ,单位:毫秒
down_after_milliseconds:
# 指定了在发生failover主备切换时,最多可以有多少个slave同时对新的master进行同步。这个数字越小,完成failover所需的时间就越长;反之,但是如果这个数字越大,就意味着越多的slave因为replication而不可用。可以通过将这个值设为1,来保证每次只有一个slave,处于不能处理命令请求的状态,默认值-1
parallel_syncs:
# 故障转移的超时时间failover-timeout,默认三分钟,可以用在以下这些方面:
# 1. 同一个sentinel对同一个master两次failover之间的间隔时间。
# 2. 当一个slave从一个错误的master那里同步数据时开始,直到slave被纠正为从正确的master那里同步数据时结束。
# 3. 当想要取消一个正在进行的failover时所需要的时间。
# 4.当进行failover时,配置所有slaves指向新的master所需的最大时间。不过,
# 即使过了这个超时,slaves依然会被正确配置为指向master,但是就不按parallel-syncs所配置的规则来同步数据了
failover_timeout:
# 当sentinel有任何警告级别的事件发生时(比如说redis实例的主观失效和客观失效等等),将会去调用这个脚本。一个脚本的最大执行时间为60s,如果超过这个时间,脚本将会被一个SIGKILL信号终止,之后重新执行。
notification_script:
# 通用脚本,可多次被调用
client_reconfig_script:
# 程序安装后的相关参数
config:
# 安装后是否立即运行,默认-true
run_after_install:
# 启动程序时使用的用户信息,默认-root
run_user:
name:
group:
# 是否开机自启,默认-true
start_follow_server:
EOF
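As with the mongo role, blank values are filled in by default-var-init.yaml; a filled-in entry that explicitly marks the master would look roughly like this (the values are illustrative only):
hostnames:
- ip: 192.168.13.247
hostname: k8s-master
redis:
port: 7011
password: Redis@123456
is_master: true
sentinel:
port: 27011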
Create template files
redis configuration file
cat > /etc/ansible/roles/redis/templates/redis.conf << \EOF
# Redis 默认只允许本机访问,把 bind 修改为 0.0.0.0 表示允许所有远程访问。如果想指定限制访问,可设置对应的 ip。
bind 0.0.0.0
# 启动AOF(Append Only File)持久化策略
appendonly yes
{% if not (current_host.redis.password is undefined or current_host.redis.password == None or current_host.redis.password | length == 0) %}
# 密码
requirepass {{current_host.redis.password}}
{% endif %}
# 运行时的端口
port {{current_host.redis.port}}
# 是否后台运行
daemonize yes
# 数据目录
dir {{redis.data_dir}}
# 日志文件,需要确保文件存在
logfile {{redis.log_file}}
# 进程文件
pidfile {{redis.pid_file}}
# 设置同一时间最大客户连接数,默认无限制。redis可以同时连接的客户端数为redis程序可以打开的最大文件描述符,当客户端连接数到达限制时,Redis会关闭新的连接并向客户端返回 max number of clients reached 错误信息,默认-10000
maxclients {{redis.max_clients}}
# 最大内存,默认为0,表示不做限制,合法的格式:1048576(不写单位标识字节)、1048576B、1000KB、100MB、1GB、1000K、100M、1G(64位系统不限制内存,32位系统最多使用3GB内存)
maxmemory {{redis.max_memory}}
# 指定更新日志的条件,有三个可选参数 - no:表示等操作系统进行数据缓存同步到磁盘(快),always:表示每次更新操作后手动调用fsync()将数据写到磁盘(慢,安全), everysec:表示每秒同步一次(折衷,默认值);
appendfsync {{redis.appendfsync}}
# 淘汰策略:
# noeviction(默认策略):对于写请求不再提供服务,直接返回错误(DEL请求和部分特殊请求除外)
# allkeys-lru:从所有key中使用LRU算法进行淘汰(LRU算法:即最近最少使用算法)
# volatile-lru:从设置了过期时间的key中使用LRU算法进行淘汰
# allkeys-random:从所有key中随机淘汰数据
# volatile-random:从设置了过期时间的key中随机淘汰
# volatile-ttl:在设置了过期时间的key中,淘汰过期时间剩余最短的
# 当使用volatile-lru、volatile-random、volatile-ttl这三种策略时,如果没有key可以被淘汰,则和noeviction一样返回错误
maxmemory-policy {{redis.max_memory_policy}}
EOF
redis service (auto-start) unit file
cat > /etc/ansible/roles/redis/templates/redis.service << \EOF
[Unit]
Description=redis system config
After=network.target
[Service]
Type=forking
User={{config.run_user.name}}
Group={{config.run_user.group}}
ExecStart={{redis.install_dir}}/{{redis.uncompress_folder_name}}/src/redis-server {{redis.config_dir}}/redis-{{current_host.redis.port}}.conf
{% if current_host.redis.password is undefined or current_host.redis.password == None or current_host.redis.password | length == 0 %}
ExecStop={{redis.install_dir}}/{{redis.uncompress_folder_name}}/src/redis-cli -h {{current_host.ip}} -p {{current_host.redis.port}} shutdown
{% else %}
# 有密码时需要添加-a参数,添加--no-auth-warning取消警告
ExecStop={{redis.install_dir}}/{{redis.uncompress_folder_name}}/src/redis-cli -h {{current_host.ip}} -p {{current_host.redis.port}} -a {{current_host.redis.password}} --no-auth-warning shutdown
{% endif %}
# Other directives omitted
# (file size)
LimitFSIZE=infinity
# (cpu time)
LimitCPU=infinity
# (virtual memory size)
LimitAS=infinity
# (locked-in-memory size)
LimitMEMLOCK=infinity
# (open files)
LimitNOFILE=64000
# (processes/threads)
LimitNPROC=64000
[Install]
WantedBy=multi-user.target
EOF
sentinel configuration file
cat > /etc/ansible/roles/redis/templates/sentinel.conf << \EOF
# 哨兵sentinel实例运行的端口,默认27011
port {{current_host.sentinel.port}}
# 哨兵sentinel的工作目录
dir {{sentinel.dir}}
# 是否开启保护模式,默认开启。
protected-mode no
# 是否设置为后台启动。
daemonize yes
# 哨兵sentinel的日志文件
logfile {{sentinel.log_file}}
# 哨兵sentinel监控的redis主节点的
## ip:master节点主机ip地址
## port:master节点redis端口号
## master-name:可以自己命名的主节点名字
## quorum:当这些quorum个数sentinel哨兵认为master主节点失联 那么这时
# 客观上认为主节点失联了
# sentinel monitor <master-name> <ip> <redis-port> <quorum>
sentinel monitor {{redis.master_name}} {{master_host.ip}} {{master_host.redis.port}} {{sentinel_monitor_quorum}}
# 当在Redis实例中开启了requirepass,所有连接Redis实例的客户端都要提供密码。
# sentinel auth-pass <master-name> <password>
sentinel auth-pass {{redis.master_name}} {{master_host.redis.password}}
# 指定主节点应答哨兵sentinel的最大时间间隔,超过这个时间,哨兵主观上认为主节点下线,
#默认30秒
# sentinel down-after-milliseconds <master-name> <milliseconds>
sentinel down-after-milliseconds {{redis.master_name}} {{sentinel.down_after_milliseconds}}
# 指定了在发生failover主备切换时,最多可以有多少个slave同时对新的master进行同步。
#这个数字越小,完成failover所需的时间就越长;反之,但是如果这个数字越大,就意味着
#越多的slave因为replication而不可用。可以通过将这个值设为1,来保证每次只有一个slave,
#处于不能处理命令请求的状态。
# sentinel parallel-syncs <master-name> <numslaves>
sentinel parallel-syncs {{redis.master_name}} {{sentinel.parallel_syncs}}
# 故障转移的超时时间failover-timeout,默认三分钟,可以用在以下这些方面:
## 1. 同一个sentinel对同一个master两次failover之间的间隔时间。
## 2. 当一个slave从一个错误的master那里同步数据时开始,直到slave被纠正为从正确
#的master那里同步数据时结束。
## 3. 当想要取消一个正在进行的failover时所需要的时间。
## 4.当进行failover时,配置所有slaves指向新的master所需的最大时间。不过,
#即使过了这个超时,slaves依然会被正确配置为指向master,但是就不按parallel-syncs所配置的规则来同步数据了
# sentinel failover-timeout <master-name> <milliseconds>
sentinel failover-timeout {{redis.master_name}} {{sentinel.failover_timeout}}
{% if not (sentinel.notification_script is undefined or sentinel.notification_script == None or sentinel.notification_script | length == 0) %}
# 当sentinel有任何警告级别的事件发生时(比如说redis实例的主观失效和客观失效等等),
#将会去调用这个脚本。一个脚本的最大执行时间为60s,如果超过这个时间,
#脚本将会被一个SIGKILL信号终止,之后重新执行。
# 对于脚本的运行结果有以下规则:
## 1. 若脚本执行后返回1,那么该脚本稍后将会被再次执行,重复次数目前默认为10。
## 2. 若脚本执行后返回2,或者比2更高的一个返回值,脚本将不会重复执行。
## 3. 如果脚本在执行过程中由于收到系统中断信号被终止了,则同返回值为1时的行为相同。
# sentinel notification-script <master-name> <script-path>
sentinel notification-script {{redis.master_name}} {{sentinel.notification_script}}
{% endif %}
{% if not (sentinel.client_reconfig_script is undefined or sentinel.client_reconfig_script == None or sentinel.client_reconfig_script | length == 0) %}
# 这个脚本应该是通用的,能被多次调用,不是针对性的。
# sentinel client-reconfig-script <master-name> <script-path>
sentinel client-reconfig-script {{redis.master_name}} {{sentinel.client_reconfig_script}}
{% endif %}
EOF
Create installation tasks
Required-parameter validation task
cat > /etc/ansible/roles/redis/tasks/valid-required-params.yaml << \EOF
- fail:
msg: "安装【redis】时【安装方式-install_from.local】不能为空"
when:
- redis.install_from.local is undefined or redis.install_from.local == None or redis.install_from.local | length == 0
- fail:
msg: "安装【redis】时【安装目录-install_dir】不能为空"
when:
- redis.install_dir is undefined or redis.install_dir == None or redis.install_dir | length == 0
- fail:
msg: "安装【redis】时【压缩包解压后的文件夹名称-uncompress_folder_name】不能为空"
when:
- redis.uncompress_folder_name is undefined or redis.uncompress_folder_name == None or redis.uncompress_folder_name | length == 0
- fail:
msg: "安装【redis】时【服务器列表-hostnames】不能为空"
when: not (hostnames is defined and (hostnames | type_debug == 'list') and (hostnames | count > 0))
EOF
Create the default-variable initialization task
cat > /etc/ansible/roles/redis/tasks/default-var-init.yaml << \EOF
- name: 初始化 redis 安装方式
set_fact:
redis: "{{ redis | combine({'install_type': 'standalone'}) }}"
when: redis.install_type is undefined or redis.install_type == None or redis.install_type | length == 0
- name: 初始化 redis 集群主节点名称
set_fact:
redis: "{{ redis | combine({'master_name': 'redis_master'}) }}"
when: redis.master_name is undefined or redis.master_name == None or redis.master_name | length == 0
- name: 初始化 redis 数据目录
set_fact:
redis: "{{ redis | combine({'data_dir': redis.install_dir + '/' + redis.uncompress_folder_name + '/data'}) }}"
when: redis.data_dir is undefined or redis.data_dir == None or redis.data_dir | length == 0
- name: 初始化 redis 配置目录
set_fact:
redis: "{{ redis | combine({'config_dir': redis.install_dir + '/' + redis.uncompress_folder_name + '/conf'}) }}"
when: redis.config_dir is undefined or redis.config_dir == None or redis.config_dir | length == 0
- name: 实例化 hostnames 默认属性
set_fact:
temp_hostnames: >-
{{
temp_hostnames | default([]) +
[
item | combine(
{
"redis":{
"port": "7011" if (item.redis.port is undefined or item.redis.port == None or not item.redis.port) else item.redis.port | string
,"password": 'Redis@123456' if (item.redis.password is undefined or item.redis.password == None or item.redis.password | length == 0) else item.redis.password
,"is_master": false | lower | bool if (item.redis.is_master is undefined or item.redis.is_master == None) else item.redis.is_master
,"is_master": true if ((item.redis.is_master is undefined or item.redis.is_master == None) and ansible_loop.first) else false | lower | bool if (item.redis.is_master is undefined or item.redis.is_master == None) else item.redis.is_master
,"master_auth": 'master_auth' if (item.redis.master_auth is undefined or item.redis.master_auth == None or item.redis.master_auth | length == 0) else item.redis.master_auth
}
,"sentinel": {
"port": "27011" if (item.sentinel.port is undefined or item.sentinel.port == None or not item.sentinel.port) else item.sentinel.port | string
}
}
,recursive=True)
]
}}
loop: "{{ hostnames }}"
loop_control:
extended: true
vars:
temp_hostnames: []
- name: 重新给 hostnames 属性赋值
set_fact:
hostnames: "{{temp_hostnames}}"
- name: 获取定义的所有redis主机信息
set_fact:
temp_redis: "{{hostnames | json_query('[].{domain:hostname,ip:ip,port:redis.port | string}') }}"
- name: redis 安装后是否立即运行配置
set_fact:
config: "{{ config | combine({'run_after_install': true },recursive=True) }}"
when: config.run_after_install is undefined or config.run_after_install == None or not config.run_after_install
- name: redis 是否开机自启
set_fact:
config: "{{ config | combine({'start_follow_server': true },recursive=True) }}"
when: config.start_follow_server is undefined or config.start_follow_server == None or not config.start_follow_server
- name: redis 安装后运行时使用的用户配置
set_fact:
config: "{{ config | combine({'run_user': {'name': 'root'}},recursive=True) }}"
when: config.run_user.name is undefined or config.run_user.name == None or config.run_user.name | length == 0
- name: redis 安装后运行时使用的用户组配置
set_fact:
config: "{{ config | combine({'run_user': {'group': 'root'}},recursive=True) }}"
when: config.run_user.group is undefined or config.run_user.group == None or config.run_user.group | length == 0
- name: 获得当前主机下配置的所有自定义变量
set_fact:
# 获取当前主机自定义的变量
current_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个json数组,取第一条即可
query_condition: "[?hostname=='{{inventory_hostname}}'] | [0]"
- name: 初始化 redis 进程文件
set_fact:
redis: "{{ redis | combine({'pid_file': redis.install_dir + '/' + redis.uncompress_folder_name + '/pid/redis-'+ current_host.redis.port+'.pid'}) }}"
when: redis.pid_file is undefined or redis.pid_file == None or redis.pid_file | length == 0
- name: 初始化 redis 日志文件
set_fact:
redis: "{{ redis | combine({'log_file': redis.install_dir + '/' + redis.uncompress_folder_name + '/logs/redis-'+current_host.redis.port+'.log'}) }}"
when: redis.log_file is undefined or redis.log_file == None or redis.log_file | length == 0
- name: 初始化 redis 设置同一时间最大客户连接数
set_fact:
redis: "{{ redis | combine({'max_clients': 10000}) }}"
when: redis.max_clients is undefined or redis.max_clients == None or not redis.max_clients
- name: 初始化 redis 最大内存
set_fact:
redis: "{{ redis | combine({'max_memory': 0}) }}"
when: redis.max_memory is undefined or redis.max_memory == None or not redis.max_memory
- name: 初始化 redis 淘汰策略
set_fact:
redis: "{{ redis | combine({'max_memory_policy': 'noeviction'}) }}"
when: redis.max_memory_policy is undefined or redis.max_memory_policy == None or redis.max_memory_policy | length == 0
- name: 初始化 redis 是否开启保护模式
set_fact:
redis: "{{ redis | combine({'protected_mode': 'no'}) }}"
when: redis.protected_mode is undefined or redis.protected_mode == None or redis.protected_mode | length == 0
- name: 初始化 redis 更新日志条件(appendfsync)
set_fact:
redis: "{{ redis | combine({'appendfsync': 'everysec'}) }}"
when: redis.appendfsync is undefined or redis.appendfsync == None or redis.appendfsync | length == 0
- name: 初始化 redis sentinel 工作目录
set_fact:
sentinel: "{{ sentinel | combine({'dir': redis.install_dir + '/' + redis.uncompress_folder_name + '/sentinel'}) }}"
when: sentinel.dir is undefined or sentinel.dir == None or sentinel.dir | length == 0
- name: 初始化 redis sentinel 日志文件
set_fact:
sentinel: "{{ sentinel | combine({'log_file': redis.install_dir + '/' + redis.uncompress_folder_name + '/sentinel/sentinel.log'}) }}"
when: sentinel.log_file is undefined or sentinel.log_file == None or sentinel.log_file | length == 0
- name: 初始化 redis sentinel 指定主节点应答哨兵sentinel的最大时间间隔
set_fact:
sentinel: "{{ sentinel | combine({'down_after_milliseconds': 30000}) }}"
when: sentinel.down_after_milliseconds is undefined or sentinel.down_after_milliseconds == None or sentinel.down_after_milliseconds | length == 0
- name: 初始化 redis sentinel 指定在发生failover主备切换时,最多可以有多少个slave同时对新的master进行同步
set_fact:
sentinel: "{{ sentinel | combine({'parallel_syncs': 1}) }}"
when: sentinel.parallel_syncs is undefined or sentinel.parallel_syncs == None or sentinel.parallel_syncs | length == 0
- name: 初始化 redis sentinel 指定故障转移的超时时间
set_fact:
sentinel: "{{ sentinel | combine({'failover_timeout': 18000}) }}"
when: sentinel.failover_timeout is undefined or sentinel.failover_timeout == None or sentinel.failover_timeout | length == 0
EOF
Redis (standalone) installation task
cat > /etc/ansible/roles/redis/tasks/install-standalone-redis.yaml << \EOF
- name: 判断【redis】【安装包目录】是否存在,不存在自动创建
file:
path: "{{redis.install_from.local | dirname}}"
state: directory
when: not (redis.install_from.local is undefined or redis.install_from.local == None or redis.install_from.local | length == 0)
- name: 判断【redis】【安装目录】是否存在,不存在自动创建
file:
path: "{{redis.install_dir}}"
state: directory
when: not (redis.install_dir is undefined or redis.install_dir == None or redis.install_dir | length == 0)
- name: 复制【redis】【安装包到目标主机】
copy:
src: "{{redis.install_from.local}}"
dest: "{{redis.install_from.local}}"
backup: yes
when: not (redis.install_from.local is undefined or redis.install_from.local == None or redis.install_from.local | length == 0)
- name: 解压【redis 安装包】
shell:
cd {{redis.install_from.local | dirname }}
&& tar -zxvf {{redis.install_from.local}} -C {{redis.install_dir}}
when: not (redis.install_from.local is undefined or redis.install_from.local == None or redis.install_from.local | length == 0)
- name: 判断【redis】【配置目录】是否存在,不存在自动创建
file:
path: "{{redis.config_dir}}"
state: directory
when: not (redis.config_dir is undefined or redis.config_dir == None or redis.config_dir | length == 0)
- name: 判断【redis】【数据目录】是否存在,不存在自动创建
file:
path: "{{redis.data_dir}}"
state: directory
when: not (redis.data_dir is undefined or redis.data_dir == None or redis.data_dir | length == 0)
- name: 判断【redis】【进程目录】是否存在,不存在自动创建
file:
path: "{{redis.pid_file | dirname}}"
state: directory
when: not (redis.pid_file is undefined or redis.pid_file == None or redis.pid_file | length == 0)
- name: 判断【redis】【进程文件】是否存在,不存在自动创建
file:
path: "{{redis.pid_file}}"
state: touch
mode: '600'
when: not (redis.pid_file is undefined or redis.pid_file == None or redis.pid_file | length == 0)
- name: 判断【redis】【日志目录】是否存在,不存在自动创建
file:
path: "{{redis.log_file | dirname}}"
state: directory
when: not (redis.log_file is undefined or redis.log_file == None or redis.log_file | length == 0)
- name: 判断【redis】【日志文件】是否存在,不存在自动创建
file:
path: "{{redis.log_file}}"
state: touch
mode: '600'
when: not (redis.log_file is undefined or redis.log_file == None or redis.log_file | length == 0)
- name: 从【redis】【单机版配置模板】生成默认配置并将文件到目标主机
template:
src: "redis.conf"
dest: "{{redis.config_dir}}/redis-{{current_host.redis.port}}.conf"
backup: yes
- name: 从【redis】【服务自启模板】生成默认配置并将文件到目标主机
template:
src: "redis.service"
dest: "/etc/systemd/system/redis-{{current_host.redis.port}}.service"
backup: yes
- name: 配置【redis】【是否开机自启】
shell: "systemctl enable redis-{{current_host.redis.port}}.service"
when: config.start_follow_server
- name: 重载【systemd】配置
shell: systemctl daemon-reload
- name: 启动【redis】
service:
name: "redis-{{current_host.redis.port}}"
state: started
when: config.run_after_install
EOF
Redis cluster installation task
cat > /etc/ansible/roles/redis/tasks/install-cluster-redis.yaml << \EOF
- name: 判断【redis】【集群配置文件】是否存在,不存在自动创建
file:
path: "{{redis.config_dir}}/cluster-{{current_host.redis.port}}.conf"
state: touch
when: redis.install_type == 'cluster'
- name: 获取 redis master 节点主机
set_fact:
# 获取 master 节点主机信息
master_host: "{{hostnames | json_query(query_condition) }}"
vars:
# 查询出的结果为一个 json 数组,取第一条即可
query_condition: "[?redis.is_master==`true`] | [0]"
when: redis.install_type == 'cluster'
- name: 设置 redis 主节点 密码说明
lineinfile:
path: "{{redis.config_dir}}/redis-{{current_host.redis.port}}.conf"
line: "# master主节点密码"
state: present
insertafter: EOF
when: redis.install_type == 'cluster'
- name: 设置 redis 主节点 密码
lineinfile:
path: "{{redis.config_dir}}/redis-{{current_host.redis.port}}.conf"
line: "masterauth {{master_host.redis.password}}"
state: present
insertafter: EOF
when: redis.install_type == 'cluster'
- name: 从节点 设置 redis 主节点 ip、端口说明
lineinfile:
path: "{{redis.config_dir}}/redis-{{current_host.redis.port}}.conf"
line: "# master 主节点ip、端口"
state: present
insertafter: EOF
when:
- redis.install_type == 'cluster'
- not current_host.redis.is_master
- name: 从节点 设置 redis 主节点 ip、端口
lineinfile:
path: "{{redis.config_dir}}/redis-{{current_host.redis.port}}.conf"
line: "replicaof {{master_host.ip}} {{master_host.redis.port}}"
state: present
insertafter: EOF
when:
- redis.install_type == 'cluster'
- not current_host.redis.is_master
- name: 删除【redis】从节点的 aof 文件
shell: rm -rf {{redis.install_dir}}/{{redis.uncompress_folder_name}}/appendonly.aof
when:
- redis.install_type == 'cluster'
- not current_host.redis.is_master
- name: 重启【redis】节点
service:
name: "redis-{{current_host.redis.port}}"
state: restarted
when:
- redis.install_type == 'cluster'
EOF
Sentinel installation task
cat > /etc/ansible/roles/redis/tasks/install-sentinel.yaml << \EOF
- name: 判断【sentinel】【配置目录】是否存在,不存在自动创建
file:
path: "{{sentinel.dir}}"
state: directory
when:
- not (sentinel.dir is undefined or sentinel.dir == None or sentinel.dir | length == 0)
- redis.install_type == 'cluster'
- name: 判断【sentinel】【日志文件】是否存在,不存在自动创建
file:
path: "{{sentinel.log_file}}"
state: touch
when:
- not (sentinel.log_file is undefined or sentinel.log_file == None or sentinel.log_file | length == 0)
- redis.install_type == 'cluster'
- name: 计算【sentinel】的 quorum(非主节点数量 - 1)
set_fact:
# 获取非 master 节点主机的数量并减 1
sentinel_monitor_quorum: "{{hostnames | json_query(query_condition) | length - 1 }}"
vars:
query_condition: "[?redis.is_master==`false`]"
when: redis.install_type == 'cluster'
- name: print val
debug:
msg: "{{sentinel_monitor_quorum}}"
when: redis.install_type == 'cluster'
- name: 从【sentinel】【配置文件】生成默认配置并将文件到目标主机
template:
src: "sentinel.conf"
dest: "{{sentinel.dir}}/sentinel.conf"
backup: yes
when: redis.install_type == 'cluster'
- name: 启动【sentinel】
shell: |
{{redis.install_dir}}/{{redis.uncompress_folder_name}}/src/redis-sentinel {{sentinel.dir}}/sentinel.conf
when: redis.install_type == 'cluster'
EOF
Aggregate the installation tasks
cat > /etc/ansible/roles/redis/tasks/main.yaml << \EOF
# 必填参数校验
- import_tasks: valid-required-params.yaml
# 默认变量处理任务
- import_tasks: default-var-init.yaml
# redis 单机版安装任务
- import_tasks: install-standalone-redis.yaml
# redis 集群版安装任务
- import_tasks: install-cluster-redis.yaml
# sentinel 安装任务
- import_tasks: install-sentinel.yaml
EOF
Create the playbook entry file
cat > /etc/ansible/install-redis.yaml << \EOF
---
- hosts: redis
roles:
- redis
EOF
Run the playbook
ansible-playbook /etc/ansible/install-redis.yaml
Test scripts
Check cluster status
# redis集群信息查看
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 7011 -a Redis@123456 info replication
# sentinel信息查看(sentinel 默认未配置密码,无需 -a 参数)
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 27011 info sentinel
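The sentinel can also be asked directly which address it currently considers the master; redis_master is the default master_name set in default-var-init.yaml.
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 27011 sentinel get-master-addr-by-name redis_master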
Verify data replication
# 主节点
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 7011 -a Redis@123456 set name test
# 从节点查看数据
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 7011 -a Redis@123456 get name
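A simple failover test (a sketch): stop redis on the current master, wait roughly down-after-milliseconds (30s by default), then re-run the info replication command above on a surviving node and check that role:master has moved.
# run on the current master node
service redis-7011 stop
# run on one of the remaining nodes about 30s later
/home/zhoujibin/redis-5.0.7/src/redis-cli -p 7011 -a Redis@123456 info replication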
!!! Remove redis (for testing only)
service redis-7011 stop && rm -rf /home/zhoujibin/redis-5.0.7
Disable start-on-boot
systemctl disable redis-7011