1. Add a new VMware virtual machine
1.1 Environment
Hostname    IP address              Role            Description
compute2    eth0: 118.190.201.32    Compute node    1 vCPU, 2 GB RAM, one 50 GB disk sda (thin provisioned)
Note: during installation the NICs are eth0 and eth1.
1.2 Enable virtualization and add a new disk

1.3 Disable unneeded services
[root@compute2 ~]# getenforce
Disabled
[root@compute2 ~]# systemctl status firewalld.service
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
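If SELinux or firewalld is still active on the new node, a minimal sketch of turning them off (assuming a CentOS 7 host, as in the rest of this series) is:

# Stop firewalld now and keep it off after reboot
systemctl stop firewalld.service
systemctl disable firewalld.service
# Put SELinux in permissive mode now and disable it on the next boot
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config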
1.4 Hostname resolution
[root@compute2 ~]# scp 118.190.201.11:/etc/hosts /etc/hosts
[root@compute2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
118.190.201.11 controller
118.190.201.32 compute2
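As a quick sanity check (not captured in the original output), confirm that the controller name now resolves from the new node:

[root@compute2 ~]# ping -c 2 controller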
1.5 Change the YUM repository
cd /etc/yum.repos.d/
ls
mkdir test
mv *.repo test/
vim openstack.repo
[openstack]
name=openstack
baseurl=http://118.190.201.38/repo/
gpgcheck=0

## Run the following command
~]# yum makecache    # build the local metadata cache
Loaded plugins: fastestmirror
openstack                     | 2.9 kB  00:00:00
(1/3): openstack/filelists_db | 465 kB  00:00:00
(2/3): openstack/other_db     | 211 kB  00:00:00
(3/3): openstack/primary_db   | 398 kB  00:00:00
Loading mirror speeds from cached hostfile
Metadata Cache Created
1.6 Time synchronization
[root@compute2 yum.repos.d]# yum install -y chrony
# Synchronize with the controller node
[root@compute2 yum.repos.d]# sed -i '3s#.*#server controller iburst#;4d;5d;6d' /etc/chrony.conf
# Enable the service at boot and start it
systemctl enable chronyd.service
systemctl start chronyd.service
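To confirm that the node is actually syncing from controller (a verification step not shown in the original), chrony can list its time sources:

[root@compute2 ~]# chronyc sources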
2. Install the OpenStack packages
yum install python-openstackclient -y
yum install openstack-selinux -y
yum install openstack-nova-compute -y
yum install openstack-neutron-linuxbridge ebtables ipset -y
2.1 Edit the configuration files
[root@compute2 ~]# cp /etc/nova/nova.conf{,.bak}
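The full file shown below keeps only the section headers and the settings that were changed. One common way to regenerate such a stripped-down file from the backup before editing (a convenience step, not part of the original text) is:

# Rebuild nova.conf from the backup with comments and blank lines removed
egrep -v '^#|^$' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf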
## Complete contents of nova.conf
[root@compute2 ~]# cat /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 118.190.201.32
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[xenserver]
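Because this compute node is itself a VMware virtual machine, it is worth checking whether hardware virtualization is passed through. The upstream install guide suggests the following check and, if it returns 0, switching libvirt to plain QEMU (this step is not in the original text):

[root@compute2 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
# If the command returns 0, add the following to /etc/nova/nova.conf:
# [libvirt]
# virt_type = qemu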
2.2 Add the compute node to the cell database
Run the following commands on the controller node.
Source the admin credentials to enable admin-only CLI commands, then confirm that the compute host is present in the database:
[root@controller ~]# source admin-openrc
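The confirmation command used by the upstream install guide is shown below; its output is omitted here because it was not captured in the original:

[root@controller ~]# openstack compute service list --service nova-compute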
When you add new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them. Alternatively, you can set an appropriate discovery interval in /etc/nova/nova.conf:
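The interval mentioned above is the discover_hosts_in_cells_interval option in the [scheduler] section; 300 seconds is the value suggested by the upstream documentation:

[scheduler]
discover_hosts_in_cells_interval = 300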
Discover the compute hosts:
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Modify the Networking service configuration file
[root@compute2 ~]# cp /etc/neutron/neutron.conf{,.bak}
[root@compute2 ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
[agent]
[cors]
[cors.subdomain]
[database]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack
[oslo_policy]
[qos]
[quotas]
[ssl]
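The original text does not show the Linux bridge agent configuration on the new node. It is normally copied or adapted from an existing compute node; a minimal sketch is given below, where the provider interface name (eth0) and the VXLAN setting are assumptions for this environment and must match your existing compute1 configuration:

[root@compute2 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@compute2 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:eth0
[vxlan]
enable_vxlan = False
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver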
2.3 Start the services
# Compute service
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
# Networking service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
# Verify from the controller node
[root@controller ~]# source admin-openrc
[root@controller ~]# nova service-list
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+
| 1  | nova-consoleauth | controller | internal | enabled | up    | 2018-07-22T05:34:12.000000 | -               |
| 2  | nova-scheduler   | controller | internal | enabled | up    | 2018-07-22T05:34:13.000000 | -               |
| 3  | nova-conductor   | controller | internal | enabled | up    | 2018-07-22T05:34:12.000000 | -               |
| 7  | nova-compute     | compute1   | nova     | enabled | down  | 2018-07-20T19:29:21.000000 | -               |
| 8  | nova-compute     | compute2   | nova     | enabled | up    | 2018-07-22T05:34:08.000000 | -               |
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+
# Verify the networking service
[root@controller ~]# neutron agent-list
# Check instances running on the new compute node
[root@compute2 ~]# virsh list
Specify which host an instance is created on, to verify the new compute node and its resource management (see the sketch below).
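One way to target the new node directly when booting a test instance is the zone:host form of --availability-zone; the flavor, image, and network ID below are hypothetical placeholders for this environment:

[root@controller ~]# openstack server create --flavor m1.tiny --image cirros \
  --nic net-id=<NET_ID> --availability-zone nova:compute2 test-on-compute2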

Add the host to an availability zone so that the zone can be selected the next time an instance is created (see the sketch below).
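If you manage this through host aggregates, a minimal sketch (the aggregate and zone names are made up for illustration) is:

# Create an aggregate that exposes a new availability zone, then add the host
[root@controller ~]# openstack aggregate create --zone az-compute2 agg-compute2
[root@controller ~]# openstack aggregate add host agg-compute2 compute2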
