The previous article covered installing and configuring the Glance image service. This article covers installing and configuring Nova, the OpenStack compute service.
------------------------------------------------------
Nova-related ports:
api:8774
metadata:8775
novncproxy:6080
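Once the services in section 4.5 are running, you can confirm these ports are actually listening with something like the following (an optional check; ss ships with the iproute package on CentOS 7, and 8778 is the placement API served by httpd):
ss -tnlp | grep -E '8774|8775|6080|8778'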
4.1. Install the Nova compute service on the controller node
1) Create the Nova databases
# Note: this release of Nova adds two new databases (nova_cell0 and placement), so pay attention here
mysql -u root -p123456
-----------------------------------
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'placement';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';
FLUSH PRIVILEGES;
show databases;
select user,host from mysql.user;
exit
----------------------------------
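Optionally, confirm the grants took effect by connecting as the new accounts (a quick sketch, assuming the passwords created above):
mysql -unova -pnova -e "SHOW DATABASES;"
mysql -uplacement -pplacement -e "SHOW DATABASES;"
# each account should see only its own databases plus information_schema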
4.2. Register the Nova service with Keystone
# Create the service credentials
1) Create the nova user in Keystone
cd /server/tools
source keystone-admin-pass.sh
openstack user create --domain default --password=nova nova
openstack user list
2) Grant the nova user the admin role in the service project
# The following command produces no output
openstack role add --project service --user nova admin
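Since the command prints nothing, you can optionally confirm the assignment afterwards (output columns may vary slightly between client versions):
openstack role assignment list --user nova --project service --names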
3) Create the Nova compute service entity
openstack service create --name nova --description "OpenStack Compute" compute
openstack service list
4) Create the compute service API endpoints
# Compute service endpoints
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack endpoint list
5) This release of Nova adds the placement service
# Likewise, create and register the service credentials for placement
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
# Create the endpoints for the placement API
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
openstack endpoint list
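If the endpoint list gets long, it can be filtered per service to confirm the three endpoints each for compute and placement (standard client filters, shown as an optional check):
openstack endpoint list --service compute
openstack endpoint list --service placement
openstack catalog list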
# Done
4.3. Install the Nova services on the controller node
1) Install the Nova packages
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api -y
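As an optional sanity check that the packages landed:
rpm -qa | grep openstack-nova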
2) Quickly configure Nova with openstack-config
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.1.81
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack@controller
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@controller/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@controller/nova
openstack-config --set /etc/nova/nova.conf placement_database connection mysql+pymysql://placement:placement@controller/placement
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc server_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password placement
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
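openstack-config is a thin wrapper around crudini, so individual keys can also be read back to spot-check a value, for example (these should print nova.virt.firewall.NoopFirewallDriver and http://controller:5000/v3 given the settings above):
openstack-config --get /etc/nova/nova.conf DEFAULT firewall_driver
openstack-config --get /etc/nova/nova.conf placement auth_url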
# By default, the compute service uses its own built-in firewall driver. Since the networking service (Neutron) provides a firewall, Nova's built-in firewall must be disabled by setting the driver to nova.virt.firewall.NoopFirewallDriver.
# Check the effective Nova configuration
egrep -v "^#|^$" /etc/nova/nova.conf
# Example output:
[root@openstack01 tools]# egrep -v "^#|^$" /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
my_ip = 192.168.1.81
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
transport_url = rabbit://openstack:openstack@controller
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:nova@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
connection = mysql+pymysql://nova:nova@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
[placement_database]
connection = mysql+pymysql://placement:placement@controller/placement
[powervm]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300 # how often (in seconds) the scheduler checks for newly added compute hosts, so freshly installed compute nodes join the cluster automatically
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]
# The configuration above is what takes effect
3) Edit the Nova placement API virtual host configuration
# Due to a packaging bug, the placement API virtual host file needs extra directives added. The complete file should look like this:
vim /etc/httpd/conf.d/00-nova-placement-api.conf
-----------------------------------
Listen 8778
<VirtualHost *:8778>
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
WSGIScriptAlias / /usr/bin/nova-placement-api
<IfVersion >= 2.4>
ErrorLogFormat "%M"
</IfVersion>
ErrorLog /var/log/nova/nova-placement-api.log
#SSLEngine On
#SSLCertificateFile ...
#SSLCertificateKeyFile ...
</VirtualHost>
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
# made by zhaoshuai
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
-------------------------------------
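Before restarting, it is worth letting Apache validate the syntax of the edited file (either command works on CentOS 7):
httpd -t
# or: apachectl configtest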
# Restart the httpd service after editing
systemctl restart httpd
systemctl status httpd
# Example output:
[root@openstack01 conf.d]# systemctl restart httpd
[root@openstack01 conf.d]# systemctl status httpd
● httpd.service - The Apache HTTP Server
Loaded: loaded (/usr/lib/systemd/system/httpd.service; enabled; vendor preset: disabled)
Active: active (running) since Mon 2018-10-29 13:56:03 CST; 134ms ago
Docs: man:httpd(8)
man:apachectl(8)
Process: 55849 ExecStop=/bin/kill -WINCH ${MAINPID} (code=exited, status=0/SUCCESS)
Main PID: 55861 (httpd)
Status: "Processing requests..."
CGroup: /system.slice/httpd.service
├─55861 /usr/sbin/httpd -DFOREGROUND
├─55862 /usr/sbin/httpd -DFOREGROUND
├─55863 /usr/sbin/httpd -DFOREGROUND
├─55864 /usr/sbin/httpd -DFOREGROUND
├─55865 (wsgi:keystone- -DFOREGROUND
├─55866 (wsgi:keystone- -DFOREGROUND
├─55867 (wsgi:keystone- -DFOREGROUND
├─55868 (wsgi:keystone- -DFOREGROUND
├─55869 (wsgi:keystone- -DFOREGROUND
├─55870 /usr/sbin/httpd -DFOREGROUND
├─55871 /usr/sbin/httpd -DFOREGROUND
├─55873 /usr/sbin/httpd -DFOREGROUND
├─55874 /usr/sbin/httpd -DFOREGROUND
└─55875 /usr/sbin/httpd -DFOREGROUND
Oct 29 13:56:03 openstack01.zuiyoujie.com systemd[1]: Starting The Apache HTTP Server...
Oct 29 13:56:03 openstack01.zuiyoujie.com systemd[1]: Started The Apache HTTP Server.
# At this point, the Nova packages on the controller node are installed
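With httpd back up, the placement API should already answer on port 8778; a simple check (shown as an optional sketch) is to request its version document:
curl http://controller:8778
# expected: a short JSON body listing the available placement API versions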
4.4. Sync the Nova databases (note the order)
# nova_api has 32 tables, placement has 32, nova_cell0 has 110, and nova also has 110
1) Initialize the nova_api and placement databases
su -s /bin/sh -c "nova-manage api_db sync" nova
# Verify the databases
mysql -h192.168.1.81 -unova -pnova -e "use nova_api;show tables;"
mysql -h192.168.1.81 -uplacement -pplacement -e "use placement;show tables;"
# Example output:
[root@openstack01 tools]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@openstack01 tools]# mysql -h192.168.1.81 -unova -pnova -e "use nova_api;show tables;"
+------------------------------+
| Tables_in_nova_api |
+------------------------------+
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| build_requests |
| cell_mappings |
| consumers |
| flavor_extra_specs |
| flavor_projects |
| flavors |
| host_mappings |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_mappings |
| inventories |
| key_pairs |
| migrate_version |
| placement_aggregates |
| project_user_quotas |
| projects |
| quota_classes |
| quota_usages |
| quotas |
| request_specs |
| reservations |
| resource_classes |
| resource_provider_aggregates |
| resource_provider_traits |
| resource_providers |
| traits |
| users |
+------------------------------+
[root@openstack01 tools]# mysql -h192.168.1.81 -uplacement -pplacement -e "use placement;show tables;"
+------------------------------+
| Tables_in_placement |
+------------------------------+
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| build_requests |
| cell_mappings |
| consumers |
| flavor_extra_specs |
| flavor_projects |
| flavors |
| host_mappings |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_mappings |
| inventories |
| key_pairs |
| migrate_version |
| placement_aggregates |
| project_user_quotas |
| projects |
| quota_classes |
| quota_usages |
| quotas |
| request_specs |
| reservations |
| resource_classes |
| resource_provider_aggregates |
| resource_provider_traits |
| resource_providers |
| traits |
| users |
+------------------------------+
# Comparing the two, nova_api and placement each have 32 tables; the difference is that the cell_mappings table in nova_api will hold two extra rows containing the cell configuration (the Nova database and RabbitMQ connection info)
2) Initialize the nova_cell0 and nova databases
# Register the cell0 database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# Create the cell1 cell
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Initialize the nova database
su -s /bin/sh -c "nova-manage db sync" nova
# Verify that cell0 and cell1 registered successfully
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# Verify the databases
mysql -h192.168.1.81 -unova -pnova -e "use nova_cell0;show tables;"
mysql -h192.168.1.81 -unova -pnova -e "use nova;show tables;"
# Example output:
[root@openstack01 tools]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@openstack01 tools]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
c078477e-cb43-40c9-ad8b-a9fde183747d
[root@openstack01 tools]# su -s /bin/sh -c "nova-manage db sync" nova # two warnings appear here; they are harmless, are fixed in later releases, and do not recur if the command is run again
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
[root@openstack01 tools]# mysql -h192.168.1.81 -unova -pnova -e "use nova_cell0;show tables;"
+--------------------------------------------+
| Tables_in_nova_cell0 |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| block_device_mapping |
| bw_usage_cache |
| cells |
| certificates |
| compute_nodes |
| console_auth_tokens |
| console_pools |
| consoles |
| dns_domains |
| fixed_ips |
| floating_ips |
| instance_actions |
| instance_actions_events |
| instance_extra |
| instance_faults |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_id_mappings |
| instance_info_caches |
| instance_metadata |
| instance_system_metadata |
| instance_type_extra_specs |
| instance_type_projects |
| instance_types |
| instances |
| inventories |
| key_pairs |
| migrate_version |
| migrations |
| networks |
| pci_devices |
| project_user_quotas |
| provider_fw_rules |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| resource_provider_aggregates |
| resource_providers |
| s3_images |
| security_group_default_rules |
| security_group_instance_association |
| security_group_rules |
| security_groups |
| services |
| shadow_agent_builds |
| shadow_aggregate_hosts |
| shadow_aggregate_metadata |
| shadow_aggregates |
| shadow_block_device_mapping |
| shadow_bw_usage_cache |
| shadow_cells |
| shadow_certificates |
| shadow_compute_nodes |
| shadow_console_pools |
| shadow_consoles |
| shadow_dns_domains |
| shadow_fixed_ips |
| shadow_floating_ips |
| shadow_instance_actions |
| shadow_instance_actions_events |
| shadow_instance_extra |
| shadow_instance_faults |
| shadow_instance_group_member |
| shadow_instance_group_policy |
| shadow_instance_groups |
| shadow_instance_id_mappings |
| shadow_instance_info_caches |
| shadow_instance_metadata |
| shadow_instance_system_metadata |
| shadow_instance_type_extra_specs |
| shadow_instance_type_projects |
| shadow_instance_types |
| shadow_instances |
| shadow_key_pairs |
| shadow_migrate_version |
| shadow_migrations |
| shadow_networks |
| shadow_pci_devices |
| shadow_project_user_quotas |
| shadow_provider_fw_rules |
| shadow_quota_classes |
| shadow_quota_usages |
| shadow_quotas |
| shadow_reservations |
| shadow_s3_images |
| shadow_security_group_default_rules |
| shadow_security_group_instance_association |
| shadow_security_group_rules |
| shadow_security_groups |
| shadow_services |
| shadow_snapshot_id_mappings |
| shadow_snapshots |
| shadow_task_log |
| shadow_virtual_interfaces |
| shadow_volume_id_mappings |
| shadow_volume_usage_cache |
| snapshot_id_mappings |
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
[root@openstack01 tools]# mysql -h192.168.1.81 -unova -pnova -e "use nova;show tables;"
+--------------------------------------------+
| Tables_in_nova |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| block_device_mapping |
| bw_usage_cache |
| cells |
| certificates |
| compute_nodes |
| console_auth_tokens |
| console_pools |
| consoles |
| dns_domains |
| fixed_ips |
| floating_ips |
| instance_actions |
| instance_actions_events |
| instance_extra |
| instance_faults |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_id_mappings |
| instance_info_caches |
| instance_metadata |
| instance_system_metadata |
| instance_type_extra_specs |
| instance_type_projects |
| instance_types |
| instances |
| inventories |
| key_pairs |
| migrate_version |
| migrations |
| networks |
| pci_devices |
| project_user_quotas |
| provider_fw_rules |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| resource_provider_aggregates |
| resource_providers |
| s3_images |
| security_group_default_rules |
| security_group_instance_association |
| security_group_rules |
| security_groups |
| services |
| shadow_agent_builds |
| shadow_aggregate_hosts |
| shadow_aggregate_metadata |
| shadow_aggregates |
| shadow_block_device_mapping |
| shadow_bw_usage_cache |
| shadow_cells |
| shadow_certificates |
| shadow_compute_nodes |
| shadow_console_pools |
| shadow_consoles |
| shadow_dns_domains |
| shadow_fixed_ips |
| shadow_floating_ips |
| shadow_instance_actions |
| shadow_instance_actions_events |
| shadow_instance_extra |
| shadow_instance_faults |
| shadow_instance_group_member |
| shadow_instance_group_policy |
| shadow_instance_groups |
| shadow_instance_id_mappings |
| shadow_instance_info_caches |
| shadow_instance_metadata |
| shadow_instance_system_metadata |
| shadow_instance_type_extra_specs |
| shadow_instance_type_projects |
| shadow_instance_types |
| shadow_instances |
| shadow_key_pairs |
| shadow_migrate_version |
| shadow_migrations |
| shadow_networks |
| shadow_pci_devices |
| shadow_project_user_quotas |
| shadow_provider_fw_rules |
| shadow_quota_classes |
| shadow_quota_usages |
| shadow_quotas |
| shadow_reservations |
| shadow_s3_images |
| shadow_security_group_default_rules |
| shadow_security_group_instance_association |
| shadow_security_group_rules |
| shadow_security_groups |
| shadow_services |
| shadow_snapshot_id_mappings |
| shadow_snapshots |
| shadow_task_log |
| shadow_virtual_interfaces |
| shadow_volume_id_mappings |
| shadow_volume_usage_cache |
| snapshot_id_mappings |
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
# Comparing the two, the nova and nova_cell0 databases have identical tables at this point; the difference is that the services table in the nova database will hold four rows, one for each Nova service registered in this release
3) Verify that cell0 and cell1 registered successfully
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# Example output:
[root@openstack01 conf.d]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
| Name  | UUID                                 | Transport URL                      | Database Connection                             | Disabled |
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | False |
| cell1 | c078477e-cb43-40c9-ad8b-a9fde183747d | rabbit://openstack:****@controller | mysql+pymysql://nova:****@controller/nova | False |
+-------+--------------------------------------+------------------------------------+-------------------------------------------------+----------+
# The returned data is stored in the cell_mappings table of the nova_api database
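If you want to see those rows directly, they can be queried with the nova account created earlier (column names may differ slightly between releases):
mysql -h192.168.1.81 -unova -pnova -e "select uuid,name,transport_url,database_connection from nova_api.cell_mappings;"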
4.5. Start the Nova services
1) Start the Nova services and enable them at boot
# Five services need to be started
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service \
  openstack-nova-scheduler.service openstack-nova-conductor.service \
  openstack-nova-novncproxy.service
systemctl status openstack-nova-api.service openstack-nova-consoleauth.service \
  openstack-nova-scheduler.service openstack-nova-conductor.service \
  openstack-nova-novncproxy.service
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service \
  openstack-nova-scheduler.service openstack-nova-conductor.service \
  openstack-nova-novncproxy.service
systemctl list-unit-files |grep openstack-nova |grep enabled
# Example output:
[root@openstack01 conf.d]# systemctl start openstack-nova-api.service \
> openstack-nova-scheduler.service openstack-nova-conductor.service \
> openstack-nova-novncproxy.service
[root@openstack01 conf.d]# systemctl status openstack-nova-api.service \
> openstack-nova-scheduler.service openstack-nova-conductor.service \
> openstack-nova-novncproxy.service
● openstack-nova-api.service - OpenStack Nova API Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-api.service; disabled; vendor preset: disabled)
Active: active (running) since Mon 2018-10-29 14:30:22 CST; 6s ago
Main PID: 56510 (nova-api)
CGroup: /system.slice/openstack-nova-api.service
├─56510 /usr/bin/python2 /usr/bin/nova-api
├─56562 /usr/bin/python2 /usr/bin/nova-api
└─56564 /usr/bin/python2 /usr/bin/nova-api
Oct 29 14:30:06 openstack01.zuiyoujie.com systemd[1]: Starting OpenStack Nova API Server...
Oct 29 14:30:22 openstack01.zuiyoujie.com systemd[1]: Started OpenStack Nova API Server.
● openstack-nova-scheduler.service - OpenStack Nova Scheduler Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-scheduler.service; disabled; vendor preset: disabled)
Active: active (running) since Mon 2018-10-29 14:30:21 CST; 8s ago
Main PID: 56511 (nova-scheduler)
CGroup: /system.slice/openstack-nova-scheduler.service
└─56511 /usr/bin/python2 /usr/bin/nova-scheduler
Oct 29 14:30:06 openstack01.zuiyoujie.com systemd[1]: Starting OpenStack Nova Scheduler Server...
Oct 29 14:30:21 openstack01.zuiyoujie.com systemd[1]: Started OpenStack Nova Scheduler Server.
● openstack-nova-conductor.service - OpenStack Nova Conductor Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-conductor.service; disabled; vendor preset: disabled)
Active: active (running) since Mon 2018-10-29 14:30:19 CST; 9s ago
Main PID: 56512 (nova-conductor)
CGroup: /system.slice/openstack-nova-conductor.service
└─56512 /usr/bin/python2 /usr/bin/nova-conductor
Oct 29 14:30:06 openstack01.zuiyoujie.com systemd[1]: Starting OpenStack Nova Conductor Server...
Oct 29 14:30:19 openstack01.zuiyoujie.com systemd[1]: Started OpenStack Nova Conductor Server.
● openstack-nova-novncproxy.service - OpenStack Nova NoVNC Proxy Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-novncproxy.service; disabled; vendor preset: disabled)
Active: active (running) since Mon 2018-10-29 14:30:06 CST; 22s ago
Main PID: 56513 (nova-novncproxy)
CGroup: /system.slice/openstack-nova-novncproxy.service
└─56513 /usr/bin/python2 /usr/bin/nova-novncproxy --web /usr/share/novnc/
Oct 29 14:30:06 openstack01.zuiyoujie.com systemd[1]: Started OpenStack Nova NoVNC Proxy Server.
Oct 29 14:30:06 openstack01.zuiyoujie.com systemd[1]: Starting OpenStack Nova NoVNC Proxy Server...
[root@openstack01 conf.d]# systemctl enable openstack-nova-api.service \
> openstack-nova-scheduler.service openstack-nova-conductor.service \
> openstack-nova-novncproxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-api.service to /usr/lib/systemd/system/openstack-nova-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service to /usr/lib/systemd/system/openstack-nova-scheduler.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service to /usr/lib/systemd/system/openstack-nova-conductor.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service to /usr/lib/systemd/system/openstack-nova-novncproxy.service.
[root@openstack01 conf.d]# systemctl list-unit-files |grep openstack-nova* |grep enabled
openstack-nova-api.service enabled
openstack-nova-conductor.service enabled
openstack-nova-novncproxy.service enabled
openstack-nova-scheduler.service enabled
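Finally, it is worth verifying that the control-plane services registered themselves. With the admin credentials loaded, the compute service list should show nova-scheduler and nova-conductor (and nova-consoleauth, if started) with state up, and nova-status offers an extra readiness check (optional):
source keystone-admin-pass.sh
openstack compute service list
nova-status upgrade check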
# At this point, the Nova compute service is installed on the controller node. The next article covers installing a standalone Nova compute node.
======== Done ========