OpenStack -- Deployment

##1. CentOS 7 environment preparation -- OpenStack Pike

[https://blog.csdn.net/shiyu1157758655/article/category/7063423]


#CentOS 7 x86_64

#Install base packages

yum -y install wget vim ntp net-tools tree openssh

#Switch to the Aliyun yum mirror

mv /etc/yum.repos.d/CentOS-Base.repo{,.bak}

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

yum install centos-release-openstack-pike -y #install the OpenStack Pike repository

yum clean all && yum makecache #rebuild the yum cache

yum install python-openstackclient openstack-selinux python2-PyMySQL -y #OpenStack client

yum install openstack-utils -y #OpenStack utilities

#Disable SELinux and the firewall

systemctl stop firewalld.service

systemctl disable firewalld.service

firewall-cmd --state

sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config


grep --color=auto '^SELINUX' /etc/selinux/config

setenforce 0

#Set the hostname

Host=controller.www.local

hostnamectl set-hostname $Host

# hostname $Host

# echo $Host>/etc/hostname

#Configure the NICs (VLAN, bonding, etc.) and plan the IP addresses
#The controller node also needs an externally reachable IP (on another subnet) for the VNC proxy
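#A minimal static-IP sketch for reference only: the interface name eth0, netmask and gateway below are assumptions, adjust them to your own plan
echo '
TYPE=Ethernet
BOOTPROTO=static
NAME=eth0
DEVICE=eth0
ONBOOT=yes
IPADDR=10.2.1.20
NETMASK=255.255.255.0
GATEWAY=10.2.1.254
' > /etc/sysconfig/network-scripts/ifcfg-eth0
systemctl restart network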

#Add /etc/hosts entries (on the controller as well)

echo "10.2.1.20   controller">>/etc/hosts

echo "10.2.1.21   computer01">>/etc/hosts

echo "10.2.1.22   computer02">>/etc/hosts

#Show this host's IP

ip add | sed -nr 's#^.*inet (.*)/24.*$#\1#gp'

#Time synchronization

/usr/sbin/ntpdate ntp6.aliyun.com

echo "*/3 * * * * /usr/sbin/ntpdate ntp6.aliyun.com &> /dev/null" > /tmp/crontab

crontab /tmp/crontab

#Update the system and reboot

yum update -y && reboot

yum install mariadb mariadb-server python2-PyMySQL -y
echo "#
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
#">/etc/my.cnf.d/openstack.cnf
 
#Start the database service
systemctl enable mariadb.service
systemctl start mariadb.service
netstat -antp|grep mysqld
#mysql_secure_installation #interactive script to set the root password, etc.
#Database setup: create the databases and grant user privileges
#mysql -u root -p 
create database keystone;
create database nova;
create database cinder;
create database neutron;
create database nova_api;
create database glance;
 
Grant privileges:
grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'keystone';
grant all privileges on keystone.* to 'keystone'@'%' identified by 'keystone';
grant all privileges on glance.* to 'glance'@'localhost' identified by 'glance';
grant all privileges on glance.* to 'glance'@'%' identified by 'glance';
 
grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova';
create database nova_cell0;
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova';
 
grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'neutron';
grant all privileges on neutron.* to 'neutron'@'%' identified by 'neutron';
 
grant all privileges on cinder.* to 'cinder'@'localhost' identified by 'cinder';
 grant all privileges on cinder.* to 'cinder'@'%' identified by 'cinder';
 
flush privileges;
select user,host from mysql.user;
show databases;
 
Verification: be sure to test each of these logins one by one.
[root@node01 ~ ]#mysql -u nova -pnova
show databases;
[root@node01 ~ ]#mysql -u glance -pglance
show databases;
[root@node01 ~ ]#mysql -u neutron -pneutron
show databases;
[root@node01 ~ ]#mysql -u cinder -pcinder
show databases;
 
 
 
#RabbitMQ #message queue
yum -y install erlang socat
yum install -y rabbitmq-server
#Start rabbitmq (port 5672)
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmq-plugins enable rabbitmq_management  #enable the web management plugin (port 15672)
#Add users and passwords
rabbitmqctl  add_user admin admin
rabbitmqctl  set_user_tags admin administrator
rabbitmqctl add_user openstack openstack 
rabbitmqctl set_permissions openstack ".*" ".*" ".*" 
rabbitmqctl  set_user_tags openstack administrator
systemctl restart rabbitmq-server.service
netstat -antp|grep '5672'
 
# rabbitmq-plugins list  #list the available plugins
# lsof -i:15672
#Access the RabbitMQ web UI at http://ip:15672
#The default username and password are both guest; add the openstack user to the administrator group in the UI and test logging in with it
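#An optional sanity check from the command line, using the rabbitmqctl tool installed above, before trying the web UI:
rabbitmqctl list_users
rabbitmqctl list_permissions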

Make sure the two nodes can resolve each other:

[root@node01 ~ ]#vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6


192.168.5.107 node01

192.168.5.106  node02

[root@node02 ~ ]#cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.5.106 node02

192.168.5.107 node01

[root@node01 ~ ]#mysql -u keystone -h 192.168.5.107 -pkeystone   # verify the remote connection works

show databases;

| Database           |

+--------------------+

| information_schema |

| keystone           |

#(The databases and grants were created on the SQL server above)
 
#Install Keystone
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached
yum install apr apr-util -y
#Start and enable memcached
cp /etc/sysconfig/memcached{,.bak}
systemctl enable memcached.service
systemctl start memcached.service
netstat -antp|grep 11211
 
#Keystone configuration
cp /etc/keystone/keystone.conf{,.bak}  #back up the default config
[root@node01 ~ ]#openssl rand -hex 10   # generate a random password
0309f3af5f912cd164be
[root@node01 ~ ]#echo "keystone 0309f3af5f912cd164be" >> ~/openstack.log
[root@node01 ~ ]#cat  /etc/keystone/keystone.conf
[DEFAULT]
admin_token = 0309f3af5f912cd164be
verbose = true
[database]
connection = mysql+pymysql://keystone:keystone@node01/keystone
[token]
provider = fernet
driver = memcache
[memcache]
servers = node01:11211
 
 
#Initialize the Identity service database
su -s /bin/sh -c "keystone-manage db_sync" keystone
#Check that the tables were created
mysql -h node01 -ukeystone -pkeystone -e "use keystone;show tables;"
#Initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
#Set the admin (management) user and password
keystone-manage bootstrap --bootstrap-password admin \
  --bootstrap-admin-url http://192.168.5.107:35357/v3/ \
  --bootstrap-internal-url http://192.168.5.107:5000/v3/ \
  --bootstrap-public-url http://192.168.5.107:5000/v3/ \
  --bootstrap-region-id RegionOne
 
 
openstack endpoint create --region RegionOne \
  identity admin http://192.168.5.107:35357/v3
 
#Apache configuration
cp /etc/httpd/conf/httpd.conf{,.bak}
echo "ServerName node01" >>/etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
 
#Start Apache HTTP and enable it at boot
systemctl enable httpd.service
systemctl restart httpd.service
netstat -antp|egrep ':5000|:35357|:80'
# systemctl disable
 
#Create the OpenStack client environment scripts
#admin environment script
echo "
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default 
export OS_PROJECT_NAME=admin 
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.5.107:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
">./admin-openstack.sh
#Test that the script works
source ./admin-openstack.sh
openstack token issue
 
 
#Create the service project; create the glance, nova and neutron users and grant them the admin role
openstack project create --domain default --description "Service Project" service
openstack user create --domain default --password=glance glance
openstack role add --project service --user glance admin
openstack user create --domain default --password=nova nova
openstack role add --project service --user nova admin
openstack user create --domain default --password=neutron neutron
openstack role add --project service --user neutron admin
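#Optionally verify the users and roles just created:
openstack user list
openstack role list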
 
 
 
#Create the demo project (regular user, password and role)
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=demo demo
openstack role create user
openstack role add --project demo --user demo user
#demo environment script
echo "
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
">./demo-openstack.sh
#Test that the script works
source ./demo-openstack.sh
openstack token issue
#The Glance database, user and credentials were already set up above
 
# Register the service in keystone: create the glance service entity and its public, internal and admin API endpoints
source ./admin-openstack.sh || { echo "Source the admin-openstack.sh environment script created earlier";exit; }
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://192.168.5.107:9292
openstack endpoint create --region RegionOne image internal http://192.168.5.107:9292
openstack endpoint create --region RegionOne image admin http://192.168.5.107:9292
 
# Install Glance
yum install -y openstack-glance python-glance
#Configuration
cp /etc/glance/glance-api.conf{,.bak}
cp /etc/glance/glance-registry.conf{,.bak}
# images are stored under /var/lib/glance/images/ by default
[root@node01 ~ ]#mkdir -p /XLH_DATE/images
chown glance:nobody /XLH_DATE/images
 
echo "#
[database]
connection = mysql+pymysql://glance:glance@192.168.5.107/glance
[keystone_authtoken]
auth_uri = http://192.168.5.107:5000/v3
auth_url = http://192.168.5.107:35357/v3
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /XLH_DATE/images
#">/etc/glance/glance-api.conf
#
echo "#
[database]
connection = mysql+pymysql://glance:glance@192.168.5.107/glance
[keystone_authtoken]
auth_uri = http://192.168.5.107:5000/v3
auth_url = http://192.168.5.107:35357/v3
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
#">/etc/glance/glance-registry.conf
 
#Sync the database, then check it
su -s /bin/sh -c "glance-manage db_sync" glance
mysql -h 192.168.5.107 -u glance -pglance -e "use glance;show tables;"
 
#Start the services and enable them at boot
systemctl enable openstack-glance-api openstack-glance-registry
systemctl start openstack-glance-api openstack-glance-registry
#systemctl restart openstack-glance-api  openstack-glance-registry
netstat -antp|egrep '9292|9191' #check the service ports
 
#Image test (the download can be slow)
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img #download the test image
#Upload the image in qcow2 disk format with the bare container format, and make it public
source ./admin-openstack.sh
openstack image create "cirros" \
  --file cirros-0.3.5-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --public
 
#Check that the upload succeeded
openstack image list
#glance image-list
ls /XLH_DATE/images  #the image store directory configured above
 
#To delete an image: glance image-delete <image-id>
##5.1 Nova controller node
# controller installation
#
 
#The Nova databases, user and credentials were already set up above
source ./admin-openstack.sh || { echo "Source the admin-openstack.sh environment script created earlier";exit; }
 
# Register the service in keystone: create the nova service and API endpoints
# the nova user was created earlier
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://192.168.5.107:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://192.168.5.107:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://192.168.5.107:8774/v2.1
#Create the placement user, service and API endpoints
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://192.168.5.107:8778
openstack endpoint create --region RegionOne placement internal http://192.168.5.107:8778
openstack endpoint create --region RegionOne placement admin http://192.168.5.107:8778
#openstack endpoint delete <endpoint-id>  #to remove an endpoint if needed
 
## Install the nova controller node packages
yum install -y openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler openstack-nova-placement-api
yum install -y openstack-utils
 
 
# Nova controller node configuration

2. Configuration: database connections

Edit /etc/nova/nova.conf.
In the [api_database] and [database] sections, configure the database connections.

The result:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/nova/nova.conf
2161:connection = mysql+pymysql://nova:nova@192.168.5.107/nova_api
3106:connection = mysql+pymysql://nova:nova@192.168.5.107/nova

Sync the data to the MySQL databases (warnings here are fine):

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

Check that the database tables were created:

[root@linux-node1 ~]# mysql -h192.168.5.107 -unova -pnova -e "use nova;show tables;"
[root@linux-node1 ~]# mysql -h192.168.5.107 -unova -pnova -e "use nova_api;show tables;"

3. Configuration: keystone

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

auth_strategy = keystone

[keystone_authtoken]

...

auth_uri = http://192.168.5.107:5000

auth_url = http://192.168.5.107:35357

memcached_servers = 192.168.5.107:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = nova

password = nova

4. Configuration: RabbitMQ

Configure RabbitMQ access; the nova components use it to communicate with each other.


In the [DEFAULT] and [oslo_messaging_rabbit] sections, configure RabbitMQ message queue access:

[DEFAULT]

...

rpc_backend = rabbit

[oslo_messaging_rabbit]

...

rabbit_host=192.168.5.107

rabbit_userid = openstack

rabbit_password = openstack

5. Configuration: nova's own modules

In the [DEFAULT] section, enable only the compute and metadata APIs:

[DEFAULT]

...

enabled_apis = osapi_compute,metadata

The docs say to set my_ip; we do not set it here, because later options would then reference the my_ip variable. Writing the IP address directly is a little more work, but it makes it obvious exactly where the address is used.

So my_ip is left unset below.

In the [DEFAULT] section, enable support for the Networking service.
By default, Compute uses its own built-in firewall service. Since the Networking service includes a firewall service,
you must disable Compute's built-in firewall by using the nova.virt.firewall.NoopFirewallDriver driver.

[DEFAULT]

...

use_neutron = True

firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

In the [vnc] section, configure the VNC proxy to use the controller node's management IP address (originally $my_ip):

[vnc]

...

vncserver_listen =192.168.5.107

vncserver_proxyclient_address =192.168.5.107

In the [glance] section, configure the location of the Image service API:

[glance]

...

api_servers = http://192.168.5.107:9292

In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]

...

lock_path =/var/lib/nova/tmp
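The same settings can also be applied non-interactively with the openstack-config tool from the openstack-utils package installed earlier; a partial sketch using the same values as above (repeat the pattern for the remaining options), not a replacement for reviewing the file:

openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova@192.168.5.107/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova@192.168.5.107/nova
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 192.168.5.107
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp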

6. Check the configuration

Configuration done; filter out the comment lines:
[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/nova/nova.conf
267:enabled_apis=osapi_compute,metadata
382:auth_strategy = keystone
1561:firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
1684:use_neutron=True
2119:rpc_backend=rabbit
2161:connection = mysql+pymysql://nova:nova@192.168.5.107/nova_api
3106:connection = mysql+pymysql://nova:nova@192.168.5.107/nova
3323:api_servers = http://192.168.5.107:9292
3523:auth_uri = http://192.168.5.107:5000
3524:auth_url = http://192.168.5.107:35357
3525:memcached_servers = 192.168.5.107:11211
3526:auth_type = password
3527:project_domain_name = default
3528:user_domain_name = default
3529:project_name = service
3530:username = nova
3531:password = nova
4292:lock_path = /var/lib/nova/tmp
4403:rabbit_host=192.168.5.107
4404:rabbit_userid = openstack
4405:rabbit_password = openstack
4465:rabbit_port=5672
5359:vncserver_listen = 192.168.5.107
5360:vncserver_proxyclient_address = 192.168.5.107

 
# Append the following block to the placement API httpd config:
echo "
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
">>/etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
sleep 2
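#Optionally confirm the placement API is now served by httpd (port 8778):
netstat -antp|egrep ':8778'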
 
#Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
 
#Verify the data
nova-manage cell_v2 list_cells
mysql -h controller -u nova -pnova -e "use nova_api;show tables;"
mysql -h controller -u nova -pnova -e "use nova;show tables;" 
mysql -h controller -u nova -pnova -e "use nova_cell0;show tables;"
 
#Enable at boot
 systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
#Start the services
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
 
#Check the services
#nova service-list 
openstack catalog list
nova-status upgrade check
openstack compute service list
 
#nova-manage cell_v2 delete_cell --cell_uuid  b736f4f4-2a67-4e60-952a-14b5a68b0f79
 

Install and configure nova on the compute node

nova-compute manages KVM through libvirt; the compute node is where the virtual machines actually run.

VMware supports nested virtualization; most other desktop hypervisors do not.

The compute node machine must have VT-x enabled.

1. Install the packages

[root@linux-node2 ~]# yum install openstack-nova-compute -y
About novncproxy

novncproxy listens on port 6080; check it on the controller node:

[root@linux-node1 ~]# netstat -lntp|grep 6080
tcp        0      0 0.0.0.0:6080            0.0.0.0:*               LISTEN      931/python2

2. Modify the compute node's configuration file

The compute node's configuration is almost identical to the controller's, so copy the controller's file over and then adjust a few things:
1. The compute node does not need the database connections (leaving them in still works, but it is not clean).
2. The compute node has one extra line in the [vnc] section.

Before copying, check the file permissions on the controller:

[root@linux-node1 ~]# ls /etc/nova/ -l
total 224
-rw-r----- 1 root nova   3673 Mar 22 18:14 api-paste.ini
-rw-r----- 1 root nova 184584 Jul 30 20:13 nova.conf
-rw-r----- 1 root nova  27914 Mar 22 18:14 policy.json
-rw-r--r-- 1 root root     72 May 24 06:43 release
-rw-r----- 1 root nova    966 Mar 22 18:13 rootwrap.conf

Copy the file over:

[root@linux-node1 ~]# scp -r /etc/nova/nova.conf 192.168.1.3:/etc/nova/

Check that the copied file's permissions are correct:
[root@linux-node2 ~]# ll /etc/nova/
total 224
-rw-r----- 1 root nova   3673 Jul 31 08:36 api-paste.ini
-rw-r----- 1 root nova 184584 Jul 31 08:36 nova.conf
-rw-r----- 1 root nova  27914 Jul 31 08:36 policy.json
-rw-r--r-- 1 root root     72 Jul 31 08:36 release
-rw-r----- 1 root nova    966 Jul 31 08:36 rootwrap.conf

Edit the config: remove the MySQL settings, or simply comment out these lines.

This affects the [api_database] and [database] sections:
[api_database]
#connection = mysql+pymysql://nova:nova@192.168.1.2/nova_api

[database]
#connection = mysql+pymysql://nova:nova@192.168.1.2/nova

In the [vnc] section, enable and configure remote console access:

[vnc]
enabled=true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 192.168.1.3
novncproxy_base_url = http://192.168.1.2:6080/vnc_auto.html

Modify the [libvirt] section.

Before changing it, determine whether the compute node supports hardware acceleration for virtual machines:
[root@linux-node2 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
1

If this command returns one or greater, the compute node supports hardware acceleration and no extra configuration is needed.
If it returns zero, the compute node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
Edit the [libvirt] section of /etc/nova/nova.conf:
[libvirt]
virt_type=kvm
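If the check above returned 0 (for example when the compute node is itself a VM without nested virtualization enabled), QEMU can be used instead; a small sketch using openstack-config:

openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu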

Review all the changes:
[root@linux-node2 ~]# grep -n '^[a-Z]'  /etc/nova/nova.conf 
267:enabled_apis=osapi_compute,metadata
382:auth_strategy=keystone
1561:firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
1684:use_neutron=true
2119:rpc_backend=rabbit
3323:api_servers=http://192.168.1.2:9292
3523:auth_uri=http://192.168.1.2:5000
3524:auth_url=http://192.168.1.2:35357
3525:memcached_servers=192.168.1.2:11211
3526:auth_type = password
3527:project_domain_name = default
3528:user_domain_name = default
3529:project_name = service
3530:username = nova
3531:password = nova
3682:virt_type=kvm
4292:lock_path=/var/lib/nova/tmp
4403:rabbit_host=192.168.1.2
4404:rabbit_userid = openstack
4405:rabbit_password = openstack
4465:rabbit_port=5672
5359:enabled=true
5360:vncserver_listen=0.0.0.0
5361:vncserver_proxyclient_address=192.168.1.3
5362:novncproxy_base_url = http://192.168.1.2:6080/vnc_auto.html

3. Start the services and check their status

[root@linux-node2 ~]# systemctl enable libvirtd.service openstack-nova-compute.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
[root@linux-node2 ~]# systemctl start libvirtd.service openstack-nova-compute.service

Check from the controller: the compute node now appears, which means its services started correctly and the configuration is fine.

[root@linux-node1 ~]# openstack host list

List the nova services from the controller; the update timestamps should be nearly identical. If they drift too far apart, instance creation may fail.

[root@linux-node1 ~]# nova service-list

The following command tests whether nova can reach glance:

[root@linux-node1 ~]# nova image-list

 
# Discover compute hosts; run this whenever a new compute node is added
#su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
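# Alternatively, nova can be told to discover new compute hosts on a schedule; a sketch (the 300-second interval is an example value):
#openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300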
 
##6.1 Neutron controller node -- OpenStack Pike

Network overview

Choose a networking option: provider (public) networks or self-service (private) networks.
Deploy the Networking service using one of these two architectures.
Provider networks: the simplest possible architecture; instances can only attach to provider (external) networks. There are no self-service (private) networks, routers or floating IPs.
Only admin or other privileged users can manage provider networks.
Self-service networks: adds layer-3 services on top of provider networks, so instances can also attach to private networks.

This deployment uses provider (public) networks.

Install and configure Neutron on the controller node

1. Install the components on the controller node

[root@linux-node1 ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

2. Controller configuration: database

Edit /etc/neutron/neutron.conf and complete the following.
In the [database] section, configure database access:

[database]
...
connection = mysql+pymysql://neutron:neutron@192.168.5.107/neutron

After changing the database connection you do not need to sync the database yet; there is more to configure first.

3. Controller configuration: keystone

In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:

[DEFAULT]
...
auth_strategy = keystone

In the [keystone_authtoken] section, add the following parameters:

auth_uri = http://192.168.5.107:5000
auth_url = http://192.168.5.107:35357
memcached_servers = 192.168.5.107:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

4. Controller configuration: RabbitMQ

In the [DEFAULT] and [oslo_messaging_rabbit] sections, configure the RabbitMQ message queue connection:
[DEFAULT]
...
rpc_backend = rabbit

Configure the [oslo_messaging_rabbit] section:

[oslo_messaging_rabbit]

...

rabbit_host = 192.168.5.107

rabbit_userid = openstack

rabbit_password = openstack

5. Controller configuration: Neutron core

In the [DEFAULT] section, enable the ML2 plugin and disable other plugins; leaving the value empty after the equals sign disables the other plugins.

[DEFAULT]
...
core_plugin = ml2
service_plugins =

6. Controller configuration: integration with nova

In the [DEFAULT] and [nova] sections, configure Networking to notify Compute of network topology changes.
Uncomment these two lines;
they mean that nova is notified when a port's status or data changes.

[DEFAULT]
...
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

Configure the [nova] section (the Neutron configuration file has its own nova section):

auth_url = http://192.168.5.107:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

7. Controller configuration: lock path


In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp

8. Check the controller's main configuration file

The neutron main configuration on the controller is now complete:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/neutron.conf
2:auth_strategy = keystone
3:core_plugin = ml2
4:service_plugins =
5:notify_nova_on_port_status_changes = true
6:notify_nova_on_port_data_changes = true
515:rpc_backend = rabbit
658:connection = mysql+pymysql://neutron:neutron@192.168.5.107/neutron
767:auth_uri = http://192.168.5.107:5000
768:auth_url = http://192.168.5.107:35357
769:memcached_servers = 192.168.1.2:11211
770:auth_type = password
771:project_domain_name = default
772:user_domain_name = default
773:project_name = service
774:username = neutron
775:password = neutron
944:auth_url = http://192.168.5.107:35357
945:auth_type = password
946:project_domain_name = default
947:user_domain_name = default
948:region_name = RegionOne
949:project_name = service
950:username = nova
951:password = nova
1050:lock_path = /var/lib/neutron/tmp
1069:rabbit_host = 192.168.5.107

1070:rabbit_userid = openstack
1071:rabbit_password = openstack
1224:rabbit_port = 5672

9. Configure the Modular Layer 2 (ML2) plugin on the controller

ML2 is the layer-2 networking configuration; the ML2 plugin uses the Linuxbridge mechanism to build layer-2 virtual network infrastructure for instances.
Edit /etc/neutron/plugins/ml2/ml2_conf.ini and complete the following.
In the [ml2] section, enable flat and VLAN networks:
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types =

In the [ml2] section, enable the Linuxbridge mechanism.
This tells neutron which mechanism drivers to use to build networks; here it is linuxbridge.

[ml2]
...
mechanism_drivers = linuxbridge

It is a list, so you can specify several drivers, for example adding openvswitch as well:

mechanism_drivers = linuxbridge,openvswitch

In the [ml2] section, enable the port security extension driver:

[ml2]
...
extension_drivers = port_security

In the [ml2_type_flat] section, configure the provider virtual network as a flat network. The official docs use provider as the name; here we use flat_networks = public.

[ml2_type_flat]

...

flat_networks = public

In the [securitygroup] section, enable ipset to improve the efficiency of security group rules:

[securitygroup]

...

enable_ipset = True

10. Check the ML2 configuration file on the controller

The ML2 configuration on the controller is now complete:

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/ml2_conf.ini
107:type_drivers = flat,vlan,gre,vxlan,geneve
112:tenant_network_types = 
116:mechanism_drivers = linuxbridge,openvswitch
121:extension_drivers = port_security
153:flat_networks = public
215:enable_ipset = true

11. Configure the Linux bridge agent on the controller

The Linux bridge agent builds the layer-2 virtual network for instances and handles security group rules.
Edit /etc/neutron/plugins/ml2/linuxbridge_agent.ini and complete the following.
In the [linux_bridge] section, map the provider virtual network to the physical network interface,
replacing PUBLIC_INTERFACE_NAME with the name of the underlying physical interface:

[linux_bridge]

physical_interface_mappings = public:ens33

In the [vxlan] section, disable VXLAN overlay networks:

[vxlan]

enable_vxlan = False


[securitygroup]部分,启用安全组并配置 Linuxbridge
iptables firewall driver:

[securitygroup]

...

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

Review the changed settings:
[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
128:physical_interface_mappings = public:ens33
156:enable_security_group = true
157:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
165:enable_vxlan = false

12. Configure the DHCP agent on the controller

Edit /etc/neutron/dhcp_agent.ini and complete the following.
In the [DEFAULT] section, configure the Linux bridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so that instances on provider networks can reach the metadata service over the network:

[DEFAULT]

...

interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

enable_isolated_metadata = True

Review the changes.

The first line is the underlying interface driver.

The second line, dnsmasq, is a small open-source DHCP project.

The third line is for the metadata route.

[root@linux-node1 ~]# grep -n '^[a-Z]' /etc/neutron/dhcp_agent.ini
2:interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
3:dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
4:enable_isolated_metadata = true

13. Configure the metadata agent on the controller

Edit /etc/neutron/metadata_agent.ini and complete the following.
In the [DEFAULT] section, configure the metadata host and the shared secret:

[DEFAULT]

...

nova_metadata_ip = controller

metadata_proxy_shared_secret = METADATA_SECRET

Replace METADATA_SECRET with the secret you chose for the metadata proxy; here the custom shared secret is "shi".
The same shared secret must also be configured on the nova side, and the two values must match.
[root@linux-node1 ~]# grep -n '^[a-Z]'  /etc/neutron/metadata_agent.ini
2:nova_metadata_ip = 192.168.5.107

3:metadata_proxy_shared_secret = shi

14. Configure neutron in nova on the controller

What is configured below is neutron's keystone authentication endpoint; 9696 is the neutron-server port.
Edit /etc/nova/nova.conf and complete the following.
In the [neutron] section, configure the access parameters and enable the metadata proxy with its shared secret:

url = http://192.168.5.107:9696

auth_url = http://192.168.5.107:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

Then uncomment and set the following:

service_metadata_proxy = True

metadata_proxy_shared_secret = shi

15. Create the plugin symlink on the controller

The Networking service initialization scripts expect a symbolic link
/etc/neutron/plugin.ini pointing to the ML2 plugin configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the link does not exist, create it:

[root@linux-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

16. Sync the database on the controller

[root@linux-node1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

OK
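As with glance and nova, the table creation can be verified (using the neutron database credentials created earlier):

[root@linux-node1 ~]# mysql -h 192.168.5.107 -u neutron -pneutron -e "use neutron;show tables;"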

17. Restart nova and start the neutron services on the controller

Restart the nova-api service on the controller:

[root@linux-node1 ~]# systemctl restart openstack-nova-api.service

Enable the neutron services at boot and start them:

[root@linux-node1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@linux-node1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

The official docs also mention the following; it is not needed for this setup, so skip it.

For networking option 2, you would also enable the layer-3 service and set it to start at boot:

# systemctl enable neutron-l3-agent.service

# systemctl start neutron-l3-agent.service

Check the listening ports; 9696 is now present:

[root@linux-node1 ~]# netstat -nltp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address      Foreign Address    State    PID/Program name
tcp        0      0 0.0.0.0:3306       0.0.0.0:*          LISTEN   1874/mysqld
tcp        0      0 0.0.0.0:11211      0.0.0.0:*          LISTEN   910/memcached
tcp        0      0 0.0.0.0:9292       0.0.0.0:*          LISTEN   4019/python2
tcp        0      0 0.0.0.0:80         0.0.0.0:*          LISTEN   912/httpd
tcp        0      0 0.0.0.0:4369       0.0.0.0:*          LISTEN   1/systemd
tcp        0      0 0.0.0.0:22         0.0.0.0:*          LISTEN   997/sshd
tcp        0      0 0.0.0.0:15672      0.0.0.0:*          LISTEN   898/beam
tcp        0      0 0.0.0.0:35357      0.0.0.0:*          LISTEN   912/httpd
tcp        0      0 0.0.0.0:9696       0.0.0.0:*          LISTEN   6659/python2
tcp        0      0 0.0.0.0:6080       0.0.0.0:*          LISTEN   4569/python2
tcp        0      0 0.0.0.0:8774       0.0.0.0:*          LISTEN   6592/python2
tcp        0      0 0.0.0.0:8775       0.0.0.0:*          LISTEN   6592/python2
tcp        0      0 0.0.0.0:9191       0.0.0.0:*          LISTEN   4020/python2
tcp        0      0 0.0.0.0:25672      0.0.0.0:*          LISTEN   898/beam
tcp        0      0 0.0.0.0:5000       0.0.0.0:*          LISTEN   912/httpd
tcp6       0      0 :::22              :::*               LISTEN   997/sshd
tcp6       0      0 :::5672            :::*               LISTEN   898/beam

18. Create the service entity and register endpoints on the controller

Create the service and register its endpoints in keystone.

Create the neutron service entity:

[root@linux-node1 ~]# source admin-openstack.sh
[root@linux-node1 ~]# openstack service create --name neutron --description "OpenStack Networking" network

Create the network service API endpoints.

Create the public endpoint:

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network public http://192.168.5.107:9696

Create the internal endpoint:

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network internal http://192.168.5.107:9696

Create the admin endpoint:

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network admin http://192.168.5.107:9696

Check: you should see the agents listed, each with a smiley face in the alive column, which means they are healthy.

[root@linux-node1 ~]# neutron agent-list

Install and configure neutron on the compute node

In early versions nova-compute connected directly to the database, which meant that if any compute node was compromised the whole database was at risk. nova-conductor was introduced later to sit in between.

1. Install the components
[root@linux-node2 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y

Two files need to be changed on the compute node: the common components and the networking option.

Common components: authentication, message queue and plugin settings in
/etc/neutron/neutron.conf

Networking option: the Linux bridge agent in
/etc/neutron/plugins/ml2/linuxbridge_agent.ini

For reference see
https://docs.openstack.org/mitaka/zh_CN/install-guide-rdo/neutron-compute-install-option1.html

Because the compute node's neutron configuration is similar to the controller's, copy the controller's file over and adjust it:

[root@linux-node1 ~]# scp -p /etc/neutron/neutron.conf 192.168.1.3:/etc/neutron/

2. Adjust the configuration on the compute node

Remove the MySQL configuration by commenting out this line:

[database]
#connection =

Delete the following [nova] section settings:
[nova]
auth_url = http://192.168.1.2:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

Comment out the following four lines:
#notify_nova_on_port_status_changes = true
#notify_nova_on_port_data_changes = true
#core_plugin = ml2
#service_plugins =

3. Review the changed configuration

[root@linux-node2 neutron]# grep -n '^[a-Z]' /etc/neutron/neutron.conf
2:auth_strategy = keystone
515:rpc_backend = rabbit
767:auth_uri = http://192.168.1.2:5000
768:auth_url = http://192.168.1.2:35357
769:memcached_servers = 192.168.1.2:11211
770:auth_type = password
771:project_domain_name = default
772:user_domain_name = default
773:project_name = service
774:username = neutron
775:password = neutron
1042:lock_path = /var/lib/neutron/tmp
1061:rabbit_host = 192.168.1.2
1062:rabbit_userid = openstack
1063:rabbit_password = openstack
1216:rabbit_port = 5672

4. Modify the nova main configuration on the compute node

Edit /etc/nova/nova.conf and complete the following.
In the [neutron] section, configure the access parameters:

[neutron]

...

url = http://192.168.1.2:9696

auth_url = http://192.168.1.2:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

5. Configure the Linux bridge agent on the compute node

(1) The Linux bridge agent builds the layer-2 virtual network for instances and handles security group rules.
Edit /etc/neutron/plugins/ml2/linuxbridge_agent.ini and complete the following.
In the [linux_bridge] section, map the provider virtual network to the physical network interface:

[linux_bridge]

physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

Replace PROVIDER_INTERFACE_NAME with the name of the underlying physical public network interface.

(2) In the [vxlan] section, disable VXLAN overlay networks:

[vxlan]

enable_vxlan = False

(3) In the [securitygroup] section, enable security groups and configure the Linux bridge iptables firewall driver:

[securitygroup]

...

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

Since these three settings are identical to the controller's, simply copy the controller's file over:
[root@linux-node1 ~]# scp -r /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.1.3:/etc/neutron/plugins/ml2/

6. Check the linuxbridge_agent configuration on the compute node

[root@linux-node2 neutron]# grep -n '^[a-Z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
128:physical_interface_mappings = public:ens33
156:enable_security_group = true
157:firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
165:enable_vxlan = false

7. Restart nova and start the neutron service

Because the nova main configuration was changed, restart the nova service.

Also enable and start the neutron Linux bridge agent:

[root@linux-node2 neutron]# systemctl restart openstack-nova-compute.service
[root@linux-node2 neutron]# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@linux-node2 neutron]# systemctl start neutron-linuxbridge-agent.service

8. Check from the controller

A Linux bridge agent for the compute node now appears:
[root@linux-node1 ~]# source admin-openstack.sh
[root@linux-node1 ~]# neutron agent-list
+--------------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host                | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+
| 0ebb213b-4933-4a34-be61-2aeeb46574a6 | DHCP agent         | linux-node1.shi.com | nova              | :-)   | True           | neutron-dhcp-agent        |
| 4677fa97-6569-4ab1-a3db-71d5736b40fb | Linux bridge agent | linux-node1.shi.com |                   | :-)   | True           | neutron-linuxbridge-agent |
| 509da84b-8bd3-4be0-9688-94d45225c3c0 | Linux bridge agent | linux-node2.shi.com |                   | :-)   | True           | neutron-linuxbridge-agent |
| 5ec0f2c1-3dd3-40ba-a42e-e53313864087 | Metadata agent     | linux-node1.shi.com |                   | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+---------------------+-------------------+-------+----------------+---------------------------+

The mapping below can be thought of as giving the NIC an alias so its purpose is clear.
The physical NIC name in the mapping must match the actual interface name on the host (ens33 here), or the configuration must be adjusted to the real NIC name.
[root@linux-node2 ~]# grep physical_interface_mappings /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = public:ens33
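A quick way to confirm that the interface named in the mapping actually exists on the node:

[root@linux-node2 ~]# ip link show ens33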

 
 
 

##7. Create a virtual machine

Create a single flat network and subnet

1. Create a single flat network

On the controller node, source the admin credentials to gain access to admin-only commands:
source admin-openstack.sh. A provider network must be created as admin; if you source demo-openstack.sh instead, the creation will not succeed.

The command syntax is as follows.

The command below replaces provider with public as the physical network name; public is the mapping defined earlier, which corresponds to the physical NIC.

neutron net-create --shared --provider:physical_network provider --provider:network_type flat provider

The actual run:

[root@linux-node1 ~]# source admin-openstack.sh
[root@linux-node1 ~]# neutron net-create --shared --provider:physical_network public --provider:network_type flat public-net
Created a new network:

The tenant_id above matches the project id below, because the network was created as admin and therefore belongs to the admin project.

[root@linux-node1 ~]# openstack project list

View the created network:

[root@linux-node1 ~]# neutron net-list

2. Create a subnet
The run:

[root@linux-node1 ~]# neutron subnet-create --name public-subnet --allocation-pool start=192.168.1.100,end=192.168.1.200 --dns-nameserver 8.8.8.8 --gateway 192.168.1.1 public-net 192.168.1.0/24
Created a new subnet:

Run the following again and the subnets column now has a value:

[root@linux-node1 ~]# neutron net-list

Create a nano-flavor instance

1. Create the nano flavor

With the network created, create a nano-sized flavor.

The smallest default flavor needs 512 MB of memory. For environments where the compute node has less than 4 GB of RAM, we recommend creating an m1.nano flavor that needs only 64 MB.
For pure testing, use the m1.nano flavor with the CirrOS image.

1 GB disk, 64 MB memory, 1 vCPU:

[root@linux-node1 ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

List the flavors:

IDs 1-5 are the defaults; 0 is the one just created.

[root@linux-node1 ~]# openstack flavor list

Most cloud images support public key authentication rather than traditional password authentication. Before launching an instance, you must add a public key to the Compute service.

Generate a key pair and add the public key to OpenStack:

[root@linux-node1 ~]# source demo-openstack.sh
[root@linux-node1 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa):
[root@linux-node1 ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

Verify the key was added:

[root@linux-node1 ~]# openstack keypair list
2. Add security group rules

By default there is a default security group that blocks all access; add rules for ICMP and SSH:

[root@linux-node1 ~]# openstack security group rule create --proto icmp default
[root@linux-node1 ~]# openstack security group rule create --proto tcp --dst-port 22 default

3. List available images, networks, security groups, etc.

Before creating the instance, list the available flavors and images:

[root@linux-node1 ~]# source demo-openstack.sh
[root@linux-node1 ~]# openstack flavor list
[root@linux-node1 ~]# openstack image list

List the available networks:

[root@linux-node1 ~]# openstack network list

List the available security groups:

[root@linux-node1 ~]# openstack security group list
4. Create the instance

The creation syntax is:

openstack server create --flavor m1.tiny --image cirros \
  --nic net-id=PROVIDER_NET_ID --security-group default \
  --key-name mykey provider-instance

If you chose networking option 1 and your environment has only one network, you can omit the --nic option; OpenStack will pick the only available network automatically.
net-id is the id shown by openstack network list, not the subnet id.
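To avoid copying the ID by hand, the network ID can also be captured into a shell variable first; a small sketch (it assumes the network created above is named public-net):

NET_ID=$(openstack network show public-net -f value -c id)
openstack server create --flavor m1.nano --image cirros --nic net-id=$NET_ID --security-group default --key-name mykey provider-instance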

The actual run:

[root@linux-node1 ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=dc3a90b3-b3ca-4c3d-8d7a-24587907659e --security-group default --key-name mykey provider-instance

5. Check the instance status and log in

[root@linux-node1 ~]# openstack server list

On the compute node you can see the KVM guest running:

[root@linux-node2 ~]# virsh list
 Id    Name                           State
----------------------------------------------------
 1     instance-00000001              running

Logging in to the instance succeeds without a password, because the key was injected.

During creation you can watch the compute node's log, since the compute node is the one actually building the VM:

[root@linux-node2 ~]# tail -f /var/log/nova/nova-compute.log
2017-08-06 14:13:19.589 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:13:19.841 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com
2017-08-06 14:14:14.270 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Auditing locally available compute resources for node linux-node2.shi.com
2017-08-06 14:14:17.336 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Total usable vcpus: 1, total allocated vcpus: 1
2017-08-06 14:14:17.338 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:14:17.539 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com
2017-08-06 14:15:18.450 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Auditing locally available compute resources for node linux-node2.shi.com
2017-08-06 14:15:19.397 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Total usable vcpus: 1, total allocated vcpus: 1
2017-08-06 14:15:19.397 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Final resource view: name=linux-node2.shi.com phys_ram=1023MB used_ram=576MB phys_disk=46GB used_disk=1GB total_vcpus=1 used_vcpus=1 pci_stats=[]
2017-08-06 14:15:19.445 3221 INFO nova.compute.resource_tracker [req-740c7604-1350-4295-a10f-d75652b4642d - - - - -] Compute_service record updated for linux-node2.shi.com:linux-node2.shi.com

If instance creation fails, check the logs of all services and look for suspicious entries around the time of the failure:

[root@linux-node1 ~]# grep 'ERROR' /var/log/glance/*
[root@linux-node1 ~]# grep 'ERROR' /var/log/keystone/*
[root@linux-node1 ~]# grep 'ERROR' /var/log/nova/*
[root@linux-node1 ~]# grep 'ERROR' /var/log/neutron/*

Use the demo user to view the instance, since it was created as demo:

[root@linux-node1 ~]# openstack server list

[root@linux-node1 ~]# source demo-openstack.sh

[root@linux-node1 ~]# openstack server list
+--------------------------------------+-------------------+--------+--------------------------+
| ID                                   | Name              | Status | Networks                 |
+--------------------------------------+-------------------+--------+--------------------------+
| 3365c4b4-d487-4778-ad28-e2c675f085eb | provider-instance | ACTIVE | public-net=192.168.1.101 |
+--------------------------------------+-------------------+--------+--------------------------+

Get its console URL:
[root@linux-node1 ~]# openstack console url show provider-instance
+-------+----------------------------------------------------------------------------------+
| Field | Value                                                                            |
+-------+----------------------------------------------------------------------------------+
| type  | novnc                                                                            |
| url   | http://192.168.1.2:6080/vnc_auto.html?token=7f9daf00-54b3-4b9f-99eb-a3c30981de38 |
+-------+----------------------------------------------------------------------------------+

Paste that URL into a browser to open a VNC console in a web page; the token in the URL is time-limited and changes.

Port 6080 above proxies to port 5900 on 192.168.1.3.

You can log in from the browser page.

Check the ports on the compute node; port 5900 is the VNC port:

[root@linux-node2 ~]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address      Foreign Address    State    PID/Program name
tcp        0      0 0.0.0.0:5900       0.0.0.0:*          LISTEN   3456/qemu-kvm
tcp        0      0 0.0.0.0:111        0.0.0.0:*          LISTEN   1/systemd
tcp        0      0 0.0.0.0:22         0.0.0.0:*          LISTEN   1041/sshd
tcp        0      0 127.0.0.1:25       0.0.0.0:*          LISTEN   1728/master
tcp6       0      0 :::111             :::*               LISTEN   1/systemd
tcp6       0      0 :::22              :::*               LISTEN   1041/sshd
tcp6       0      0 ::1:25             :::*               LISTEN   1728/master

Check the connections:

Port 5900 on the compute node and port 6080 on the controller:

[root@linux-node2 ~]# lsof -i:5900
COMMAND   PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
qemu-kvm 3456 qemu   21u  IPv4  54787      0t0  TCP *:rfb (LISTEN)
qemu-kvm 3456 qemu   24u  IPv4  56301      0t0  TCP linux-node2:rfb->linux-node1:53158 (ESTABLISHED)

[root@linux-node1 ~]# lsof -i:6080
COMMAND    PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
nova-novn 3509 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9627 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9627 nova    5u  IPv4  74962      0t0  TCP linux-node1:6080->promote.cache-dns.local:52711 (ESTABLISHED)
nova-novn 9629 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9629 nova    5u  IPv4  74964      0t0  TCP linux-node1:6080->promote.cache-dns.local:52715 (ESTABLISHED)
nova-novn 9630 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9630 nova    5u  IPv4  74965      0t0  TCP linux-node1:6080->promote.cache-dns.local:52716 (ESTABLISHED)
nova-novn 9631 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9631 nova    5u  IPv4  74966      0t0  TCP linux-node1:6080->promote.cache-dns.local:52717 (ESTABLISHED)
nova-novn 9633 nova    4u  IPv4  26745      0t0  TCP *:6080 (LISTEN)
nova-novn 9633 nova    5u  IPv4  74969      0t0  TCP linux-node1:6080->promote.cache-dns.local:52721 (ESTABLISHED)

 
 
 