5.2 OpenStack

(草稿)
先起1再起2

source admin
查看状态,status
keystone是用apache启动

web界面
default
demo
demo

一、网络服务
私有网络

vim
打开 router(service_plugins = router,启用三层路由)
allow(允许重叠IP地址,allow_overlapping_ips)
(2处)

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 \
>   openstack-neutron-linuxbridge ebtables
[root@controller ~]# vim /etc/neutron/neutron.conf 

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

vim
加入vxlan
vxlan
l2
1:1000
(4处)

[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = True

vim
True
localip 31

[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[vxlan]
enable_vxlan = True
local_ip = 172.25.21.31
l2_population = True

vim
2行

[root@controller ~]# vim /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
external_network_bridge =

返回网络服务

restart 2个服务

[root@controller ~]# systemctl restart neutron-server.service neutron-linuxbridge-agent.service

enable l3

[root@controller ~]# systemctl enable --now neutron-l3-agent.service

neutron agent-list

[root@controller ~]# neutron agent-list
+--------------+--------------+------------+-------------------+-------+----------------+-----------------+
| id           | agent_type   | host       | availability_zone | alive | admin_state_up | binary          |
+--------------+--------------+------------+-------------------+-------+----------------+-----------------+
| 472d998b-    | L3 agent     | controller | nova              | :-)   | True           | neutron-l3-agen |
| 0d1c-416c-91 |              |            |                   |       |                | t               |
| 58-77c3c289b |              |            |                   |       |                |                 |
| 343          |              |            |                   |       |                |                 |
| 6986dc5b-f88 | DHCP agent   | controller | nova              | :-)   | True           | neutron-dhcp-   |
| 7-4ed0-9c4b- |              |            |                   |       |                | agent           |
| fb02316f61bb |              |            |                   |       |                |                 |
| 99832d4c-    | Linux bridge | controller |                   | :-)   | True           | neutron-        |
| 1e78-4d05    | agent        |            |                   |       |                | linuxbridge-    |
| -a6ba-       |              |            |                   |       |                | agent           |
| 5526dbef1548 |              |            |                   |       |                |                 |
| ac99e504-b44 | Metadata     | controller |                   | :-)   | True           | neutron-        |
| 9-4c7d-b681- | agent        |            |                   |       |                | metadata-agent  |
| 7f570662bf4b |              |            |                   |       |                |                 |
| edef715c-502 | Linux bridge | compute1   |                   | :-)   | True           | neutron-        |
| 4-4ecf-9887- | agent        |            |                   |       |                | linuxbridge-    |
| 37e5d08ed806 |              |            |                   |       |                | agent           |
+--------------+--------------+------------+-------------------+-------+----------------+-----------------+

2、安装配置计算节点

2:
vim
vxlan True
localip 32
l2


[root@compute1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[vxlan]
enable_vxlan = True
local_ip = 172.25.21.32
l2_population = True

restart bridge桥接

[root@compute1 ~]# systemctl restart neutron-linuxbridge-agent.service

3、dashboard

1:
vim
所有False改成True
(3层服务打开)
restart httpd memcached

[root@controller ~]# vim /etc/openstack-dashboard/local_settings 

OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': True,
    'enable_ha_router': True,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
[root@controller ~]# systemctl restart httpd memcached.service 

4、 图形化
重新登陆demo

进入admin用户
选择外部网络

demo

创建虚拟机

(截图)

21:
2:安装
virsh list

[root@compute1 ~]# yum install -y libvirt-client

(SDN:软件定义网络)

2个网路互联
route

免密
1:
ssh 172.25.21.103

[root@compute1 ~]# virsh list
 Id    Name                           State
----------------------------------------------------
 1     instance-00000003              running
 2     instance-00000002              running

ssh连接外网,外网ping不到内网,但是可以ping到内网的浮动IP

[kiosk@foundation21 Desktop]$ ssh cirros@172.25.21.101
cirros@172.25.21.101's password: 
$ ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
    link/ether fa:16:3e:1b:ca:ca brd ff:ff:ff:ff:ff:ff
    inet 172.25.21.101/24 brd 172.25.21.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe1b:caca/64 scope link 
       valid_lft forever preferred_lft forever

$ ping -w 3 10.0.0.3
PING 10.0.0.3 (10.0.0.3): 56 data bytes

--- 10.0.0.3 ping statistics ---
3 packets transmitted, 0 packets received, 100% packet loss

$ ping -w 3 172.25.21.103
PING 172.25.21.103 (172.25.21.103): 56 data bytes
64 bytes from 172.25.21.103: seq=0 ttl=63 time=2.964 ms
64 bytes from 172.25.21.103: seq=1 ttl=63 time=1.156 ms
64 bytes from 172.25.21.103: seq=2 ttl=63 time=1.331 ms

--- 172.25.21.103 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 1.156/1.817/2.964 ms

ssh登陆内网,查看IP就是内网段的,内网可以ping到外网


[kiosk@foundation21 Desktop]$ ssh cirros@172.25.21.103
cirros@172.25.21.103's password: gocubsgo
$ ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc pfifo_fast qlen 1000
    link/ether fa:16:3e:e9:28:3a brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.3/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fee9:283a/64 scope link 
       valid_lft forever preferred_lft forever
$ ping -w  3 172.25.21.101
PING 172.25.21.101 (172.25.21.101): 56 data bytes
64 bytes from 172.25.21.101: seq=0 ttl=63 time=1.250 ms
64 bytes from 172.25.21.101: seq=1 ttl=63 time=1.348 ms
64 bytes from 172.25.21.101: seq=2 ttl=63 time=1.238 ms

--- 172.25.21.101 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 1.238/1.278/1.348 ms

二、封装镜像

1、
virt-manager
封装7.6

reboot small
e
selinux=0

登陆
disabled
firewalld

分配IP
21:ssh 200

[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:72:36:55 brd ff:ff:ff:ff:ff:ff
    inet 172.25.21.200/24 scope global eth0
       valid_lft forever preferred_lft forever

m:
设置yum.repo
acpid
enable acpid

[root@localhost ~]# cat /etc/yum.repos.d/westos.repo 
[rhel7.6]
name=rhel7.6
baseurl=http://172.25.21.250/westos
gpgcheck=0
[root@localhost ~]# yum repolist
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
rhel7.6                                                                      | 4.3 kB  00:00:00     
(1/2): rhel7.6/group_gz                                                      | 146 kB  00:00:00     
(2/2): rhel7.6/primary_db                                                    | 4.2 MB  00:00:00     
repo id                                        repo name                                      status
rhel7.6                                        rhel7.6                                        5,152
repolist: 5,152

[root@localhost ~]# yum install -y acpid
[root@localhost ~]# systemctl enable acpid
[root@localhost ~]# systemctl status acpid
● acpid.service - ACPI Event Daemon
   Loaded: loaded (/usr/lib/systemd/system/acpid.service; enabled; vendor preset: enabled)
   Active: inactive (dead)

pub下的cloud-init(rhel7)包放到http下
写yumrepo
安装

[root@localhost ~]# vi /etc/yum.repos.d/cloud.repo
[cloud]
name=cloud-init
baseurl=http://172.25.21.250/rhel7
gpgcheck=0

[root@localhost ~]# vi /etc/yum.repos.d/cloud.repo
[root@localhost ~]# yum repolist
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
cloud                                                                        | 2.9 kB  00:00:00     
cloud/primary_db                                                             |  27 kB  00:00:00     
repo id                                      repo name                                        status
cloud                                        cloud-init                                          27
rhel7.6                                      rhel7.6                                          5,152
repolist: 5,179

[root@localhost ~]# yum install -y cloud-init cloud-utils-growpart

不用改配置文件

[root@localhost ~]# echo "NOZEROCONF=yes" >> /etc/sysconfig/network

内核
vim boot

[root@localhost ~]# vi /boot/grub2/grub.cfg 

        fi
        linux16 /boot/vmlinuz-3.10.0-957.el7.x86_64 root=UUID=dfd16c63-115d-40c1-af5e-8485ee196c86 ro rhgb quiet LANG=en_US.UTF-8 console=tty0 console=ttyS0,115200n8

网络
vim eth0(3)

[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# vi ifcfg-eth0 
BOOTPROTO=dhcp
DEVICE=eth0
ONBOOT=yes
[root@localhost network-scripts]# poweroff 

poweroff

m:不要再启动了

21:
清理sysprep
压缩small

[root@foundation21 images]# virt-sysprep -d small
[root@foundation21 images]# virt-sparsify --compress small.qcow2 /content/small.qcow2
[   0.0] Create overlay file in /tmp to protect source disk
[   0.0] Examine source disk
[   1.8] Fill free space in /dev/sda1 with zero
 100% ⟦▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒⟧ 00:00
[  11.0] Copy to destination and make sparse
[  78.6] Sparsify operation completed with no errors.
virt-sparsify: Before deleting the old disk, carefully check that the 
target disk boots and works correctly.

[root@foundation21 images]# du -sh small.qcow2 
5.1G	small.qcow2
[root@foundation21 images]# du -sh /content/small.qcow2 
514M	/content/small.qcow2

2、上传openstack(截图)

admin
5G
512内存
复制
共有

云主机类型
m2
6
1
512
10G(先看/下有木有10G

[root@compute1 ~]# df -h /
Filesystem             Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root   17G  1.6G   16G  10% /

demo:
卡的话,关之前的
m2

[root@compute1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:            991         253         356          12         381         522
Swap:          2047           0        2047

3、扩展功能

3:
自开一个虚拟机
作为存储使用
添加虚拟磁盘10G也行
开机
改名字block1

ssh

修改解析
同步时间chrony
vim
172.25.21.250
enable chronyd

yum scp openstack

[root@block1 ~]# vim /etc/hosts
172.25.21.250 foundation21.ilt.example.com
172.25.21.31 controller
172.25.21.32 compute1
172.25.21.33 block1
[root@block1 ~]# yum install -y chrony
[root@block1 ~]# vim /etc/chrony.conf 
server 172.25.21.250  iburst
[root@block1 ~]# systemctl enable --now chronyd
[root@controller ~]# scp /etc/yum.repos.d/openstack.repo root@block1:/etc/yum.repos.d/

块存储服务

1:
mysql
create
grant
grant

[root@controller ~]# mysql -pwestos
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 144
Server version: 10.1.20-MariaDB MariaDB Server

Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE cinder;
Query OK, 1 row affected (0.01 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
    ->   IDENTIFIED BY 'cinder';
Query OK, 0 rows affected (0.02 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
    ->   IDENTIFIED BY 'cinder';
Query OK, 0 rows affected (0.00 sec)

cinder cinder
role

[root@controller ~]# openstack user create --domain default --password cinder cinder
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 95c51e1bbf9e42fb88bec5c31012f386 |
| enabled   | True                             |
| id        | 5d78fb6ee8204c029841d7108386066b |
| name      | cinder                           |
+-----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user cinder admin

、O
O

[root@controller ~]# openstack service create --name cinder \
>   --description "OpenStack Block Storage" volume
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Block Storage          |
| enabled     | True                             |
| id          | a101627381814bf1ac6c29827391383c |
| name        | cinder                           |
| type        | volume                           |
+-------------+----------------------------------+
[root@controller ~]# openstack service create --name cinderv2 \
>   --description "OpenStack Block Storage" volumev2
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Block Storage          |
| enabled     | True                             |
| id          | 63a995c40e1443909b81bcdd5d67c895 |
| name        | cinderv2                         |
| type        | volumev2                         |
+-------------+----------------------------------+

8776
8776
8776
(v1)

[root@controller ~]# openstack endpoint create --region RegionOne \
>   volume public http://controller:8776/v1/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | 75f1443610844ea2bea2364fd880c915        |
| interface    | public                                  |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | a101627381814bf1ac6c29827391383c        |
| service_name | cinder                                  |
| service_type | volume                                  |
| url          | http://controller:8776/v1/%(tenant_id)s |
+--------------+-----------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
>   volume internal http://controller:8776/v1/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | 83dad81a9d964244a32bd3f621910624        |
| interface    | internal                                |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | a101627381814bf1ac6c29827391383c        |
| service_name | cinder                                  |
| service_type | volume                                  |
| url          | http://controller:8776/v1/%(tenant_id)s |
+--------------+-----------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
>   volume admin http://controller:8776/v1/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | f72d8cc40a884de8aa32aa3dab0d0566        |
| interface    | admin                                   |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | a101627381814bf1ac6c29827391383c        |
| service_name | cinder                                  |
| service_type | volume                                  |
| url          | http://controller:8776/v1/%(tenant_id)s |
+--------------+-----------------------------------------+

3个v2

[root@controller ~]# openstack endpoint create --region RegionOne \
>   volumev2 public http://controller:8776/v2/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | 4271c08086814c428d9760005734b3fd        |
| interface    | public                                  |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | 63a995c40e1443909b81bcdd5d67c895        |
| service_name | cinderv2                                |
| service_type | volumev2                                |
| url          | http://controller:8776/v2/%(tenant_id)s |
+--------------+-----------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
>   volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | dd3efd8c97a843b4a6c36fa19bcfa431        |
| interface    | internal                                |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | 63a995c40e1443909b81bcdd5d67c895        |
| service_name | cinderv2                                |
| service_type | volumev2                                |
| url          | http://controller:8776/v2/%(tenant_id)s |
+--------------+-----------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
>   volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
+--------------+-----------------------------------------+
| Field        | Value                                   |
+--------------+-----------------------------------------+
| enabled      | True                                    |
| id           | adb76010f1b1443f8ab20e95ec45e069        |
| interface    | admin                                   |
| region       | RegionOne                               |
| region_id    | RegionOne                               |
| service_id   | 63a995c40e1443909b81bcdd5d67c895        |
| service_name | cinderv2                                |
| service_type | volumev2                                |
| url          | http://controller:8776/v2/%(tenant_id)s |
+--------------+-----------------------------------------+

安装

[root@controller ~]# yum install openstack-cinder

vim cinder
database
DEFAULT
oslo(openstack)
DEFAULT
keystone(cinder)
DEFAULT 31
oslo_concurrency

[root@controller ~]# vim /etc/cinder/cinder.conf

[database]
connection = mysql+pymysql://cinder:cinder@controller/cinder

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 172.25.21.31

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

su -s -c
最好进入数据库查看确认

[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
Option "logdir" from group "DEFAULT" is deprecated. Use option "log-dir" from group "DEFAULT".
2021-05-02 15:37:51.461 20387 WARNING py.warnings [-] /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:241: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported
  exception.NotSupportedWarning

2021-05-02 15:37:51.671 20387 INFO migrate.versioning.api [-] 0 -> 1... 
2021-05-02 15:37:52.462 20387 INFO migrate.versioning.api [-] done
2021-05-02 15:37:52.462 20387 INFO migrate.versioning.api [-] 1 -> 2... 
2021-05-02 15:37:52.778 20387 INFO migrate.versioning.api [-] done
2021-05-02 15:37:52.778 20387 INFO migrate.versioning.api [-] 2 -> 3... 

......

[root@controller ~]# mysql -pwestos
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 151
Server version: 10.1.20-MariaDB MariaDB Server

Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| cinder             |
| glance             |
| information_schema |
| keystone           |
| mysql              |
| neutron            |
| nova               |
| nova_api           |
| performance_schema |
+--------------------+
9 rows in set (0.00 sec)

MariaDB [(none)]> show tables from cinder;
+----------------------------+
| Tables_in_cinder           |
+----------------------------+
| backups                    |
| cgsnapshots                |
| consistencygroups          |
| driver_initiator_data      |
| encryption                 |
| image_volume_cache_entries |
| iscsi_targets              |
| migrate_version            |
| quality_of_service_specs   |
| quota_classes              |
| quota_usages               |
| quotas                     |
| reservations               |
| services                   |
| snapshot_metadata          |
| snapshots                  |
| transfers                  |
| volume_admin_metadata      |
| volume_attachment          |
| volume_glance_metadata     |
| volume_metadata            |
| volume_type_extra_specs    |
| volume_type_projects       |
| volume_types               |
| volumes                    |
+----------------------------+
25 rows in set (0.00 sec)

vim etc nova
cinder
restart nova api
enable cinder

[root@controller ~]# vim /etc/nova/nova.conf 

[cinder]
os_region_name = RegionOne
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable --now openstack-cinder-api.service openstack-cinder-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-api.service to /usr/lib/systemd/system/openstack-cinder-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-scheduler.service to /usr/lib/systemd/system/openstack-cinder-scheduler.service.

3:
安装lvm2,enable

[root@block1 ~]# yum install  lvm2
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
openstack                                                                    | 2.9 kB  00:00:00     
openstack/primary_db                                                         | 141 kB  00:00:00     
Package 7:lvm2-2.02.180-8.el7.x86_64 already installed and latest version
Nothing to do
[root@block1 ~]# systemctl enable --now lvm2-lvmetad.service
[root@block1 ~]# fdisk -l

Disk /dev/vda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000c1796

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048     2099199     1048576   83  Linux
/dev/vda2         2099200    41943039    19921920   8e  Linux LVM

Disk /dev/vdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/mapper/rhel-root: 18.2 GB, 18249416704 bytes, 35643392 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/mapper/rhel-swap: 2147 MB, 2147483648 bytes, 4194304 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

pvs
pvcreate

fdisk -l
pvcreate
vgcreate

[root@block1 ~]# pvcreate /dev/vdb
  Physical volume "/dev/vdb" successfully created.
[root@block1 ~]# vgcreate cinder-volumes /dev/vdb
  Volume group "cinder-volumes" successfully created

vim lvmconf
/filter
红色 vda vdb 1个

[root@block1 ~]# vim /etc/lvm/lvm.conf 
143
        filter = [ "a/vda/", "a/vdb/", "r/.*/"]

3:
安装targetcli(比NFS安全)

[root@block1 ~]# yum install openstack-cinder targetcli python-keystone

vim cinder.conf
database
DEFAULT
oslo,DEFAULT
keystone
DEFAULT 33
lvm (最后)卷的驱动
DEFAULT lvm
DEFAULT
oslo_concurrency

[root@block1 ~]# vim /etc/cinder/cinder.conf

[database]
connection = mysql+pymysql://cinder:cinder@controller/cinder

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 172.25.21.33
enabled_backends = lvm
glance_api_servers = http://controller:9292

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

enable

[root@block1 ~]# systemctl enable --now openstack-cinder-volume.service target.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.

1:
cinder service-list

[root@controller ~]# cinder service-list 
+------------------+------------+------+---------+-------+----------------------------+-----------------+
|      Binary      |    Host    | Zone |  Status | State |         Updated_at         | Disabled Reason |
+------------------+------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller | nova | enabled |   up  | 2021-05-02T07:57:32.000000 |        -        |
|  cinder-volume   | block1@lvm | nova | enabled |   up  | 2021-05-02T07:57:34.000000 |        -        |
+------------------+------------+------+---------+-------+----------------------------+-----------------+

web:
demo
2G
连接到vm3

vm3:
格式化
mount
mkdir

web:
先卸载
分离

拉伸(扩展云盘)
5G
连接到vm3

vm3:
fdisk -l

mount data
df -h data

xfs_

3:
回收
pvs
lvs
(lv拉伸)

[root@block1 ~]# pvs
  PV         VG             Fmt  Attr PSize   PFree  
  /dev/vda2  rhel           lvm2 a--  <19.00g      0 
  /dev/vdb   cinder-volumes lvm2 a--  <20.00g <15.00g


[root@block1 ~]# lvs
  LV                                          VG             Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  volume-d9ab64b1-7f13-4728-a3aa-0f3e3420fa2b cinder-volumes -wi-ao----   5.00g                                                    
  root                                        rhel           -wi-ao---- <17.00g                                                    
  swap                                        rhel           -wi-ao----   2.00g                

三、

之前的虚拟机关掉
8.2的版本

所有服务部署到一个主机上(自动化)

21:
7G
CPU:pass-through
加磁盘20G
双网卡

kolla-ansible

lftp

cp eth1
(3)

ifup eth1

关闭火墙
lscpu

pvcreate
vgcreate

vim

restart lvm2
(出现问题)

lftp
vim docker.repo
安装docker-ce

不要装qemu(待确认:原文为"qmo")

lftp
安装8的版本docker

阿里云的镜像
写到yum源
安装docker-ce

docker images

加速器(加速下载镜像)不需要

安装python编译依赖

dnf安装依赖性

(不需要虚拟环境)

安装python3-pip

pip3 升级

mkdir

vim pip.conf

安装ansible

vim epel

安装ansible

vim ansible
(1)

pip install 8.0.1
(出错)
强制更新

mkdir -p

cd
cp etc kolla

cd ansible
cp ~

ansible -i

cd etc

vim yml
(不做)

kolla-genpwd
vim passwords.yml
/admin
改成westos

lftp stein下载
导入docker -i

vim global
匹配版本centos
41IP地址
网络接口eth0
neutron:eth1
(2个接口)
no haproxy
heat no
块存储 enable_cinder yes
lvm yes
iscsis yes

vim docker
false
去掉一个东西

-i all检查依赖性
(python没有安装)

软链接
ln -s python

再来检查依赖性

安装python2
(不能用软链接)

3######

卸载ansible

安装最新的ansible

删除
删除
删除
删除(。。。。。)

vim
改admin的密码westos
vim global
确认centos
配置加速 41IP
eth0
eth1
cinder yes
lvm yes
iscsi yes
fluentd no
elasticsearch no
heat no

拉镜像

上一篇:OpenStack的部署T版(七)——cinder模块


下一篇:Openstack学习总结之七(存储组件部署)