Posted
Filed under Computer/Linux
Virtual Machine Manager (virt-manager) error after updating RPM packages on CentOS 7.7.



Error message:
virt-manager-1.5.0 request.py:5:<module>:ImportError:cannot import name UnrewindableBodyError

This issue comes from an incorrect combination of installed packages (the pip-installed and RPM-installed copies of urllib3/requests conflict).

So, remove the urllib3 and requests packages for Python 2.7:

# pip uninstall urllib3
# pip uninstall requests
# yum remove python-urllib3
# yum remove python-requests

Then reinstall the original urllib3 and requests packages from the distribution repository:
# yum install python-urllib3
# yum install python-requests
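To confirm the reinstalled packages import cleanly before moving on, a quick check like this can help (my addition, not from the original post):
# python -c 'import urllib3, requests; print urllib3.__version__, requests.__version__'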

Now, reinstall the virt-manager package:
# yum install virt-manager

Now Virtual Machine Manager (virt-manager) should work again.
2020/01/28 04:52
Posted
Filed under Computer/Linux
1. The server is already booted and on the same network:
ping the network's broadcast address (ping -b 192.168.122.255), or use nmap,
then look it up in the ARP table (arp -an | grep <mac address>).
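For example (my addition, using the sample MAC address from the tcpdump example below):
$ ping -b -c 3 192.168.122.255
$ arp -an | grep -i "52:54:00:05:3b:88"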


2. Catch the DHCP request from a booting server.
$ sudo tcpdump -i <eth interface> -en | grep -e "\.mdns" -e "igmp" | grep <mac address> | awk '{print $10}'

This requires the tcpdump command:
$ rpm -qa |grep tcpdump >& /dev/null || sudo yum install tcpdump

ex) sudo tcpdump -i eth1 -en | grep -e "\.mdns" -e "igmp" | grep "52:54:00:05:3b:88" | awk '{print $10}'
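To filter the DHCP traffic itself rather than mDNS/IGMP, a variation like the following should also work (my addition, not from the original post):
$ sudo tcpdump -i eth1 -en 'port 67 or port 68' | grep "52:54:00:05:3b:88"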
2019/09/17 07:44
Posted
Filed under Computer/Linux
COMMON )
# yum remove postgresql10*  postgresql9*
# yum install postgresql-bdr94-bdr
# vi /etc/hosts
------------------------------------------------------------------------------------------------
192.168.122.1 psql1
192.168.122.2 psql2
------------------------------------------------------------------------------------------------
# su - postgres
-bash-4.2$ vi .bash_profile
------------------------------------------------------------------------------------------------
export PATH=${PATH}:/usr/pgsql-9.4/bin
------------------------------------------------------------------------------------------------
-bash-4.2$ source .bash_profile
-bash-4.2$ rm -fr /var/lib/pgsql/9.4-bdr/*
-bash-4.2$ initdb /var/lib/pgsql/9.4-bdr -A trust
-bash-4.2$ cd 9.4-bdr/
-bash-4.2$ vi postgresql.conf
------------------------------------------------------------------------------------------------
listen_addresses = '*'
shared_preload_libraries = 'bdr'
wal_level = 'logical'
track_commit_timestamp = on
max_connections = 100
max_wal_senders = 10
max_replication_slots = 10
max_worker_processes = 10
------------------------------------------------------------------------------------------------
-bash-4.2$ vi pg_hba.conf
------------------------------------------------------------------------------------------------
local   all             all                                     trust
host    all             all             127.0.0.1/32            trust
host    all             all             ::1/128                 trust
host replication bdrsync 192.168.122.1/32 trust
host replication bdrsync 192.168.122.2/32 trust
 
host ktest_db bdrsync 192.168.122.1/32 password
host ktest_db bdrsync 192.168.122.2/32 password
------------------------------------------------------------------------------------------------
-bash-4.2$ pg_ctl -l /tmp/pg_bdr.log -D /var/lib/pgsql/9.4-bdr start
-bash-4.2$ psql -c "CREATE USER bdrsync superuser;"
-bash-4.2$ psql -c "ALTER USER bdrsync WITH PASSWORD '12345#';"
-bash-4.2$ createuser ktester
-bash-4.2$ createdb -O ktester ktest_db
-bash-4.2$ psql ktest_db -c 'CREATE EXTENSION btree_gist;'
-bash-4.2$ psql ktest_db -c 'CREATE EXTENSION bdr;'
-bash-4.2$ pg_ctl -l /tmp/pg_bdr.log -D /var/lib/pgsql/9.4-bdr restart
-bash-4.2$ psql
postgres=# \c ktest_db
 

Master1 (IP: 192.168.122.1)
ktest_db=# SELECT bdr.bdr_group_create(local_node_name := 'psql1', node_external_dsn := 'host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#');
bdr_group_create
------------------
(1 row)
ktest_db=# select * from bdr.bdr_nodes;
     node_sysid      | node_timeline | node_dboid | node_status | node_name |                          node_local_dsn
                     | node_init_from_dsn | node_read_only | node_seq_id
---------------------+---------------+------------+-------------+-----------+---------------------------------------------
---------------------+--------------------+----------------+-------------
6735928151685930893 |             1 |      16387 | r           | psql1     | host=192.168.122.1 user=bdrsync dbname=sum_
db password=12345# |                    | f              |
(1 row)
 
sum_db=# select * from bdr.bdr_connections;
     conn_sysid      | conn_timeline | conn_dboid | conn_origin_sysid | conn_origin_timeline | conn_origin_dboid | conn_is
_unidirectional |                             conn_dsn                             | conn_apply_delay | conn_replication_s
ets
---------------------+---------------+------------+-------------------+----------------------+-------------------+--------
----------------+------------------------------------------------------------------+------------------+-------------------
----
6735928151685930893 |             1 |      16387 | 0                 |                    0 |                 0 | f
                | host=192.168.122.71 user=bdrsync dbname=ktest_db password=12345# |                  | {default}
(1 row)


Master2 (IP: 192.168.122.2)
ktest_db=# SELECT bdr.bdr_group_join(local_node_name := 'psql2', node_external_dsn := 'host=192.168.122.2 user=bdrsync dbname=ktest_db password=12345#',
join_using_dsn := 'host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#');
bdr_group_join
----------------
(1 row)
 
ktest_db=# select * from bdr.bdr_nodes; 
    node_sysid      | node_timeline | node_dboid | node_status | node_name |                          node_local_dsn
                   |                       node_init_from_dsn                        | node_read_only | node_seq_id
---------------------+---------------+------------+-------------+-----------+-----------------------------------------------
-------------------+-----------------------------------------------------------------+----------------+-------------
6737344874282799845 |             1 |      16387 | r           | psql1     | host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#  |                                                                 | f              |
6737381937345819706 |             1 |      16387 | r           | psql2     | host=192.168.122.2 user=bdrsync dbname=ktest_db password=12345# | host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345# | f              |
(2 rows)
 
Any node)
ktest_db=# create table shared_table ( idx serial primary key, username varchar (50) unique not null, memo varchar(500) not null);
CREATE TABLE
 
ktest_db=# \dt
            List of relations
Schema |     Name     | Type  |  Owner
--------+--------------+-------+----------
public | shared_table | table | postgres
(1 row)
 
 
ktest_db=# \d shared_table;
                                  Table "public.shared_table"
  Column  |          Type          |                         Modifiers
----------+------------------------+------------------------------------------------------------
idx      | integer                | not null default nextval('shared_table_idx_seq'::regclass)
username | character varying(50)  | not null
memo     | character varying(500) | not null
Indexes:
    "shared_table_pkey" PRIMARY KEY, btree (idx)
    "shared_table_username_key" UNIQUE CONSTRAINT, btree (username)
 
 
ktest_db=# insert into shared_table (username,memo) values ('a1','aMemo'), ('a2','a2Memo');
INSERT 0 2
 
 
ktest_db=# select * from shared_table;
idx | username |  memo
-----+----------+--------
   1 | a1       | aMemo
   2 | a2       | a2Memo
(2 rows)
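A quick cross-node check (my addition, not in the original post): insert on one node and confirm the row appears on the other.
On psql1:
-bash-4.2$ psql ktest_db -c "insert into shared_table (username,memo) values ('x1','from psql1');"
On psql2, a moment later:
-bash-4.2$ psql ktest_db -c "select * from shared_table where username = 'x1';"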
 
Now inserting data from the other node fails:
ktest_db=# insert into shared_table (username,memo) values ('a4','a4Memo');
ERROR:  duplicate key value violates unique constraint "shared_table_pkey"
DETAIL:  Key (idx)=(1) already exists.
 
So, try this instead to avoid the error:
ktest_db=# insert into shared_table (idx,username,memo) values ((select count(idx)+1 from shared_table),'a4','a4Memo');
INSERT 0 1
ktest_db=# insert into shared_table (idx,username,memo) values ((select count(idx)+1 from shared_table),'b4','b4Memo');
INSERT 0 1
ktest_db=# select * from shared_table;
idx | username |  memo
-----+----------+--------
   1 | a1       | aMemo
   2 | a2       | a2Memo
   3 | b4       | b4Memo
   4 | a4       | a4Memo
 
 or

ktest_db=# insert into shared_table (idx,username,memo) values ((select idx from shared_table order by idx desc limit 1)+1,'a5','a5Memo');
 
 or
 
ktest_db=# insert into shared_table (idx,username,memo) values ((select max(idx) from shared_table)+1,'a5','a5Memo');
 
 
 
 
issue 1)
ktest_db=# SELECT bdr.bdr_group_create(local_node_name := 'psql1', node_external_dsn := 'host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#');
FATAL:  could not connect to the server in non-replication mode: FATAL:  no pg_hba.conf entry for host "192.168.122.1", user "bdrsync", database "ktest_db", SSL off
 
DETAIL:  dsn was: connect_timeout=30 keepalives=1 keepalives_idle=20 keepalives_interval=20 keepalives_count=5   host=192.168.122.71 user=bdrsync dbname=ktest_db password=12345# fallback_application_name='bdr (6735928151685930893,1,16387,):bdrnodeinfo'
CONTEXT:  SQL statement "SELECT *                           FROM bdr_get_remote_nodeinfo(node_local_dsn)"
PL/pgSQL function internal_begin_join(text,text,text,text) line 42 at SQL statement
SQL statement "SELECT bdr.internal_begin_join(
        'bdr_group_join',
        local_node_name,
        CASE WHEN node_local_dsn IS NULL THEN node_external_dsn ELSE node_local_dsn END,
        join_using_dsn)"
PL/pgSQL function bdr_group_join(text,text,text,text,integer,text[]) line 21 at PERFORM
SQL statement "SELECT bdr.bdr_group_join(
        local_node_name := local_node_name,
        node_external_dsn := node_external_dsn,
        join_using_dsn := null,
        node_local_dsn := node_local_dsn,
        apply_delay := apply_delay,
        replication_sets := replication_sets)"
PL/pgSQL function bdr_group_create(text,text,text,integer,text[]) line 84 at PERFORM
server closed the connection unexpectedly
        This probably means the server terminated abnormally
        before or while processing the request.
The connection to the server was lost. Attempting reset: Succeeded.
 
==> This means the bdrsync user cannot access ktest_db, because no matching entry is defined in the pg_hba.conf file.
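The fix is the ktest_db entries already shown in the COMMON pg_hba.conf above, followed by a reload (paths from this post):
-bash-4.2$ vi /var/lib/pgsql/9.4-bdr/pg_hba.conf
------------------------------------------------------------------------------------------------
host ktest_db bdrsync 192.168.122.1/32 password
host ktest_db bdrsync 192.168.122.2/32 password
------------------------------------------------------------------------------------------------
-bash-4.2$ pg_ctl -D /var/lib/pgsql/9.4-bdr reload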
 
 
issue 2)
amstest=# SELECT bdr.bdr_group_join(
amstest(# local_node_name := 'psql2',
amstest(# node_external_dsn := 'host=192.168.122.2 user=bdrsync dbname=ktest_db password=12345#',
amstest(# join_using_dsn := 'host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#'
amstest(# );
ERROR:  establish BDR: FATAL:  no pg_hba.conf entry for replication connection from host "192.168.122.2", user "bdrsync", SSL off
 
DETAIL:  Connection string is 'replication=database fallback_application_name='BDR test connection' connect_timeout=30 keepalives=1 keepalives_idle=20 keepalives_interval=20 keepalives_count=5   host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#'
CONTEXT:  SQL statement "SELECT * FROM bdr_test_replication_connection(remote_dsn)"
PL/pgSQL function internal_begin_join(text,text,text,text) line 110 at SQL statement
SQL statement "SELECT bdr.internal_begin_join(
        'bdr_group_join',
        local_node_name,
        CASE WHEN node_local_dsn IS NULL THEN node_external_dsn ELSE node_local_dsn END,
        join_using_dsn)"
PL/pgSQL function bdr_group_join(text,text,text,text,integer,text[]) line 21 at PERFORM
 
==> This means the bdrsync user cannot make a replication connection to the psql1 server.
So the pg_hba.conf file there must be modified as well.
 
bdrsync accesses ktest_db with a password, and bdrsync makes replication connections with the trust method.
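Concretely, the replication entries from the COMMON pg_hba.conf above must also be present on psql1, followed by a reload:
-bash-4.2$ vi /var/lib/pgsql/9.4-bdr/pg_hba.conf
------------------------------------------------------------------------------------------------
host replication bdrsync 192.168.122.1/32 trust
host replication bdrsync 192.168.122.2/32 trust
------------------------------------------------------------------------------------------------
-bash-4.2$ pg_ctl -D /var/lib/pgsql/9.4-bdr reload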


issue 3)
ktest_db=# SELECT bdr.bdr_group_create(local_node_name := 'psql2', node_external_dsn := 'host=192.168.122.2 user=bdrsync dbname=ktest_db password=12345#', join_using_dsn := 'host=192.168.122.1 user=bdrsync dbname=ktest_db password=12345#');
ERROR:  function bdr.bdr_group_create(local_node_name := unknown, node_external_dsn := unknown, join_using_dsn := unknown) does not exist
LINE 1: SELECT bdr.bdr_group_create(local_node_name := 'psql2', node...
               ^
HINT:  No function matches the given name and argument types. You might need to add explicit type casts.

==> On the joining node use bdr.bdr_group_join, not bdr.bdr_group_create (bdr_group_create does not take a join_using_dsn parameter).

issue 4)
---------------------------------------------------------------------------------
-bash-4.2$ psql kage_db -c 'CREATE EXTENSION bdr;'
ERROR:  bdr can only be loaded via shared_preload_libraries
---------------------------------------------------------------------------------------
Check the postgresql.conf file; the shared_preload_libraries = 'bdr' line is missing.
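Note that shared_preload_libraries only takes effect after a restart, not a reload; a quick way to verify it (my addition):
-bash-4.2$ pg_ctl -l /tmp/pg_bdr.log -D /var/lib/pgsql/9.4-bdr restart
-bash-4.2$ psql -c "SHOW shared_preload_libraries;"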
2019/09/17 07:32
Posted
Filed under Computer/Linux
A simple shell script to find the raw (physical) device names behind an LVM volume, file, or directory.

$ sudo ./find_rdev /dev/mapper/centos-root
/dev/sda3
/dev/sdb2

$ sudo ./find_rdev  /boot
/dev/sda2

$ sudo ./find_rdev /etc/fstab
/dev/sda3
/dev/sdb2

Case where the LVM volume group includes an mdadm device:
# pvs
  PV         VG     Fmt  Attr PSize  PFree 
  /dev/md0   centos lvm2 a--  15.98g 15.98g
  /dev/sda2  centos lvm2 a--  <7.00g     0 
  /dev/sdb1  centos lvm2 a--  <8.00g     0 


$ sudo ./find_rdev /dev/md0 
/dev/sdc
/dev/sdd

$ sudo ./find_rdev /
/dev/sda2
/dev/sdb1
/dev/sdc
/dev/sdd
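The script itself is not shown in the post; a minimal sketch that reproduces the behavior above, assuming stacked devices expose their members via /sys/class/block/<dev>/slaves:

#!/bin/bash
# find_rdev - minimal sketch (the original script is not included in the post).
# Resolve a file, directory, LVM volume, or md device down to the underlying
# block devices by walking the kernel's "slaves" links in sysfs.

dev="$1"
# For a file or directory, start from the device backing its filesystem.
[ -b "$dev" ] || dev=$(df --output=source "$dev" | tail -n 1)

walk() {
    local name
    name=$(basename "$(readlink -f "$1")")        # e.g. dm-0, md0, sda2
    local slaves=( /sys/class/block/"$name"/slaves/* )
    if [ -e "${slaves[0]}" ]; then
        local s
        for s in "${slaves[@]}"; do
            walk "/dev/$(basename "$s")"          # recurse into each member device
        done
    else
        echo "/dev/$name"                         # leaf device: no further slaves
    fi
}

walk "$dev" | sort -u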


2018/04/07 09:37
Posted
Filed under Computer/Linux
Need 4 nodes.
ceph-deploy : admin/deploy node
ceph1  <==> node1 (MDS: gateway server)
ceph2  <==> node2
ceph3  <==> node3
cephc : ceph client node
Hardware spec:
All nodes: need an outside network and an inside network.
OSD nodes: need an extra disk for the OSD device.
All servers need the outside network to install Ceph.
On all servers:
# useradd ceph
# passwd ceph
# yum install ntp ntpdate ntp-doc
On the ceph-deploy server:
# sudo yum install yum-plugin-priorities
# echo "ceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/ceph
# chmod 0440 /etc/sudoers.d/ceph
# scp /etc/sudoers.d/ceph 10.4.0.102:/etc/sudoers.d/ceph
# scp /etc/sudoers.d/ceph 10.4.0.103:/etc/sudoers.d/ceph 
# scp /etc/sudoers.d/ceph 10.4.0.104:/etc/sudoers.d/ceph
# sudo vi /etc/yum.repos.d/ceph.repo
[ceph-noarch]
name=Ceph noarch packages
enabled=1
gpgcheck=1
type=rpm-md
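The repo stanza above is missing the repository location and key; assuming the Jewel release used later in this post, the missing lines would look like:
baseurl=https://download.ceph.com/rpm-jewel/el7/noarch
gpgkey=https://download.ceph.com/keys/release.asc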
Switch to the ceph account:
$ ssh-keygen -t rsa
$ cd .ssh/
$ cp id_rsa.pub authorized_keys
$ cd -
$ ssh-copy-id ceph@node1
$ ssh-copy-id ceph@node2
$ ssh-copy-id ceph@node3
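So that ceph-deploy logs in to the nodes as the ceph user without extra options, an ~/.ssh/config like this can be added (my addition, not in the original post):
$ cat >> ~/.ssh/config << 'EOF'
Host node1 node2 node3 cephc
    User ceph
EOF
$ chmod 600 ~/.ssh/config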
$ sudo yum install ceph-deploy
$ mkdir my-cluster
$ cd my-cluster/
$ ceph-deploy purge node1 node2 node3
$ ceph-deploy purgedata node1 node2 node3
$ ceph-deploy forgetkeys
$ rm -f ceph.*
Create cluster:
$ ceph-deploy new node1  <== create ceph.conf file
(or  ceph-deploy new --cluster-network=11.130.1.0/24 --public-network=11.130.1.0/24 <hostname> )
<< Configure the ceph.conf file for your hardware environment >>
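For reference, a minimal sketch of what ceph.conf might contain for this environment (my assumption, built from the values that appear later in this post; the fsid is generated by ceph-deploy new):
[global]
fsid = <generated by ceph-deploy new>
mon_initial_members = node1
mon_host = 10.4.0.102
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 10.4.0.0/16
osd pool default size = 2    # optional: replica count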
Install ceph packages
$ ceph-deploy install node1 node2 node3
Deploy the initial monitor and gather the keys:
$ ceph-deploy mon create-initial
Copy config and admin key to all ceph servers:
$ ceph-deploy admin node1 node2 node3 <== copy ceph config and ceph.client.admin.keyring
Add OSD device (/dev/sdb)
$ ceph-deploy osd create node1:sdb node2:sdb node3:sdb
Health check:
$ ssh node1 sudo ceph health
HEALTH_OK
$ ssh node1 sudo ceph -s
    cluster e69ddb88-bef7-4fbe-9d41-8644032b40b2
     health HEALTH_OK
     monmap e1: 1 mons at {node1=10.4.0.102:6789/0}
            election epoch 3, quorum 0 node1
     osdmap e14: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds
      pgmap v24: 64 pgs, 1 pools, 0 bytes data, 0 objects
            100 MB used, 76658 MB / 76759 MB avail
                  64 active+clean
Create Metadata server:
$ ceph-deploy mds create node1
Add more monitor servers on the storage nodes for high availability (the output below still shows a single monitor, so this step may need verification):
$ ceph-deploy mon add node2
$ ceph-deploy mon add node3
$ ssh node1 sudo ceph -s
    cluster e69ddb88-bef7-4fbe-9d41-8644032b40b2
     health HEALTH_OK
     monmap e1: 1 mons at {node1=10.4.0.102:6789/0}
            election epoch 3, quorum 0 node1
     osdmap e14: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds
      pgmap v24: 64 pgs, 1 pools, 0 bytes data, 0 objects
            100 MB used, 76658 MB / 76759 MB avail
                  64 active+clean
Create RGW at gateway server:
$ ceph-deploy rgw create node1

Connect the client node (cephc):
On the admin node, deploy:
# first copy the ceph account, ssh key, password, and sudoers entry to cephc (as for the other nodes above)
admin-node$ ceph-deploy install cephc
admin-node$ ceph-deploy admin cephc  <== copy ceph config and ceph.client.admin.keyring
On the client node, create a block device:
[root@cephc ~]# chmod 644 /etc/ceph/ceph.client.admin.keyring
[root@cephc ~]# rbd create disk01 --size 40960 <== rbd device size
[root@cephc ~]# rbd ls -l
NAME     SIZE PARENT FMT PROT LOCK
disk01 40960M          2
foo     4096M          2
[root@cephc ~]# modprobe rbd
[root@cephc ~]# sudo rbd feature disable disk01 exclusive-lock object-map fast-diff deep-flatten
[root@cephc ~]# rbd map disk01
/dev/rbd0
[root@cephc ~]# rbd showmapped
id pool image  snap device
0  rbd  disk01 -    /dev/rbd0
[root@cephc ~]# mkfs.xfs /dev/rbd0
[root@cephc ~]# mkdir /mnt/cephdisk
[root@cephc ~]# mount /dev/rbd0 /mnt/cephdisk/
[root@cephc ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda3        65G  2.1G   60G   4% /
devtmpfs        487M     0  487M   0% /dev
tmpfs           497M     0  497M   0% /dev/shm
tmpfs           497M  6.6M  490M   2% /run
tmpfs           497M     0  497M   0% /sys/fs/cgroup
/dev/sda1       477M   98M  354M  22% /boot
tmpfs           100M     0  100M   0% /run/user/0
/dev/rbd0        40G   33M   40G   1% /mnt/cephdisk
RBD mount script
---------------------------------------------------------------------------------------------------
#!/bin/bash
# Script Author: http://bryanapperson.com/
# Change to your pool name
export poolname=rbd
# Change to your disk image name
export rbdimage=disk01
# Mount Directory
export mountpoint=/mnt/mydisk
# Image mount/unmount and pool are passed from the systems service as arguments
# Determine if we are mounting or unmounting
if [ "$1" == "m" ]; then
modprobe rbd
rbd feature disable $rbdimage exclusive-lock object-map fast-diff deep-flatten
rbd map $rbdimage --id admin --keyring /etc/ceph/ceph.client.admin.keyring
mkdir -p $mountpoint
mount /dev/rbd/$poolname/$rbdimage $mountpoint
fi
if [ "$1" == "u" ]; then
umount $mountpoint
rbd unmap /dev/rbd/$poolname/$rbdimage
fi
---------------------------------------------------------------------------------------------------
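The comment in the script mentions being called from a systemd service; a minimal unit sketch (my addition; the script path /usr/local/bin/rbd-mount and the unit file name are hypothetical):
# /etc/systemd/system/rbd-mount.service (hypothetical; the script above saved as /usr/local/bin/rbd-mount)
[Unit]
Description=Map and mount the Ceph RBD image
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/local/bin/rbd-mount m
ExecStop=/usr/local/bin/rbd-mount u

[Install]
WantedBy=multi-user.target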
Testing with single network:
[root@cephc ~]# cd /mnt/cephdisk/
[root@cephc cephdisk]# ls
[root@cephc cephdisk]# dd if=/dev/zero of=20G bs=1M count=20480
20480+0 records in
20480+0 records out
21474836480 bytes (21 GB) copied, 814.81 s, 26.4 MB/s
[root@cephc cephdisk]#
[root@cephc cephdisk]# time rm -f 20G
real    0m0.132s
user    0m0.001s
sys     0m0.110s
[root@cephc cephdisk]# dd if=/dev/zero of=2G bs=1M count=2048
2048+0 records in
2048+0 records out
2147483648 bytes (2.1 GB) copied, 80.6485 s, 26.6 MB/s
Testing with dual network:
Copy the /etc/ceph/ceph.conf file to the OSD servers (node1, node2, node3)
and reboot node1, node2, and node3 to apply it.
[root@node1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = e69ddb88-bef7-4fbe-9d41-8644032b40b2
mon_initial_members = node1
mon_host = 10.4.0.102
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[osd]
public_network = 10.4.0.0/16
cluster_network = 10.5.0.0/16
[osd.0]
public_addr = 10.4.0.102:6801
cluster_addr = 10.5.0.1
[osd.1]
public_addr = 10.4.0.103:6802
cluster_addr = 10.5.0.2
[osd.2]
public_addr = 10.4.0.104:6803
cluster_addr = 10.5.0.3
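One way to distribute this file from the admin node instead of copying it by hand (my addition, not from the post):
$ ceph-deploy --overwrite-conf config push node1 node2 node3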
[ceph@node1 ~]$ sudo ceph auth list
installed auth entries:
mds.node1
        key: AQBi7lNa8E3rJRAA8h6VKiIrT3Jjq2QHBlktmw==
        caps: [mds] allow
        caps: [mon] allow profile mds
        caps: [osd] allow rwx
osd.0
        key: AQDF7VNaPpZXChAAoF/ppU5TLPV0aWgj/lL8Cg==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.1
        key: AQDR7VNa55uGNxAAOIXPeXPJNihYILKfVqQmew==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.2
        key: AQDg7VNa6HKRGhAADWqYn32xh7g1LPEkeTIBFw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
client.admin
        key: AQBD7VNaej9jCRAAaNWDjIK7KTUsuY90lIqtwg==
        caps: [mds] allow *
        caps: [mon] allow *
        caps: [osd] allow *
client.bootstrap-mds
        key: AQBD7VNaXUrtGRAAuQGP7ElLiMlWExWE2PM2iQ==
        caps: [mon] allow profile bootstrap-mds
client.bootstrap-osd
        key: AQBD7VNa/BftDhAA0grK5NqxJf2zZAXj61158Q==
        caps: [mon] allow profile bootstrap-osd
client.bootstrap-rgw
        key: AQBD7VNa46ZtFBAA1JSr5c4Jt+BqHIhLN1I9wA==
        caps: [mon] allow profile bootstrap-rgw
client.rgw.node1
        key: AQCh71Na4bLZAhAAnlP+2BGA8kK80FoiEEklIw==
        caps: [mon] allow rw
        caps: [osd] allow rwx
[ceph@node1 ~]$ sudo rados df
pool name                 KB      objects       clones     degraded      unfound           rd        rd KB           wr        wr KB
.rgw.root                  2            4            0            0            0           48           37            4            5
default.rgw.control            0            8            0            0            0            0            0            0            0
default.rgw.data.root            0            0            0            0            0            0            0            0            0
default.rgw.gc             0           32            0            0            0          288          256          192            0
default.rgw.log            0          127            0            0            0         6096         5969         4064            0
default.rgw.users.uid            0            0            0            0            0            0            0            0            0
rbd                 22163889         5435            0            0            0          473         6140        55750     33583613
  total used        66644872         5606
  total avail       11956808
  total space       78601680
[root@cephc cephdisk]# dd if=/dev/zero of=2G3 bs=1M count=2048
2048+0 records in
2048+0 records out
2147483648 bytes (2.1 GB) copied, 73.7127 s, 29.1 MB/s
[root@cephc cephdisk]# dd if=/dev/zero of=2G4 bs=1M count=2048
2048+0 records in
2048+0 records out
2147483648 bytes (2.1 GB) copied, 76.5691 s, 28.0 MB/s
[root@cephc cephdisk]# dd if=/dev/zero of=2G5 bs=1M count=2048
2048+0 records in
2048+0 records out
After rebooting the client node (the RBD image must be re-mapped and re-mounted):
[root@cephc ~]# ls /dev/rbd*
ls: cannot access /dev/rbd*: No such file or directory
[root@cephc ~]# modprobe rbd
[root@cephc ~]# lsmod |grep rbd
rbd                    83889  0
libceph               282661  1 rbd
[root@cephc ~]# ls /dev/rbd*
ls: cannot access /dev/rbd*: No such file or directory
[root@cephc ~]# rbd ls -l
NAME     SIZE PARENT FMT PROT LOCK
disk01 40960M          2
foo     4096M          2
[root@cephc ~]# rbd feature disable disk01 exclusive-lock object-map fast-diff deep-flatten
rbd: failed to update image features: (22) Invalid argument
2018-01-09 00:48:23.297393 7f9028a25d80 -1 librbd: one or more requested features are already disabled
[root@cephc ~]# rbd map disk01
/dev/rbd0
[root@cephc ~]# ls /dev/rbd*
/dev/rbd0
/dev/rbd:
rbd
[root@cephc ~]# mount /dev/rbd0 /mnt/cephdisk
[root@cephc ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda3        65G  2.1G   60G   4% /
devtmpfs        487M     0  487M   0% /dev
tmpfs           497M     0  497M   0% /dev/shm
tmpfs           497M  6.6M  490M   2% /run
tmpfs           497M     0  497M   0% /sys/fs/cgroup
/dev/sda1       477M   98M  354M  22% /boot
tmpfs           100M     0  100M   0% /run/user/0
/dev/rbd0        40G   11G   30G  26% /mnt/cephdisk
[root@cephc ~]# ls -lh /mnt/cephdisk/
total 10G
-rw-r--r-- 1 root root 2.0G Jan  9 00:08 2G
-rw-r--r-- 1 root root 2.0G Jan  9 00:27 2G2nd
-rw-r--r-- 1 root root 2.0G Jan  9 00:38 2G3
-rw-r--r-- 1 root root 2.0G Jan  9 00:40 2G4
-rw-r--r-- 1 root root 2.0G Jan  9 00:42 2G5
2018/03/24 08:05