Настройка хоста под KVM

Материал из Eugene Paniot Wiki
Перейти к: навигация, поиск

Базовая настройка ОС

Базовая настройка сервера на базе Linux


KVM

# Generate a unique machine ID; D-Bus/libvirt require it to be present.
dbus-uuidgen > /var/lib/dbus/machine-id

# Enable the CentOS qemu-ev (enterprise virtualization) repo first so the
# qemu packages below are pulled from it.
yum install -y centos-release-qemu-ev

# NB: centos-release-qemu-ev is already installed above, so it is not
# repeated here; -y added for unattended installs.
yum install -y libvirt virt-install qemu qemu-guest-agent qemu-system-x86 \
    qemu-user qemu-tools virt-manager bridge-utils seabios glusterfs-api \
    libiscsi4 qemu-kvm-tools qemu-kvm qemu-img

/etc/libvirt/libvirtd.conf

# Local UNIX sockets: owned by group "kvm" so its members can manage guests.
unix_sock_group = "kvm"
# NOTE(review): 0777 on the read-only socket is unusually permissive —
# confirm world access is intended.
unix_sock_ro_perms = "0777"
# Read-write socket restricted to owner and the "kvm" group.
unix_sock_rw_perms = "0770"
unix_sock_dir = "/var/run/libvirt"

# Plain TCP listener on all interfaces with TLS disabled — traffic is
# unencrypted; authentication relies on SASL (see the saslpasswd2 step).
listen_tls = 0
listen_tcp = 1
listen_addr = "0.0.0.0"

/etc/sysconfig/libvirtd

...
# Pass --listen so libvirtd honors listen_tcp/listen_addr from
# /etc/libvirt/libvirtd.conf.
LIBVIRTD_ARGS="--listen"
...
# Register SASL user "user" for the libvirt service (TCP authentication).
saslpasswd2 -f /etc/libvirt/passwd.db -a libvirt user

/etc/libvirt/qemu.conf

# VNC consoles bind to loopback only; reach them via an SSH tunnel.
vnc_listen = "127.0.0.1"
# Run guest processes as the unprivileged qemu user/group.
user = "qemu"
group = "qemu"
# NOTE(review): disables sVirt/SELinux confinement of guests — confirm this
# trade-off is intended.
security_driver = "none"
# 0 = do not override the system defaults for guest process/file limits.
max_processes = 0
max_files = 0
max_queued = 0
# Do not filter guest MAC addresses.
mac_filter = 0
# NOTE(review): max_queued and keepalive_* are normally libvirtd.conf
# options, not qemu.conf — verify they are honored here.
keepalive_interval = 5
keepalive_count = 5
# Port range used for incoming live migrations.
migration_port_min = 65000
migration_port_max = 65500

Настройки cgconfig

mkdir -v /cgroup

/etc/cgconfig.conf

# Mount each cgroup (v1) controller on its own hierarchy under /cgroup.
mount {
	cpuset	= /cgroup/cpuset;
	cpu	= /cgroup/cpu;
	cpuacct	= /cgroup/cpuacct;
	memory	= /cgroup/memory;
	devices	= /cgroup/devices;
	freezer	= /cgroup/freezer;
	net_cls	= /cgroup/net_cls;
	blkio	= /cgroup/blkio;
}
# Apply /etc/cgconfig.conf automatically at boot.
chkconfig cgconfig on

Убрать определение virbr0

truncate -s0 /etc/libvirt/qemu/networks/default.xml

Установить hook на запуск libvirtd

/etc/libvirt/hooks/qemu

#!/bin/bash
# libvirt qemu hook: for each cgroup that can hold guest processes, disable
# the OOM killer (memory.oom_control=1) and minimize swapping
# (memory.swappiness=1). Failures of individual cgset calls are ignored,
# and the hook always reports success so guest operations are never blocked.

for cgrp in libvirt libvirt/qemu machine; do
  cgset -r memory.oom_control=1 "$cgrp"
  cgset -r memory.swappiness=1 "$cgrp"
done

exit 0

See also:
* [http://libvirt.org/hooks.html Libvirt Hooks]
* [http://www.kernel.org/doc/Documentation/cgroups/ Cgroups Documentation]

Centos 7

# Enable at boot, then start, kernel samepage merging and its tuning daemon.
for unit in ksm ksmtuned; do
  systemctl enable "$unit"
done
for unit in ksm ksmtuned; do
  systemctl start "$unit"
done

glusterfs

# Fetch the upstream Gluster repo definition (LATEST release line) and
# install the server, FUSE client and libgfapi packages.
wget http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo -O /etc/yum.repos.d/glusterfs-epel.repo
yum install -y glusterfs-fuse glusterfs-server glusterfs-api glusterfs
/etc/glusterfs/glusterd.vol

# glusterd management volume definition.
volume management
    type mgmt/glusterd
    option working-directory /var/lib/glusterd
    option transport-type socket
    # TCP keepalive tuning for the management connections (seconds).
    option transport.socket.keepalive-time 1000
    option transport.socket.keepalive-interval 200
    option transport.socket.read-fail-log off
    option ping-timeout 30
    # Accept RPC from unprivileged (>1024) source ports — NOTE(review):
    # presumably needed for qemu/libgfapi clients, which run unprivileged;
    # confirm against the libgfapi guide linked later in this page.
    option rpc-auth-allow-insecure on
#   option base-port 49152
end-volume
# Enable and (re)start the brick daemon and the management daemon.
systemctl enable glusterfsd.service
systemctl enable glusterd.service
systemctl restart glusterfsd
systemctl restart glusterd

# Use all remaining space in vg_node for the brick LV.
lvcreate -n data -l 100%FREE vg_node
# XFS with a 128 MiB log and 32 allocation groups for parallel I/O.
mkfs.xfs -l size=128m -d agcount=32 -f /dev/vg_node/data
# -p: idempotent — do not fail if the mount point already exists.
mkdir -p /data
/etc/fstab

/dev/vg_node/data						/data			xfs		defaults,noatime,nodiratime,osyncisdsync 1 2

http://www.gluster.org/community/documentation/index.php/Libgfapi_with_qemu_libvirt

gluster volume set rv1 performance.open-behind disable
$ gluster volume info

Volume Name: rv1
Type: Replicate
Volume ID: 40f08a4c-2e10-4705-a1cb-ff434447a6b6
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: virt3:/data/rv1
Brick2: virt4:/data/rv1
Options Reconfigured:
network.compression: on
performance.strict-write-ordering: on
transport.keepalive: on
auth.allow: 127.0.0.1
performance.readdir-ahead: on
server.allow-insecure: on
performance.quick-read: off
performance.read-ahead: off
performance.io-cache: off
performance.stat-prefetch: off
cluster.eager-lock: enable
network.remote-dio: enable
cluster.quorum-type: auto
cluster.server-quorum-type: server
storage.owner-uid: 107
storage.owner-gid: 107
nfs.disable: off
performance.cache-size: 1G
performance.io-thread-count: 32
cluster.background-self-heal-count: 32
cluster.metadata-self-heal: on
cluster.data-self-heal: on
cluster.entry-self-heal: on
cluster.self-heal-daemon: on
network.ping-timeout: 5