# vi: ft=ruby:et:ts=2:sts=2:sw=2

require 'yaml'

VAGRANTFILE_API_VERSION = 2
# Defaults for configuration data.
# These are overridden from the settings file
# and (possibly, later) from command-line parameters.
network_opts = [ :type, :link, :flags, :hwaddr, :name, :ipv4, :ipv6 ]

libvirt_network_parms = {
  #:hostname => 'gluno1',
  #:box => 'local-fedora-rawhide-64',
  #:box => 'purpleidea-fedora-21',
  #:box => 'local-fedora-21.2',
  :container_name => 'gluno1',
  #:container_name => 'node1',
  :box => 'local-fedora-21.2',
  :internal_if => 'virbr1',
  :ipv4 => '172.20.10.30',
  #:ipv4 => '10.111.222.201',

# Load the config if it exists,
# possibly override it with command-line args
# (currently, none are supported),
# and then store the config back.
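#
# An illustrative vagrant.yaml (assumed layout, reconstructed from the
# defaults above and the accesses below):
#
#   :vms:
#   - :hostname: gluno1
#     :internal_if: virbr1
#     :provider:
#       :libvirt:
#         :box: local-fedora-21.2
#     :networks:
#     - :link: virbr1
#       :ipv4: 172.20.10.30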
projectdir = File.expand_path File.dirname(__FILE__)
f = File.join(projectdir, 'vagrant.yaml')

settings = YAML::load_file f

if settings[:vms].is_a?(Array)
  puts "Loaded settings from #{f}."

# TODO(?): ARGV processing

File.open(f, 'w') do |file|
  file.write settings.to_yaml
end
puts "Wrote settings to #{f}."
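
# Fill unset values in each vm's configuration from the defaults,
# walking three levels deep (category / subcategory / key).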
defaults.keys.each do |cat|
  next unless vm.has_key?(cat)
  defaults[cat].keys.each do |subcat|
    next unless vm[cat].has_key?(subcat)
    defaults[cat][subcat].keys.each do |key|
      unless vm[cat][subcat].has_key?(key)
        vm[cat][subcat][key] = defaults[cat][subcat][key]
      end
    end
  end
end

#if not vm[:provider][:libvirt].has_key?(:prefix)
#  vm[:provider][:libvirt][:prefix] = default_libvirt_prefix
#end

vm[:networks].each do |net|
  net_default.keys.each do |key|
    unless net.has_key?(key)
      net[key] = net_default[key]
    end
  end
end

# Compose the list of cluster-internal IPs: for each vm, pick the ipv4
# address of the network attached to its internal interface.
# (The exact selection code here is assumed.)
cluster_internal_ips = vms.map do |vm|
  internal_ip = nil
  vm[:networks].each do |n|
    if n[:link] == vm[:internal_if]
      internal_ip = n[:ipv4]
    end
  end
  internal_ip
end

#print "internal ips: "
#print cluster_internal_ips

#PROVISION_SCRIPT = <<SCRIPT
#yum -y install make samba
#SCRIPT

NET_FIX_ALWAYS_SCRIPT = <<SCRIPT
# eth1 is not brought up automatically
# by 'vagrant up' on an existing vm.
# Because eth1 is not up, glusterd cannot
# be started and gluster volumes cannot
# be mounted. Fix all of this up here, until
# we have a correctly working environment.
grep -q -s "${MOUNTPT}" /etc/fstab && {
  # already provisioned...
  systemctl restart glusterd
}
SCRIPT

NET_FIX_INITIAL_SCRIPT = <<SCRIPT
# Fix dhclient running on the private network interface:
systemctl restart NetworkManager
SCRIPT

INSTALL_SCRIPT = <<SCRIPT
yum -y install xfsprogs
yum -y install glusterfs{,-server,-fuse,-geo-replication}
yum -y install ctdb samba
SCRIPT

XFS_SCRIPT = <<SCRIPT
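# Provision one disk as an XFS brick: label the disk, create a single
# partition, create an xfs filesystem, add an fstab entry, and mount
# the partition under /export. DEVICE and PARTDEV are assumed to be
# set above (e.g. DEVICE=vdb, PARTDEV=vdb1).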
DISKDEV="/dev/${DEVICE}"
DISKPARTDEV="/dev/${PARTDEV}"
MOUNTP=/export/${PARTDEV}
BRICKD=${MOUNTP}/brick

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

parted -s ${DISKDEV} print && {
  echo "Label exists on ${DISKDEV}."
} || {
  echo "Creating label on ${DISKDEV}."
  parted -s ${DISKDEV} mklabel msdos
}

parted -s ${DISKDEV} print 1 && {
  echo "Partition ${DISKPARTDEV} exists."
} || {
  echo "Creating partition ${DISKPARTDEV}."
  parted -s ${DISKDEV} mkpart primary 1 100%
}

blkid -s TYPE ${DISKPARTDEV} | grep -q -s 'TYPE="xfs"' && {
  echo "Partition ${DISKPARTDEV} contains xfs file system."
} || {
  echo "Creating xfs filesystem on ${DISKPARTDEV}."
  mkfs.xfs -f ${DISKPARTDEV}
}

FILE=/etc/fstab

grep -q -s ${DISKPARTDEV} ${FILE} && {
  echo "Mount entry for ${DISKPARTDEV} is present in ${FILE}."
} || {
  echo "Creating mount entry for ${DISKPARTDEV} in ${FILE}."
  test -f ${FILE} || touch ${FILE}
  cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}
  cat <<EOF >> ${FILE}
${DISKPARTDEV} ${MOUNTP} xfs defaults 0 0
EOF
}

mount | grep ${MOUNTP} && {
  echo "${MOUNTP} is already mounted."
} || {
  echo "Mounting ${MOUNTP}."
  mkdir -p ${MOUNTP}
  mount ${MOUNTP}
}
SCRIPT

GLUSTER_START_SCRIPT = <<SCRIPT
systemctl start glusterd.service
SCRIPT

#GLUSTER_PROBE_SCRIPT = <<SCRIPT
#gluster peer probe ${PEER_IP}
#SCRIPT

GLUSTER_PROBE_SCRIPT = <<SCRIPT
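# Probe all peers whose IPs are passed as arguments (PEER_IPS is
# assumed to be set from "$@" above), retrying each one for up to
# 12 attempts until the freshly booted peer becomes reachable.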
for PEER_IP in ${PEER_IPS}
do
  # try for some time to reach the other node:
  for COUNT in $(seq 1 12)
  do
    gluster peer probe ${PEER_IP} && break
    # brief pause before retrying (assumed)
    sleep 1
  done
done
SCRIPT

GLUSTER_CREATEVOL_SCRIPT = <<SCRIPT
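# Create and start a gluster volume. By the convention of the callers
# below, the arguments are: volume name, replica count, then one
# brick specification (host:/path) per node.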
echo "gluster volume create $VOLNAME rep $REP transport tcp $@"
gluster volume create $VOLNAME rep $REP transport tcp $@

gluster volume start $VOLNAME
SCRIPT

GLUSTER_MOUNT_SCRIPT = <<SCRIPT
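# Mount a gluster volume via the fuse client. Arguments (see the
# callers below): volume name and mount point. The volume is mounted
# from localhost, and an fstab entry is created so that the mount
# persists across reboots.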
MOUNTDEV="127.0.0.1:/${VOLNAME}"

#mount -t glusterfs ${MOUNTDEV} ${MOUNTPT}

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/fstab

grep -q -s "${MOUNTPT}" ${FILE} || {
  test -f ${FILE} || touch ${FILE}
  cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}
  cat <<EOF >> ${FILE}
${MOUNTDEV} ${MOUNTPT} glusterfs defaults,selinux 0 0
EOF
}

mkdir -p ${MOUNTPT}
mount ${MOUNTPT}
SCRIPT

CTDB_STOP_SCRIPT = <<SCRIPT
systemctl stop ctdb.service
SCRIPT

CTDB_CREATE_NODES_SCRIPT = <<SCRIPT
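# Write the cluster-internal IPs, one per line, into the ctdb nodes
# file (/etc/ctdb/nodes and the NODES_IPS="$@" argument handling are
# assumed).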
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

NODES_IPS="$@"

FILE=/etc/ctdb/nodes

test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

# start from an empty file (assumed)
echo -n > ${FILE}
for IP in ${NODES_IPS}
do
  echo "$IP" >> ${FILE}
done
SCRIPT

CTDB_CREATE_PUBADDRS_SCRIPT = <<SCRIPT
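# Write the ctdb public_addresses file from the IPs passed as
# arguments (the argument handling here is assumed). Currently
# unused: the corresponding provisioner below is commented out.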
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/ctdb/public_addresses

test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

for IP in "$@"
do
  echo ${IP} >> ${FILE}
done
SCRIPT

CTDB_CREATE_CONF_SCRIPT = <<SCRIPT
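# Generate /etc/sysconfig/ctdb: point CTDB at the nodes and
# public_addresses files, let it manage samba, and use a recovery
# lock file that lives on the shared gluster volume.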
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

RECLOCKDIR=/gluster/gv0/ctdb
mkdir -p ${RECLOCKDIR}
RECLOCKFILE=${RECLOCKDIR}/reclock

PUBLIC_ADDRESSES_FILE=/etc/ctdb/public_addresses
NODES_FILE=/etc/ctdb/nodes

FILE=/etc/sysconfig/ctdb
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

cat <<EOF > ${FILE}
CTDB_NODES=${NODES_FILE}
CTDB_PUBLIC_ADDRESSES=${PUBLIC_ADDRESSES_FILE}
CTDB_RECOVERY_LOCK=${RECLOCKFILE}
CTDB_MANAGES_SAMBA="yes"
CTDB_SAMBA_SKIP_SHARE_CHECK="yes"
#CTDB_MANAGES_WINBIND="yes"
EOF
SCRIPT

SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT
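# Create two share directories on the gluster volume and write an
# smb.conf for the cluster: share1 is served through the glusterfs
# vfs module (libgfapi), share2 through the fuse mount of the volume.
# Arguments (see the caller below): gluster volume name and its mount
# point. The section headers in the generated smb.conf are assumed.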
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

mkdir -p ${GLUSTER_VOL_MOUNT}/share1
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1

mkdir -p ${GLUSTER_VOL_MOUNT}/share2
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2

FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

cat <<EOF > ${FILE}
[global]
netbios name = sambacluster

[share1]
vfs objects = acl_xattr glusterfs
glusterfs:volume = ${GLUSTER_VOL}
kernel share modes = no

[share2]
path = ${GLUSTER_VOL_MOUNT}/share2
vfs objects = acl_xattr
EOF
SCRIPT

CTDB_START_SCRIPT = <<SCRIPT
systemctl start ctdb.service
SCRIPT

# The vagrant machine definitions

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  vms.each do |machine|
    config.vm.define machine[:hostname] do |node|
      node.vm.box = machine[:provider][:libvirt][:box]
      node.vm.hostname = machine[:hostname]

      node.vm.provider :libvirt do |libvirt|
        libvirt.default_prefix = machine[:provider][:libvirt][:prefix]
        libvirt.memory = 1024
        libvirt.storage :file, :size => '64M', :device => 'vdb'
        libvirt.storage :file, :size => '10G', :device => 'vdc'
      end

      machine[:networks].each do |net|
        if net[:ipv4] != ''
          node.vm.network :private_network, :ip => net[:ipv4]
        end
      end

      # There is some problem with the fedora base box:
      # upon first boot, ifdown eth1 fails and dhclient
      # keeps running. Simply bringing the interface down
      # and up again is not sufficient. We need to restart
      # NetworkManager so that it no longer feels
      # responsible for the interface.
      node.vm.provision "net_fix_initial", type: "shell" do |s|
        s.inline = NET_FIX_INITIAL_SCRIPT
      end

      node.vm.provision "install", type: "shell" do |s|
        s.inline = INSTALL_SCRIPT
      end

      # There is some problem with the fedora base box:
      # the interface does not come up automatically,
      # so we need to bring it up again on every boot.
      node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
        s.inline = NET_FIX_ALWAYS_SCRIPT
      end

      # Note: provisioners with the same name can override each other,
      # so use a unique name for each invocation of the xfs script.
      node.vm.provision "xfs_vdb", type: "shell" do |s|
        s.inline = XFS_SCRIPT
        #s.args = [ "vdb", "/export/gluster/brick1" ]
      end

      node.vm.provision "xfs_vdc", type: "shell" do |s|
        s.inline = XFS_SCRIPT
        #s.args = [ "vdc" , "/export/gluster/brick2" ]
      end

      node.vm.provision "gluster_start", type: "shell" do |s|
        s.inline = GLUSTER_START_SCRIPT
      end

      node.vm.provision "gluster_probe", type: "shell" do |s|
        s.inline = GLUSTER_PROBE_SCRIPT
        s.args = cluster_internal_ips
      end

      node.vm.provision "gluster_createvol_gv0", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdb1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv0", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_gv0", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv0", "/gluster/gv0" ]
      end

      node.vm.provision "gluster_createvol_gv1", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdc1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv1", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_gv1", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end

      # ctdb / samba config

      node.vm.provision "ctdb_stop", type: "shell" do |s|
        s.inline = CTDB_STOP_SCRIPT
      end

      node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
        s.inline = CTDB_CREATE_NODES_SCRIPT
        s.args = cluster_internal_ips
      end

      #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
      #  s.inline = CTDB_CREATE_PUBADDRS_SCRIPT
      #end

      node.vm.provision "ctdb_create_conf", type: "shell" do |s|
        s.inline = CTDB_CREATE_CONF_SCRIPT
      end

      node.vm.provision "samba_create_conf", type: "shell" do |s|
        s.inline = SAMBA_CREATE_CONF_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end

      node.vm.provision "ctdb_start", type: "shell" do |s|
        s.inline = CTDB_START_SCRIPT
      end
    end
  end
end
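
# Typical usage (assuming the vagrant-libvirt plugin is installed):
#
#   vagrant up --provider=libvirt
#   vagrant provision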