Rewrite autocluster in Python
authorMartin Schwenke <martin@meltin.net>
Wed, 27 Feb 2019 04:11:58 +0000 (15:11 +1100)
committerMartin Schwenke <martin@meltin.net>
Mon, 25 Mar 2019 05:52:25 +0000 (16:52 +1100)
Reads a YAML configuration, with defaults in defaults.yml.  This
replaces the bash version.

Currently does the following actions for cluster <cluster>: defaults,
dump, status, generate, destroy, create, ssh_config, setup, build.

update_hosts is replaced by ssh_update.  If necessary, the hosts file
can be updated by hand using the hosts file in the state directory.

Update build/packaging and example configuration file.

Signed-off-by: Martin Schwenke <martin@meltin.net>
Makefile
autocluster.py [new file with mode: 0755]
autocluster.spec.in
defaults.yml [new file with mode: 0644]
example.yml [new file with mode: 0644]

index f39cb5c..ee82ac9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -5,21 +5,19 @@ datadir       = ${prefix}/share/autocluster
 bindir = ${prefix}/bin
 DESTDIR        =
 
-datas  = base config.d host_setup templates
+datas  = defaults.yml ansible vagrant
 hacks  = autocluster.hack
 genpkg = debian/changelog autocluster.spec ChangeLog
 
 all:   $(hacks)
 
-autocluster.hack:      autocluster
-       sed -e '/##BEGIN-INSTALLDIR-MAGIC##/i \
-installdir=$(datadir)' -e '/##BEGIN-INSTALLDIR-MAGIC##/,/##END-INSTALLDIR-MAGIC##/d' $< > $@
+autocluster.hack:      autocluster.py Makefile
+       sed -e "s|^INSTALL_DIR = .*|INSTALL_DIR = '$(datadir)'|" $< > $@
 
 install:       all
        mkdir -p $(DESTDIR)$(datadir)
        cp -a $(datas) $(DESTDIR)$(datadir)/
        mkdir -p $(DESTDIR)$(bindir)
-       install -m 755 vircmd $(DESTDIR)$(bindir) 
        install -m 755 autocluster.hack $(DESTDIR)$(bindir)/autocluster
 
 debian/changelog:      debian/changelog.in Makefile .git/refs/heads/master
diff --git a/autocluster.py b/autocluster.py
new file mode 100755 (executable)
index 0000000..3e9f553
--- /dev/null
@@ -0,0 +1,729 @@
+#!/usr/bin/env python
+
+'''Autocluster: Generate test clusters for clustered Samba
+
+   Reads configuration file in YAML format
+
+   Uses Vagrant to create cluster, Ansible to configure
+'''
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import print_function
+
+import os
+import errno
+import sys
+import re
+import subprocess
+import shutil
+
+import ipaddress
+
+import yaml
+try:
+    import libvirt
+except ImportError as err:
+    LIBVIRT_IMPORT_ERROR = err
+    libvirt = None
+
INSTALL_DIR = '.'

# Node types accepted in a configuration's node_list.  Must stay in
# sync with the "Supported types" comment in defaults.yml (which
# includes "storage" - it was previously missing here, so dedicated
# storage nodes were rejected).
NODE_TYPES = ['nas', 'base', 'build', 'cbuild', 'ad', 'storage', 'test']
# Configuration keys that are generated internally rather than copied
# from defaults.yml, so they bypass the template sanity check in
# nested_update()
GENERATED_KEYS = ['cluster', 'nodes', 'shares']


def usage():
    '''Print usage message and exit'''

    sys.exit(
        '''Usage: %s <group> <args>
  Groups:

    cluster <cluster> <command> ...

       Commands:
            defaults    Dump default configuration to stdout
            dump        Dump cluster configuration to stdout
            status      Show cluster status
            generate    Generate cluster metadata for Vagrant, Ansible and SSH
            destroy     Destroy cluster
            create      Create cluster
            ssh_config  Install cluster SSH configuration in current account
            setup       Perform configuration/setup of cluster nodes
            build       Short for: destroy, generate, create, ssh_config, setup
''' % sys.argv[0])


def sanity_check_cluster_name(cluster):
    '''Ensure that the cluster name is sane

    Exits with an error for names that some cluster filesystems would
    reject.'''

    if not re.match('^[A-Za-z][A-Za-z0-9]+$', cluster):
        sys.exit('''ERROR: Invalid cluster name "%s"
  Some cluster filesystems only allow cluster names matching
  ^[A-Za-z][A-Za-z0-9]+$''' % cluster)


def calculate_nodes(cluster, defaults, config):
    '''Calculate hostname, IP and other attributes for each node

    Populates config['nodes'] from config['node_list'], exiting on
    invalid node types.'''

    combined = dict(defaults)
    combined.update(config)

    if 'node_list' not in config:
        sys.exit('Error: node_list not defined')

    have_dedicated_storage_nodes = False
    for node_type in combined['node_list']:

        if node_type not in NODE_TYPES:
            sys.exit('ERROR: Invalid node type %s in node_list' % node_type)

        # Was "if type == 'storage'", which compared the type builtin
        # and so could never be true
        if node_type == 'storage':
            have_dedicated_storage_nodes = True

    nodes = {}
    type_counts = {}
    for idx, node_type in enumerate(combined['node_list']):
        node = {}

        node['type'] = node_type

        # Construct hostname, whether node is CTDB node
        if node_type == 'nas':
            tag = 'n'
            node['is_ctdb_node'] = True
        else:
            tag = node_type
            node['is_ctdb_node'] = False

        type_counts[node_type] = type_counts.get(node_type, 0) + 1
        hostname = '%s%s%d' % (cluster, tag, type_counts[node_type])

        # Does the node have shared storage?  NAS nodes only get it
        # when no dedicated storage nodes exist
        if node_type == 'storage':
            node['has_shared_storage'] = True
        elif node_type == 'nas' and not have_dedicated_storage_nodes:
            node['has_shared_storage'] = True
        else:
            node['has_shared_storage'] = False

        # List of IP addresses, one for each network
        node['ips'] = []
        for net in combined['networks']:
            # Use combined so the default firstip applies when the
            # user configuration does not set one (was
            # config['firstip'], which raised KeyError in that case)
            offset = combined['firstip'] + idx
            if sys.version_info[0] < 3:
                # Backported Python 2 ipaddress demands unicode instead of str
                net = net.decode('utf-8')
            ip_address = ipaddress.ip_network(net, strict=False)
            node['ips'].append(str(ip_address[offset]))

        nodes[hostname] = node

    config['nodes'] = nodes
+
+
def calculate_dependencies_ad(config):
    '''Use the first AD node (if any) as the default nameserver and
    switch the default auth method to winbind'''

    for _, node in config['nodes'].items():
        if node['type'] == 'ad':
            nameserver = node['ips'][0]
            if 'resolv_conf' not in config:
                config['resolv_conf'] = {}
            if 'nameserver' not in config['resolv_conf']:
                config['resolv_conf']['nameserver'] = nameserver

            if 'auth_method' not in config:
                config['auth_method'] = 'winbind'

            break


def calculate_dependencies_virthost(defaults, config):
    '''Handle special values that depend on virthost'''

    virthost = config.get('virthost', defaults['virthost'])

    # Fall back to the virthost as nameserver (an AD node, if present,
    # has already been preferred by calculate_dependencies_ad())
    if 'resolv_conf' not in config:
        config['resolv_conf'] = {}
    if 'nameserver' not in config['resolv_conf']:
        config['resolv_conf']['nameserver'] = virthost

    if 'repository_baseurl' not in config:
        config['repository_baseurl'] = 'http://%s/mediasets' % virthost

    if 'ad' not in config:
        config['ad'] = {}
    if 'dns_forwarder' not in config['ad']:
        config['ad']['dns_forwarder'] = virthost


def calculate_dependencies(cluster, defaults, config):
    '''Handle special values that depend on updated config values'''

    config['cluster'] = cluster

    # Order matters: an AD node's nameserver takes precedence over
    # the virthost fallback
    calculate_dependencies_ad(config)
    calculate_dependencies_virthost(defaults, config)

    # domain -> search
    if 'resolv_conf' in config and \
       'domain' in config['resolv_conf'] and \
       'search' not in config['resolv_conf']:

        config['resolv_conf']['search'] = config['resolv_conf']['domain']

    # Presence of distro repositories means delete existing ones.
    # Use get() because "repositories" is optional in user
    # configuration (the default is []) - a plain index raised
    # KeyError when it was omitted
    if 'repositories_delete_existing' not in config:
        for repo in config.get('repositories', []):
            if repo['type'] == 'distro':
                config['repositories_delete_existing'] = True
                break
+
+
def calculate_kdc(config):
    '''Default the KDC to the first AD node, if one exists'''

    if 'kdc' in config:
        return

    for hostname, node in config['nodes'].items():
        if node['type'] == 'ad':
            config['kdc'] = hostname
            break
+
+
def calculate_timezone(config):
    '''Default the timezone from the local machine, if unset

    Tries Debian-style /etc/timezone first, then Red Hat-style
    /etc/sysconfig/clock.  A missing file is silently ignored.  The
    AUTOCLUSTER_TEST_* environment variables override the file
    locations for testing.'''

    if 'timezone' not in config:
        # Debian-style: first line of the file is the timezone name
        timezone_file = os.environ.get('AUTOCLUSTER_TEST_TIMEZONE_FILE',
                                       '/etc/timezone')
        try:
            with open(timezone_file) as stream:
                first_line = stream.readlines()[0]
            config['timezone'] = first_line.strip()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise

    if 'timezone' not in config:
        # Red Hat-style: a ZONE="..." line
        clock_file = os.environ.get('AUTOCLUSTER_TEST_CLOCK_FILE',
                                    '/etc/sysconfig/clock')
        zone_re = re.compile('^ZONE="([^"]+)".*')
        try:
            with open(clock_file) as stream:
                for line in stream:
                    match = zone_re.match(line)
                    if match:
                        config['timezone'] = match.group(1).strip()
                        break
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
+
+
def calculate_shares(defaults, config):
    '''Calculate share definitions based on cluster filesystem mountpoint

    A single "data" share is created under the configured (or
    default) mountpoint.'''

    clusterfs = config.get('clusterfs', {})
    if 'mountpoint' in clusterfs:
        mountpoint = clusterfs['mountpoint']
    else:
        mountpoint = defaults['clusterfs']['mountpoint']

    config['shares'] = [{
        'name': 'data',
        'directory': os.path.join(mountpoint, 'data'),
        'mode': '0o777',
    }]
+
+
def load_defaults():
    '''Load default configuration from defaults.yml in INSTALL_DIR

    Any failures here are internal errors, so default exceptions are
    allowed to propagate.'''

    with open(os.path.join(INSTALL_DIR, 'defaults.yml'), 'r') as stream:
        return yaml.safe_load(stream)
+
+
def nested_update(dst, src, context=None):
    '''Update dictionary dst from dictionary src.  Sanity check that all
keys in src are defined in dst, except those in GENERATED_KEYS.  This
means that defaults.yml acts as a template for configuration options.'''

    for key, val in src.items():
        # Dotted path of the current key, for error messages
        ctx = key if context is None else '%s.%s' % (context, key)

        if key not in dst and key not in GENERATED_KEYS:
            sys.exit('ERROR: Invalid configuration key "%s"' % ctx)

        if key in dst and isinstance(val, dict):
            # Merge nested dictionaries recursively
            nested_update(dst[key], val, ctx)
        else:
            dst[key] = val
+
+
def load_config_with_includes(config_file):
    '''Load a config file, recursively respecting "include" options

    "include" may be a single filename or a list of filenames.
    Options from an included file override those already read from the
    current file, so an include behaves as if it appeared at the end
    of the file regardless of its actual position.'''

    if not os.path.exists(config_file):
        sys.exit('ERROR: Configuration file %s not found' % config_file)

    with open(config_file, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            sys.exit('Error parsing config file %s, %s' % (config_file, exc))

    # An empty YAML document loads as None
    if config is None:
        config = {}

    # Handle include item, either a single string or a list
    if 'include' not in config:
        return config
    includes = config['include']
    # Remove it so that "include" is never treated as a configuration key
    config.pop('include', None)
    if isinstance(includes, str):
        includes = [includes]
    if not isinstance(includes, list):
        print('warning: Ignoring non-string/list include', file=sys.stderr)
        return config
    for include in includes:
        if not isinstance(include, str):
            print('warning: Ignoring non-string include', file=sys.stderr)
            continue

        # Included options override those from the current file;
        # later includes override earlier ones
        included_config = load_config_with_includes(include)
        config.update(included_config)

    return config
+
+
def load_config(cluster):
    '''Load default and user configuration; combine them

    The user configuration comes from <cluster>.yml in the current
    directory.'''

    defaults = load_defaults()
    config = load_config_with_includes('%s.yml' % cluster)

    # Fill in generated and dependent values before merging
    calculate_nodes(cluster, defaults, config)
    calculate_dependencies(cluster, defaults, config)
    calculate_timezone(config)
    calculate_kdc(config)
    calculate_shares(defaults, config)

    merged = dict(defaults)
    nested_update(merged, config)

    return merged
+
+
def generate_config_yml(config, outdir):
    '''Output combined YAML configuration to "config.yml"'''

    with open(os.path.join(outdir, 'config.yml'), 'w') as stream:
        stream.write('---\n')
        stream.write(yaml.dump(config, default_flow_style=False))
        stream.write('\n')
+
+
def generate_hosts(cluster, config, outdir):
    '''Output hosts file snippet to "hosts"

    One line per node: IP, fully-qualified name, short name.'''

    outfile = os.path.join(outdir, 'hosts')
    domain = config['resolv_conf']['domain']

    lines = ['# autocluster %s' % cluster]
    for hostname, node in config['nodes'].items():
        lines.append('%s\t%s.%s %s' %
                     (node['ips'][0], hostname, domain, hostname))

    with open(outfile, 'w') as stream:
        stream.write('\n'.join(lines) + '\n')
+
+
def generate_ssh_config(config, outdir):
    '''Output ssh_config file snippet to "ssh_config"

    One Host section per node, using the dedicated autocluster SSH
    key in the invoking user's ~/.ssh directory.'''

    outfile = os.path.join(outdir, 'ssh_config')
    identity = os.path.join(os.environ['HOME'], '.ssh/id_autocluster')

    template = '''Host %s
  HostName %s
  User root
  Port 22
  UserKnownHostsFile /dev/null
  StrictHostKeyChecking no
  PasswordAuthentication no
  IdentityFile %s
  IdentitiesOnly yes
  LogLevel FATAL
'''

    with open(outfile, 'w') as stream:
        for hostname, node in config['nodes'].items():
            print(template % (hostname, node['ips'][0], identity),
                  file=stream)
+
+
def generate_ansible_inventory(config, outdir):
    '''Output Ansible inventory file to "ansible.inventory"

    Nodes are grouped into sections by node type, e.g. [nas-nodes],
    with hostnames sorted within each section.'''

    # Map node type -> hostnames of that type.  (The original
    # assigned an unused node_type local here.)
    type_map = {}
    for hostname, node in config['nodes'].items():
        type_map.setdefault(node['type'], []).append(hostname)

    outfile = os.path.join(outdir, 'ansible.inventory')

    with open(outfile, 'w') as stream:
        for node_type, hostnames in type_map.items():
            print('[%s-nodes]' % node_type, file=stream)
            for hostname in sorted(hostnames):
                print(hostname, file=stream)
            print(file=stream)
+
+
def cluster_defaults():
    '''Dump default YAML configuration to stdout'''

    print('---')
    print(yaml.dump(load_defaults(), default_flow_style=False))
+
+
def cluster_dump(cluster):
    '''Dump cluster YAML configuration to stdout'''

    config = load_config(cluster)

    # Drop generated, internal values that aren't part of an input
    # configuration ("cluster" is deliberately kept)
    config.pop('nodes', None)
    config.pop('shares', None)

    print('---')
    print(yaml.dump(config, default_flow_style=False))
+
+
def get_state_dir(cluster):
    '''Return the state directory for the current cluster'''

    state_root = os.path.join(os.getcwd(), '.autocluster')
    return os.path.join(state_root, cluster)
+
+
def announce(group, cluster, command):
    '''Print a banner announcing the current step'''

    hashes = '############################################################'
    print(hashes)
    print('# %-56s #' % ('%s %s %s' % (group, cluster, command)))
    print(hashes)
+
+
def cluster_generate(cluster):
    '''Generate metadata files from configuration

    Writes config.yml, hosts, ssh_config and ansible.inventory to the
    cluster's state directory.'''

    announce('cluster', cluster, 'generate')

    config = load_config(cluster)
    outdir = get_state_dir(cluster)

    # Equivalent of os.makedirs(outdir, exist_ok=True), which is
    # unavailable in Python 2
    try:
        os.makedirs(outdir)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise

    generate_config_yml(config, outdir)
    generate_hosts(cluster, config, outdir)
    generate_ssh_config(config, outdir)
    generate_ansible_inventory(config, outdir)
+
+
def vagrant_command(cluster, config, args):
    '''Run vagrant with the given arguments

    Vagrant is pointed at the installed Vagrantfile and the cluster's
    state directory via environment variables.'''

    state_dir = get_state_dir(cluster)

    os.environ.update({
        'VAGRANT_DEFAULT_PROVIDER': config['vagrant_provider'],
        'VAGRANT_CWD': os.path.join(INSTALL_DIR, 'vagrant'),
        'VAGRANT_DOTFILE_PATH': os.path.join(state_dir, '.vagrant'),
        'AUTOCLUSTER_STATE': state_dir,
    })

    subprocess.check_call(['vagrant'] + args)
+
+
def cluster_status(cluster):
    '''Check status of cluster using Vagrant'''

    announce('cluster', cluster, 'status')
    vagrant_command(cluster, load_config(cluster), ['status'])
+
+
def get_shared_disk_names(cluster, config):
    '''Return shared disks names for cluster, None if none

    There are no shared disks when no node uses shared storage or
    when the configured count is 0.'''

    if not any(node['has_shared_storage']
               for node in config['nodes'].values()):
        return None

    count = config['shared_disks']['count']
    if count == 0:
        return None

    return ['autocluster_%s_shared%02d.img' % (cluster, disk_number)
            for disk_number in range(1, count + 1)]
+
+
def delete_shared_disk_images(cluster, config):
    '''Delete any shared disks for the given cluster

    Only relevant for the libvirt provider; disk images live in the
    "autocluster" libvirt storage pool.'''

    if config['vagrant_provider'] != 'libvirt':
        return

    shared_disks = get_shared_disk_names(cluster, config)
    if shared_disks is None:
        return

    # libvirt is an optional import - warn and continue, since the
    # disks may not exist anyway
    if libvirt is None:
        print('warning: unable to check for stale shared disks (no libvirt)',
              file=sys.stderr)
        return

    conn = libvirt.open()
    storage_pool = conn.storagePoolLookupByName('autocluster')
    for disk in shared_disks:
        try:
            volume = storage_pool.storageVolLookupByName(disk)
            volume.delete()
        except libvirt.libvirtError as err:
            # Volume doesn't exist - nothing to delete
            if err.get_error_code() != libvirt.VIR_ERR_NO_STORAGE_VOL:
                raise err
    conn.close()
+
+
def create_shared_disk_images(cluster, config):
    '''Create shared disks for the given cluster

    Only relevant for the libvirt provider; disk images are created
    in the "autocluster" libvirt storage pool.'''

    if config['vagrant_provider'] != 'libvirt':
        return

    shared_disks = get_shared_disk_names(cluster, config)
    if shared_disks is None:
        return

    # Unlike deletion, creation can't sensibly continue without
    # libvirt, so re-raise the original import error
    if libvirt is None:
        raise LIBVIRT_IMPORT_ERROR

    conn = libvirt.open()
    storage_pool = conn.storagePoolLookupByName('autocluster')

    # Split a size like "10G" into capacity and unit; a bare number
    # is a capacity in bytes
    size = str(config['shared_disks']['size'])
    if size[-1].isdigit():
        unit = 'B'
        capacity = size
    else:
        unit = size[-1]
        capacity = size[:-1]

    for disk in shared_disks:
        xml = '''<volume type='file'>
  <name>%s</name>
  <capacity unit="%s">%s</capacity>
</volume>''' % (disk, unit, capacity)
        storage_pool.createXML(xml)

    conn.close()
+
+
def cluster_destroy_quiet(cluster):
    '''Destroy and undefine cluster using Vagrant - don't announce

    Raises the last vagrant error if all attempts fail.'''

    config = load_config(cluster)

    # First attempt often fails, so try a few times
    saved_err = None
    for _ in range(10):
        try:
            vagrant_command(cluster, config,
                            ['destroy', '-f', '--no-parallel'])
        except subprocess.CalledProcessError as err:
            saved_err = err
            continue
        delete_shared_disk_images(cluster, config)
        return

    raise saved_err
+
+
def cluster_destroy(cluster):
    '''Destroy and undefine cluster using Vagrant'''

    announce('cluster', cluster, 'destroy')
    cluster_destroy_quiet(cluster)
+
+
def cluster_create(cluster):
    '''Create and boot cluster using Vagrant

    Raises the last vagrant error if all attempts fail.'''

    announce('cluster', cluster, 'create')

    config = load_config(cluster)

    # Create our own shared disk images to protect against
    # https://github.com/vagrant-libvirt/vagrant-libvirt/issues/825
    create_shared_disk_images(cluster, config)

    # First attempt sometimes fails, so try a few times, destroying
    # the partially created cluster before each retry
    saved_err = None
    for _ in range(10):
        try:
            vagrant_command(cluster, config, ['up'])
            return
        except subprocess.CalledProcessError as err:
            saved_err = err
            cluster_destroy(cluster)

    raise saved_err
+
+
def cluster_ssh_config(cluster):
    '''Install SSH configuration for cluster

    Copies the generated ssh_config snippet to
    ~/.ssh/autocluster.d/<cluster>.config, creating the directory if
    necessary (previously copyfile() failed when it was missing).'''

    announce('cluster', cluster, 'ssh_config')

    src = os.path.join(get_state_dir(cluster), 'ssh_config')

    dst_dir = os.path.join(os.environ['HOME'], '.ssh/autocluster.d')
    # Equivalent of os.makedirs(..., exist_ok=True), which is
    # unavailable in Python 2
    try:
        os.makedirs(dst_dir)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise

    shutil.copyfile(src, os.path.join(dst_dir, '%s.config' % cluster))
+
+
def cluster_setup(cluster):
    '''Setup cluster using Ansible

    Exits with an error if the playbook run fails.'''

    announce('cluster', cluster, 'setup')

    # Could put these in the state directory, but disable for now
    os.environ['ANSIBLE_RETRY_FILES_ENABLED'] = 'false'

    state_dir = get_state_dir(cluster)
    args = ['ansible-playbook',
            '-e', '@%s' % os.path.join(state_dir, 'config.yml'),
            '-i', os.path.join(state_dir, 'ansible.inventory'),
            os.path.join(INSTALL_DIR, 'ansible/node/site.yml')]
    try:
        subprocess.check_call(args)
    except subprocess.CalledProcessError as err:
        sys.exit('ERROR: cluster setup exited with %d' % err.returncode)
+
+
def cluster_build(cluster):
    '''Build cluster: destroy, generate, create, ssh_config, setup'''

    steps = (cluster_destroy,
             cluster_generate,
             cluster_create,
             cluster_ssh_config,
             cluster_setup)
    for step in steps:
        step(cluster)
+
+
def cluster_command(cluster, command):
    '''Run appropriate cluster command function

    Unknown commands print the usage message and exit.'''

    handlers = {
        'defaults': lambda _cluster: cluster_defaults(),
        'dump': cluster_dump,
        'status': cluster_status,
        'generate': cluster_generate,
        'destroy': cluster_destroy,
        'create': cluster_create,
        'ssh_config': cluster_ssh_config,
        'setup': cluster_setup,
        'build': cluster_build,
    }

    handler = handlers.get(command)
    if handler is None:
        usage()
    else:
        handler(cluster)
+
+
def main():
    '''Main autocluster command-line handling

    Usage: autocluster cluster <cluster> <command>...'''

    argv = sys.argv

    # usage() exits, so these guard clauses do not fall through
    if len(argv) < 2 or argv[1] != 'cluster':
        usage()
    if len(argv) < 4:
        usage()

    cluster = argv[2]
    sanity_check_cluster_name(cluster)

    for command in argv[3:]:
        cluster_command(cluster, command)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
index 5ff3b0b..9d7ffa0 100644 (file)
@@ -11,8 +11,11 @@ URL: git://git.samba.org/autocluster.git
 Source: autocluster-%{version}.tar.gz
 
 Provides: autocluster = %{version}
-Requires: expect, coreutils, sed
-Requires: /usr/bin/virsh, /usr/bin/qemu-img, /usr/libexec/qemu-kvm
+Requires: coreutils, sed
+# Pick one:
+Requires: python, python-ipaddress
+#Requires: python3
+Requires: ansible >= 2.4
 
 Prefix: /usr
 BuildRoot: %{_tmppath}/%{name}-%{version}-root
@@ -42,8 +45,7 @@ rm -rf $RPM_BUILD_ROOT
 %files
 %defattr(-,root,root)
 
-%doc README ChangeLog examples/*
+%doc README ChangeLog example.yml
 %dir %{_prefix}/share/autocluster
 %{_prefix}/share/autocluster/*
 %{_bindir}/autocluster
-%{_bindir}/vircmd
diff --git a/defaults.yml b/defaults.yml
new file mode 100644 (file)
index 0000000..7b061df
--- /dev/null
@@ -0,0 +1,102 @@
+#
+# Environment
+#
+
+# As seen by nodes
+virthost: 10.0.0.1
+timezone: Australia/Canberra
+
+#
+# Repositories
+#
+
+repositories_delete_existing: false
+repository_baseurl: http://10.0.0.1/mediasets
+repositories: []
+
+#
+# Cluster
+#
+
+vagrant_box: generic/centos7
+vagrant_provider: libvirt
+
+# Supported types: nas, ad, base, build, cbuild, storage, test
+node_list: [nas, nas, nas, nas, base]
+
+cpus: 2
+# mem is currently ignored due to a libvirt provider bug
+mem: 1024
+
+# Block devices shared between nodes, used for cluster filesystem
+shared_disks:
+  count: 3
+  size: 10G
+
+# Currently unused
+#disksize: 20G
+#rootsize: 15000
+#swapsize: 2000
+
+#
+# Cluster filesystem
+#
+clusterfs:
+  type: gpfs
+  mountpoint: /clusterfs
+
+# Networking
+
+resolv_conf:
+  domain: example.com
+  search: example.com
+  # Defaults to 1st AD node if one exists
+  nameserver: 10.0.0.1
+
+samba:
+  workgroup: EXAMPLE
+
+networks:
+- 10.0.0.0/24
+- 10.0.1.0/24
+- 10.0.2.0/24
+# Final octet for 1st IP of cluster
+firstip: 20
+# Offset from firstip of public IP addresses
+public_ip_offset: 100
+
+#
+# Authentication
+#
+
+# Default automatically switches to winbind if an AD node exists
+auth_method: files
+# Defaults to 1st AD node, if one exists
+kdc: example-ad
+
+#
+# Active directory
+#
+
+ad:
+  admin_password: p@ssw0rd
+  # Defaults to virthost
+  dns_forwarder: 10.0.0.1
+
+#
+# Includes
+#
+
+# Useful for combining configurations, particularly static +
+# generated.  Note that position in current file does not matter: any
+# included configuration overrides configuration from the current
+# file.  This means that include: should logically be at the end.  Can
+# be either a single filename or a list of filenames.
+
+# include: foo.yml
+
+# include:
+# - a.yml
+# - b.yml
+
+...
diff --git a/example.yml b/example.yml
new file mode 100644 (file)
index 0000000..8a154dc
--- /dev/null
@@ -0,0 +1,34 @@
+---
+# 3 CTDB nodes, an active directory node and a test node
+node_list: [nas, nas, nas, ad, test]
+
+# Last octet of the first node's IP address - the default is OK but
+# this will obviously need to be set if running multiple clusters
+firstip: 50
+
+repositories:
+  # local CentOS mirror, will cause default repos to be removed
+- name: centos-7-os
+  type: distro
+  baseurl: http://10.11.12.13/mediasets
+  path: CENTOS/7/os/\$basearch/
+
+  # local Samba and CTDB repository - assumed to be served by host machine
+- name: samba
+  type: nas
+  path: SAMBA/samba-4.9.4/
+
+  # local Samba AD repository - assumed to be served by host machine
+- name: samba-ad
+  type: ad
+  path: SAMBA-AD/samba-4.9.4/
+
+  # GPFS cluster filesystem - assumed to be served by host machine
+- name: gpfs
+  type: clusterfs
+  path: GPFS/4.2.3
+
+resolv_conf:
+  # nameserver will be the active directory node or, if there is none, the virthost
+  domain: example.local
+...