Index: trunk/tools/subversion/user-management/manage-volumes
===================================================================
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+import sys, traceback, ldapsupportlib, datetime, paramiko, socket
+from optparse import OptionParser
+
+try:
+    import ldap
+    import ldap.modlist
+except ImportError:
+    sys.stderr.write("Unable to import LDAP library.\n")
+    sys.exit(1)
+
+NONE = 0
+INFO = 10
+DEBUG = 20
+
+class VolumeManager:
+    def __init__(self):
+        # TODO: Pull this info from a configuration file
+        self.base_dir = '/a/'
+        self.user = 'glustermanager'
+        self.gluster_vol_dir = '/etc/glusterd/'
+        # Volumes listed here are shared globally; e.g. {'dumps': ['xml']}
+        # marks the xml volume in the dumps project as a global share.
+        self.global_shares = {}
+        self.volume_quotas = {'home': '50GB', 'default': '300GB'}
+        self.default_options = ['nfs.disable on']
+        self.bricks = ['labstore1.pmtpa.wmnet', 'labstore2.pmtpa.wmnet', 'labstore3.pmtpa.wmnet', 'labstore4.pmtpa.wmnet']
+        self.volume_names = ['home', 'project']
+        self.loglevel = INFO
+        self.logfile = None
+
+    def run(self):
+        parser = OptionParser(conflict_handler="resolve")
+        parser.set_usage('manage-volumes [options]')
+
+        ldapSupportLib = ldapsupportlib.LDAPSupportLib()
+        ldapSupportLib.addParserOptions(parser)
+
+        parser.add_option("--logfile", dest="logfile", help="Write output to the specified log file. (default: stdout)")
+        parser.add_option("--loglevel", dest="loglevel", help="Change the level of logging; NONE, INFO, DEBUG (default: INFO)")
+        (options, args) = parser.parse_args()
+        ldapSupportLib.setBindInfoByOptions(options, parser)
+
+        if options.logfile:
+            self.logfile = options.logfile
+        if options.loglevel:
+            # Map the option string onto the numeric levels defined above
+            self.loglevel = {'NONE': NONE, 'INFO': INFO, 'DEBUG': DEBUG}.get(options.loglevel, INFO)
+
+        base = ldapSupportLib.getBase()
+        ds = ldapSupportLib.connect()
+
+        projectdata = self.search_s(ds, "ou=groups," + base, ldap.SCOPE_SUBTREE, "(&(cn=*)(owner=*))")
+        hostdata = self.search_s(ds, "ou=hosts," + base, ldap.SCOPE_SUBTREE, "(puppetvar=instanceproject=*)", ['puppetvar', 'aRecord'])
+        volumedata = self.ssh_exec_command('sudo gluster volume info', True, True)
+        project_hosts = self.get_hosts(hostdata)
+        project_volumes = self.get_volumes(volumedata)
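+        # project_hosts maps project name -> [instance IPs], as built by
+        # get_hosts(); project_volumes maps volume name -> {'auth.allow':
+        # [allowed hosts]}, as built by get_volumes()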
+        for project in projectdata:
+            project_name = project[1]["cn"][0]
+            hosts = []
+            if project_name in project_hosts:
+                hosts = project_hosts[project_name]
+                hosts.sort()
+            for volume_name in self.volume_names:
+                project_volume = project_name + '-' + volume_name
+                if project_volume not in project_volumes:
+                    # First, make the volume directories. mkvolumedir runs on
+                    # all bricks and returns a list of exit statuses. If any
+                    # status isn't 0 or 1, the command failed on a brick, and
+                    # we shouldn't create the volume.
+                    ret_vals = set(self.mkvolumedir(project_name, volume_name))
+                    make_vol = not ret_vals.difference(set([0, 1]))
+                    if make_vol:
+                        vol_ret = self.mkvolume(project_name, volume_name)
+                        if vol_ret:
+                            self.log("Created volume: " + project_name + "-" + volume_name)
+                        else:
+                            # No point going on if the volume creation failed
+                            continue
+                    else:
+                        continue
+                volume_hosts = []
+                if project_volume in project_volumes and 'auth.allow' in project_volumes[project_volume]:
+                    volume_hosts = project_volumes[project_volume]['auth.allow']
+                    volume_hosts.sort()
+                if project_name in self.global_shares and volume_name in self.global_shares[project_name]:
+                    # This is a global share
+                    if volume_hosts != ['*']:
+                        self.setallow(project_name, volume_name, ['*'])
+                elif hosts:
+                    # A host has been added or deleted; update the auth.allow
+                    if volume_hosts != hosts:
+                        self.setallow(project_name, volume_name, hosts)
+                else:
+                    # All hosts have been deleted, or none were ever created;
+                    # make sure we aren't sharing to anything
+                    if volume_hosts != []:
+                        self.setallow(project_name, volume_name, [])
+        # TODO: Unshare and stop deleted projects
+        ds.unbind()
+        return 0
+
+    def mkvolumedir(self, project_name, volume_name):
+        # A volume directory is unique because it's <base_dir>/project_name/volume_name:
+        # every project is unique, and volume names within a project are unique
+        return self.ssh_exec_command('sudo mkdir -p ' + self.base_dir + project_name + '/' + volume_name)
+
+    def mkvolume(self, project_name, volume_name):
+        # We ensure volumes are unique by naming them project_name-volume_name,
+        # as that combination is known to be unique
+        volume = project_name + '-' + volume_name
+        bricks = ''
+        for brick in self.bricks:
+            bricks = bricks + brick + ':' + self.base_dir + project_name + '/' + volume_name + ' '
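+        # The assembled command looks like the following (the project name
+        # "testlabs" is illustrative):
+        #   sudo gluster volume create testlabs-home replica 2 transport tcp \
+        #       labstore1.pmtpa.wmnet:/a/testlabs/home ... labstore4.pmtpa.wmnet:/a/testlabs/home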
+        ret = self.ssh_exec_command('sudo gluster volume create ' + volume + ' replica 2 transport tcp ' + bricks, True)
+        if ret == 0:
+            # We initially set auth.allow to NONE, since the default is *,
+            # which would share the volume with everyone
+            self.ssh_exec_command('sudo gluster volume set ' + volume + ' auth.allow NONE', True)
+            for option in self.default_options:
+                self.ssh_exec_command('sudo gluster volume set ' + volume + ' ' + option, True)
+            self.ssh_exec_command('sudo gluster volume profile ' + volume + ' start', True)
+            self.ssh_exec_command('sudo gluster volume quota ' + volume + ' enable', True)
+            if volume_name in self.volume_quotas:
+                quota = self.volume_quotas[volume_name]
+            else:
+                quota = self.volume_quotas['default']
+            self.ssh_exec_command('sudo gluster volume quota ' + volume + ' limit-usage / ' + quota, True)
+            self.ssh_exec_command('sudo gluster volume start ' + volume, True)
+            return True
+        else:
+            return False
+
+    def setallow(self, project_name, volume_name, hosts):
+        if hosts:
+            hosts = ','.join(hosts)
+        else:
+            hosts = 'NONE'
+        volume = project_name + '-' + volume_name
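+        # Resulting command, e.g. (project name and addresses illustrative):
+        #   sudo gluster volume set testlabs-home auth.allow 10.4.0.1,10.4.0.2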
+        self.ssh_exec_command('sudo gluster volume set ' + volume + ' auth.allow ' + hosts, True)
+
+    def ssh_exec_command(self, command, single=False, return_stdout=False):
+        if single:
+            # Only run this on a single brick; we arbitrarily pick the first one
+            return self._ssh_exec_command(command, self.bricks[0], return_stdout)
+        else:
+            # Run this on all bricks
+            returnvals = []
+            for brick in self.bricks:
+                ret = self._ssh_exec_command(command, brick, return_stdout)
+                returnvals.append(ret)
+            return returnvals
+
+    def _ssh_exec_command(self, command, brick, return_stdout=False):
+        ssh = paramiko.SSHClient()
+        ssh.load_host_keys('/var/lib/' + self.user + '/.ssh/known_hosts')
+        if return_stdout:
+            ret = ''
+        else:
+            ret = -1
+        try:
+            ssh.connect(brick, 22, self.user, key_filename='/var/lib/' + self.user + '/.ssh/id_rsa')
+            chan = ssh.get_transport().open_session()
+            if self.loglevel >= DEBUG:
+                self.log(brick + ' - "' + command + '"')
+            chan.exec_command(command)
+            ret = chan.recv_exit_status()
+            if return_stdout:
+                # Since we are using a channel, we need to keep reading until
+                # there isn't any output left
+                ret = []
+                while chan.recv_ready():
+                    ret.append(chan.recv(1024))
+                ret = "".join(ret)
+                ret = ret.split('\n')
+            ssh.close()
+        except (paramiko.SSHException, socket.error):
+            sys.stderr.write("Failed to connect to %s.\n" % brick)
+            traceback.print_exc(file=sys.stderr)
+            return None
+        return ret
+
+    def get_hosts(self, hostdata):
+        project_hosts = {}
+        if hostdata:
+            for host in hostdata:
+                host_ip = host[1]["aRecord"][0]
+                puppet_vars = host[1]["puppetvar"]
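+                # puppetvar entries are "name=value" strings; we only want
+                # ones of the form "instanceproject=<project>"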
+                for puppet_var in puppet_vars:
+                    var_arr = puppet_var.split('=')
+                    if len(var_arr) == 2 and var_arr[0] == "instanceproject":
+                        project = var_arr[1]
+                        if project in project_hosts:
+                            project_hosts[project].append(host_ip)
+                        else:
+                            project_hosts[project] = [host_ip]
+                        # No need to go any further; we aren't reading other
+                        # variables
+                        break
+        return project_hosts
+
+    def get_volumes(self, volumedata):
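+        # `gluster volume info` output contains blocks like the following
+        # (abridged; names and addresses are illustrative):
+        #   Volume Name: testlabs-home
+        #   ...
+        #   auth.allow: 10.4.0.1,10.4.0.2
+        # We only read the "Volume Name" and "auth.allow" lines.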
+        volumes = {}
+        if volumedata:
+            current_volume = ''
+            for line in volumedata:
+                line = line.strip()
+                line_arr = line.split(': ')
+                if len(line_arr) == 2 and line_arr[0] == "Volume Name":
+                    current_volume = line_arr[1]
+                elif len(line_arr) == 2 and line_arr[0] == "auth.allow":
+                    if line_arr[1] == "NONE":
+                        hosts = []
+                    else:
+                        hosts = line_arr[1].split(',')
+                    volumes[current_volume] = {'auth.allow': hosts}
+                    # Reset current_volume so that a formatting error in the
+                    # output can't attach another project's hosts to this
+                    # volume
+                    current_volume = ''
+        return volumes
+
+    def search_s(self, ds, base, scope, query, attrlist=None):
+        try:
+            data = ds.search_s(base, scope, query, attrlist)
+            if not data:
+                raise ldap.NO_SUCH_OBJECT()
+            return data
+        except ldap.NO_SUCH_OBJECT:
+            sys.stderr.write("The search returned no entries.\n")
+            return None
+        except ldap.PROTOCOL_ERROR:
+            sys.stderr.write("There was an LDAP protocol error; see traceback.\n")
+            traceback.print_exc(file=sys.stderr)
+            return None
+        except Exception:
+            sys.stderr.write("There was a general error; this is unexpected. See traceback.\n")
+            traceback.print_exc(file=sys.stderr)
+            return None
+
+    def log(self, logstring):
+        if self.loglevel >= INFO:
+            log = datetime.datetime.now().strftime("%m/%d/%Y - %H:%M:%S - ") + logstring + "\n"
+            if self.logfile:
+                lf = open(self.logfile, 'a')
+                lf.write(log)
+                lf.close()
+            else:
+                print log
+
+def main():
+    volume_manager = VolumeManager()
+    volume_manager.run()
+
+if __name__ == "__main__":
+    main()
Property changes on: trunk/tools/subversion/user-management/manage-volumes |
___________________________________________________________________ |
Added: svn:executable |
   + *