Index: trunk/tools/subversion/user-management/manage-volumes
@@ -21,6 +21,9 @@
 # Volumes in projects listed as global; so: { 'dumps': ['xml'] } would be
 # an xml share in the dumps project being listed as global.
 self.global_shares = {'publicdata': ['project']}
+# Volumes which need to have hosts manually added to the gluster access list; so, { 'dumps':
+# { 'project': ['10.0.0.1'] } } would manually add 10.0.0.1 to the dumps-project access list
+self.manual_shares = {'publicdata': {'project': ['208.80.154.11']}}
 self.volume_quotas = {'home': '50GB','default': '300GB'}
 self.default_options = ['nfs.disable on']
 self.bricks = ['labstore1.pmtpa.wmnet', 'labstore2.pmtpa.wmnet', 'labstore3.pmtpa.wmnet', 'labstore4.pmtpa.wmnet']
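For illustration only (not part of the patch): further manually shared volumes nest under the same structure, one dict per project keyed by volume name. Reusing the dumps/xml example from the comment above, with a hypothetical address:

self.manual_shares = {
    'publicdata': {'project': ['208.80.154.11']},
    'dumps': {'xml': ['10.0.0.1']},
}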
@@ -65,6 +68,9 @@
 hosts = project_hosts[project_name]
 hosts.sort()
 for volume_name in self.volume_names:
+volume_hosts = hosts
+if project_name in self.manual_shares and volume_name in self.manual_shares[project_name]:
+volume_hosts.extend(self.manual_shares[project_name][volume_name])
 project_volume = project_name + '-' + volume_name
 if project_volume not in project_volumes:
 # First, make the volume directories. This function runs on all
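A minimal sketch of the per-volume merge the hunk above introduces, written as a standalone helper (the name is hypothetical). Note that the patch assigns volume_hosts = hosts, so extend() also mutates the shared project host list, and the appended manual entries are not re-sorted before the later comparison against the sorted auth.allow list; the sketch copies and sorts to sidestep both:

def merged_volume_hosts(hosts, manual_shares, project_name, volume_name):
    # Copy so the shared project host list is not mutated, append any
    # manually listed hosts for this volume, and sort so the result
    # compares cleanly against the sorted auth.allow list from gluster.
    volume_hosts = list(hosts)
    volume_hosts.extend(manual_shares.get(project_name, {}).get(volume_name, []))
    volume_hosts.sort()
    return volume_hosts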
@@ -82,25 +88,25 @@
 continue
 else:
 continue
-volume_hosts = []
+gluster_hosts = []
 if project_volume in project_volumes and 'auth.allow' in project_volumes[project_volume]:
-volume_hosts = project_volumes[project_volume]['auth.allow']
-volume_hosts.sort()
+gluster_hosts = project_volumes[project_volume]['auth.allow']
+gluster_hosts.sort()
 if project_name in self.global_shares and volume_name in self.global_shares[project_name]:
 # This is a global share
 # A host has been added or deleted, modify the auth.allow
-volume_nfs_hosts = ''
+gluster_nfs_hosts = ''
 if project_volume in project_volumes and 'nfs.rpc-auth-allow' in project_volumes[project_volume]:
-volume_nfs_hosts = project_volumes[project_volume]['nfs.rpc-auth-allow']
-if volume_nfs_hosts != '*':
+gluster_nfs_hosts = project_volumes[project_volume]['nfs.rpc-auth-allow']
+if gluster_nfs_hosts != '*':
 self.setglobal(project_name,volume_name)
-if hosts:
-if volume_hosts != hosts:
-self.setallow(project_name,volume_name,hosts)
+if volume_hosts:
+if gluster_hosts != volume_hosts:
+self.setallow(project_name,volume_name,volume_hosts)
 else:
 # All hosts have been deleted, or none have been created, ensure we
 # aren't sharing to anything
-if volume_hosts != []:
+if gluster_hosts != []:
 self.setallow(project_name,volume_name,[])
 # TODO: Unshare and stop deleted projects
 ds.unbind()
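As a hedged illustration of the end effect (setallow itself is not shown in this diff, so the exact command it issues is an assumption): the merged list is expected to land in the volume's gluster auth.allow option, roughly equivalent to:

import subprocess

def setallow_sketch(project_name, volume_name, hosts):
    # Hypothetical stand-in for setallow, not the script's real implementation.
    # Restricting an empty host list to localhost is an assumption here; the
    # real script may clear the option differently.
    volume = project_name + '-' + volume_name
    allow = ','.join(hosts) or '127.0.0.1'
    subprocess.check_call(['gluster', 'volume', 'set', volume, 'auth.allow', allow])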