root / lib / cmdlib.py @ d0b3526f
History | View | Annotate | Download (109.3 kB)
1 | a8083063 | Iustin Pop | #!/usr/bin/python
|
---|---|---|---|
2 | a8083063 | Iustin Pop | #
|
3 | a8083063 | Iustin Pop | |
4 | a8083063 | Iustin Pop | # Copyright (C) 2006, 2007 Google Inc.
|
5 | a8083063 | Iustin Pop | #
|
6 | a8083063 | Iustin Pop | # This program is free software; you can redistribute it and/or modify
|
7 | a8083063 | Iustin Pop | # it under the terms of the GNU General Public License as published by
|
8 | a8083063 | Iustin Pop | # the Free Software Foundation; either version 2 of the License, or
|
9 | a8083063 | Iustin Pop | # (at your option) any later version.
|
10 | a8083063 | Iustin Pop | #
|
11 | a8083063 | Iustin Pop | # This program is distributed in the hope that it will be useful, but
|
12 | a8083063 | Iustin Pop | # WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 | a8083063 | Iustin Pop | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 | a8083063 | Iustin Pop | # General Public License for more details.
|
15 | a8083063 | Iustin Pop | #
|
16 | a8083063 | Iustin Pop | # You should have received a copy of the GNU General Public License
|
17 | a8083063 | Iustin Pop | # along with this program; if not, write to the Free Software
|
18 | a8083063 | Iustin Pop | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 | a8083063 | Iustin Pop | # 02110-1301, USA.
|
20 | a8083063 | Iustin Pop | |
21 | a8083063 | Iustin Pop | |
22 | a8083063 | Iustin Pop | """Module implementing the commands used by gnt-* programs."""
|
23 | a8083063 | Iustin Pop | |
24 | a8083063 | Iustin Pop | # pylint: disable-msg=W0613,W0201
|
25 | a8083063 | Iustin Pop | |
26 | a8083063 | Iustin Pop | import os |
27 | a8083063 | Iustin Pop | import os.path |
28 | a8083063 | Iustin Pop | import sha |
29 | a8083063 | Iustin Pop | import socket |
30 | a8083063 | Iustin Pop | import time |
31 | a8083063 | Iustin Pop | import tempfile |
32 | a8083063 | Iustin Pop | import re |
33 | a8083063 | Iustin Pop | import platform |
34 | a8083063 | Iustin Pop | |
35 | a8083063 | Iustin Pop | from ganeti import rpc |
36 | a8083063 | Iustin Pop | from ganeti import ssh |
37 | a8083063 | Iustin Pop | from ganeti import logger |
38 | a8083063 | Iustin Pop | from ganeti import utils |
39 | a8083063 | Iustin Pop | from ganeti import errors |
40 | a8083063 | Iustin Pop | from ganeti import hypervisor |
41 | a8083063 | Iustin Pop | from ganeti import config |
42 | a8083063 | Iustin Pop | from ganeti import constants |
43 | a8083063 | Iustin Pop | from ganeti import objects |
44 | a8083063 | Iustin Pop | from ganeti import opcodes |
45 | a8083063 | Iustin Pop | from ganeti import ssconf |
46 | a8083063 | Iustin Pop | |
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Args:
      processor: the processor driving this LU's execution
      op: the opcode instance to process
      cfg: the cluster configuration object
      sstore: the cluster simple store object

    Raises:
      errors.OpPrereqError: if a parameter listed in _OP_REQP is
        missing from the opcode, if the cluster is not initialized
        (when REQ_CLUSTER is set), or if this is not the master node
        (when REQ_MASTER is set)

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        # NOTE: call-form raise is used throughout; the old
        # "raise E, (args)" statement form is deprecated and a syntax
        # error in newer Python versions
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = cfg.GetMaster()
      if master != socket.gethostname():
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
146 | a8083063 | Iustin Pop | |
147 | a8083063 | Iustin Pop | |
class NoHooksLU(LogicalUnit):
  """Base class for logical units that run no hooks.

  Deriving from this class lets a LogicalUnit skip hook execution
  entirely, avoiding duplicated boilerplate in the subclasses.

  """
  # No hooks path and no hook type: the hooks runner is never invoked
  # for these LUs (HPATH being None disables BuildHooksEnv calls).
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Return no hooks environment.

    Hooks are never executed for this LU, so there is nothing to
    build here; the (None) return value is ignored.

    """
    return None
|
165 | a8083063 | Iustin Pop | |
166 | a8083063 | Iustin Pop | |
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Lines containing all of the ip, fqdn and short name are kept; lines
  containing only some of them (stale or manual entries) are dropped;
  a fresh entry is appended if no complete one was found. When lines
  were dropped, the whole file is rewritten atomically via a temp
  file; otherwise new entries are simply appended.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')
  try:
    inthere = False

    save_lines = []
    add_lines = []
    removed = False

    while True:
      rawline = f.readline()

      if not rawline:
        # End of file
        break

      line = rawline.split('\n')[0]

      # Strip off comments
      line = line.split('#')[0]

      if not line:
        # Entire line was comment, skip
        save_lines.append(rawline)
        continue

      fields = line.split()

      # Single pass over the wanted names instead of the original
      # double membership scan per spec.
      wanted = set([ip, fullnode, node])
      present = wanted.intersection(fields)
      haveall = (present == wanted)
      havesome = bool(present)

      if haveall:
        inthere = True
        save_lines.append(rawline)
        continue

      if havesome and not haveall:
        # Line (old, or manual?) which is missing some. Remove.
        removed = True
        continue

      save_lines.append(rawline)

    if not inthere:
      add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

    if removed:
      if add_lines:
        save_lines = save_lines + add_lines

      # We removed a line, write a new file and replace old.
      fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
      newfile = os.fdopen(fd, 'w')
      newfile.write(''.join(save_lines))
      newfile.close()
      os.rename(tmpname, '/etc/hosts')

    elif add_lines:
      # Simply appending a new line will do the trick.
      f.seek(0, 2)
      for add in add_lines:
        f.write(add)
  finally:
    # Always release the handle; the original leaked it when any of
    # the processing above raised.
    f.close()
245 | a8083063 | Iustin Pop | |
246 | a8083063 | Iustin Pop | |
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Lines listing both the fqdn and ip with the expected public key are
  kept; lines matching only partially or with a different key are
  dropped; a fresh entry is appended when no complete one was found.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip - IPv4 address of host (str)
    pubkey - the public key of the cluster

  """
  if os.path.exists('/etc/ssh/ssh_known_hosts'):
    f = open('/etc/ssh/ssh_known_hosts', 'r+')
  else:
    f = open('/etc/ssh/ssh_known_hosts', 'w+')

  try:
    inthere = False

    save_lines = []
    add_lines = []
    removed = False

    while True:
      rawline = f.readline()
      logger.Debug('read %s' % (repr(rawline),))

      if not rawline:
        # End of file
        break

      line = rawline.split('\n')[0]

      parts = line.split(' ')
      if len(parts) < 3:
        # Blank or malformed line: keep it untouched. The original
        # code raised IndexError on parts[2] for such lines.
        save_lines.append(rawline)
        continue

      fields = parts[0].split(',')
      key = parts[2]

      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partial or stale entry: drop it.
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

      save_lines.append(rawline)

    if not inthere:
      add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
      logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

    if removed:
      save_lines = save_lines + add_lines

      # Write a new file and replace old.
      fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh')
      newfile = os.fdopen(fd, 'w')
      newfile.write(''.join(save_lines))
      newfile.close()
      logger.Debug("Wrote new known_hosts.")
      os.rename(tmpname, '/etc/ssh/ssh_known_hosts')

    elif add_lines:
      # Simply appending a new line will do the trick.
      f.seek(0, 2)
      for add in add_lines:
        f.write(add)
  finally:
    # Always release the handle; the original leaked it when any of
    # the processing above raised.
    f.close()
325 | a8083063 | Iustin Pop | |
326 | a8083063 | Iustin Pop | |
327 | a8083063 | Iustin Pop | def _HasValidVG(vglist, vgname): |
328 | a8083063 | Iustin Pop | """Checks if the volume group list is valid.
|
329 | a8083063 | Iustin Pop |
|
330 | a8083063 | Iustin Pop | A non-None return value means there's an error, and the return value
|
331 | a8083063 | Iustin Pop | is the error message.
|
332 | a8083063 | Iustin Pop |
|
333 | a8083063 | Iustin Pop | """
|
334 | a8083063 | Iustin Pop | vgsize = vglist.get(vgname, None)
|
335 | a8083063 | Iustin Pop | if vgsize is None: |
336 | a8083063 | Iustin Pop | return "volume group '%s' missing" % vgname |
337 | a8083063 | Iustin Pop | elif vgsize < 20480: |
338 | a8083063 | Iustin Pop | return ("volume group '%s' too small (20480MiB required, %dMib found" % |
339 | a8083063 | Iustin Pop | vgname, vgsize) |
340 | a8083063 | Iustin Pop | return None |
341 | a8083063 | Iustin Pop | |
342 | a8083063 | Iustin Pop | |
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.
  Pre-existing keys are backed up before being replaced.

  Args:
    node: the name of this host as a fqdn

  Raises:
    errors.OpExecError: if the ssh-keygen invocation fails

  """
  utils.RemoveFile('/root/.ssh/known_hosts')

  # Preserve any previous keypair before regenerating.
  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    # call-form raise: the "raise E, (args)" statement form is
    # deprecated and invalid in newer Python versions
    raise errors.OpExecError("could not generate ssh keypair, error %s" %
                             result.output)

  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()
376 | a8083063 | Iustin Pop | |
377 | a8083063 | Iustin Pop | |
378 | a8083063 | Iustin Pop | def _InitGanetiServerSetup(ss): |
379 | a8083063 | Iustin Pop | """Setup the necessary configuration for the initial node daemon.
|
380 | a8083063 | Iustin Pop |
|
381 | a8083063 | Iustin Pop | This creates the nodepass file containing the shared password for
|
382 | a8083063 | Iustin Pop | the cluster and also generates the SSL certificate.
|
383 | a8083063 | Iustin Pop |
|
384 | a8083063 | Iustin Pop | """
|
385 | a8083063 | Iustin Pop | # Create pseudo random password
|
386 | a8083063 | Iustin Pop | randpass = sha.new(os.urandom(64)).hexdigest()
|
387 | a8083063 | Iustin Pop | # and write it into sstore
|
388 | a8083063 | Iustin Pop | ss.SetKey(ss.SS_NODED_PASS, randpass) |
389 | a8083063 | Iustin Pop | |
390 | a8083063 | Iustin Pop | result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024", |
391 | a8083063 | Iustin Pop | "-days", str(365*5), "-nodes", "-x509", |
392 | a8083063 | Iustin Pop | "-keyout", constants.SSL_CERT_FILE,
|
393 | a8083063 | Iustin Pop | "-out", constants.SSL_CERT_FILE, "-batch"]) |
394 | a8083063 | Iustin Pop | if result.failed:
|
395 | a8083063 | Iustin Pop | raise errors.OpExecError, ("could not generate server ssl cert, command" |
396 | a8083063 | Iustin Pop | " %s had exitcode %s and error message %s" %
|
397 | a8083063 | Iustin Pop | (result.cmd, result.exit_code, result.output)) |
398 | a8083063 | Iustin Pop | |
399 | a8083063 | Iustin Pop | os.chmod(constants.SSL_CERT_FILE, 0400)
|
400 | a8083063 | Iustin Pop | |
401 | a8083063 | Iustin Pop | result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
|
402 | a8083063 | Iustin Pop | |
403 | a8083063 | Iustin Pop | if result.failed:
|
404 | a8083063 | Iustin Pop | raise errors.OpExecError, ("could not start the node daemon, command %s" |
405 | a8083063 | Iustin Pop | " had exitcode %s and error %s" %
|
406 | a8083063 | Iustin Pop | (result.cmd, result.exit_code, result.output)) |
407 | a8083063 | Iustin Pop | |
408 | a8083063 | Iustin Pop | |
def _InitClusterInterface(fullname, name, ip):
  """Initialize the master startup script.

  Writes two files: the cluster name file (containing the cluster's
  fqdn) and a shell init script that starts/stops the master virtual
  IP address on the xen-br0 interface.

  Args:
    fullname: fully qualified cluster name, written to the name file (str)
    name: host name substituted as MASTERNAME in the script (str)
    ip: IP address substituted as MASTERIP in the script (str)

  """
  # record the cluster name for other tools
  f = file(constants.CLUSTER_NAME_FILE, 'w')
  f.write("%s\n" % fullname)
  f.close()

  # generate the master-IP init script line by line
  f = file(constants.MASTER_INITD_SCRIPT, 'w')
  f.write ("#!/bin/sh\n")
  f.write ("\n")
  f.write ("# Start Ganeti Master Virtual Address\n")
  f.write ("\n")
  f.write ("DESC=\"Ganeti Master IP\"\n")
  f.write ("MASTERNAME=\"%s\"\n" % name)
  f.write ("MASTERIP=\"%s\"\n" % ip)
  f.write ("case \"$1\" in\n")
  f.write ("  start)\n")
  # start: refuse to take over if another master already answers pings;
  # in that case drop the cron link and fetch the current config instead
  f.write (" if fping -q -c 3 ${MASTERIP} &>/dev/null; then\n")
  f.write (" echo \"$MASTERNAME no-go - there is already a master.\"\n")
  f.write (" rm -f %s\n" % constants.MASTER_CRON_LINK)
  f.write (" scp ${MASTERNAME}:%s %s\n" %
           (constants.CLUSTER_CONF_FILE, constants.CLUSTER_CONF_FILE))
  f.write (" else\n")
  # otherwise bring up the master IP as an alias and announce it via arping
  f.write (" echo -n \"Starting $DESC: \"\n")
  f.write (" ip address add ${MASTERIP}/32 dev xen-br0"
           " label xen-br0:0\n")
  f.write (" arping -q -U -c 3 -I xen-br0 -s ${MASTERIP} ${MASTERIP}\n")
  f.write (" echo \"$MASTERNAME.\"\n")
  f.write (" fi\n")
  f.write (" ;;\n")
  f.write (" stop)\n")
  # stop: simply remove the alias address
  f.write (" echo -n \"Stopping $DESC: \"\n")
  f.write (" ip address del ${MASTERIP}/32 dev xen-br0\n")
  f.write (" echo \"$MASTERNAME.\"\n")
  f.write (" ;;\n")
  f.write (" *)\n")
  f.write (" echo \"Usage: $0 {start|stop}\" >&2\n")
  f.write (" exit 1\n")
  f.write (" ;;\n")
  f.write ("esac\n")
  f.write ("\n")
  f.write ("exit 0\n")
  # make sure the script hits the disk before we chmod/execute it
  f.flush()
  os.fsync(f.fileno())
  f.close()
  os.chmod(constants.MASTER_INITD_SCRIPT, 0755)
|
456 | a8083063 | Iustin Pop | |
457 | a8083063 | Iustin Pop | |
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Runs on the future master node only (REQ_CLUSTER is False since no
  cluster exists yet): validates names, IPs, volume group, mac prefix
  and hypervisor type, then writes the initial cluster state (name
  file, simple store, SSL cert, master IP, ssh setup, config file).

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  # opcode parameters that must be present (checked in LogicalUnit.__init__)
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """

    # NOTE(review): self.hostname is the dict returned by
    # utils.LookupHostname, so the whole dict ends up in the MASTER
    # env variable -- confirm a plain hostname string was not intended
    env = {"CLUSTER": self.op.cluster_name,
           "MASTER": self.hostname}
    return env, [], [self.hostname['hostname_full']]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves our own hostname and the requested cluster name, checks
    that our primary (and optional secondary) IP actually belong to
    this host, and validates vg_name, mac_prefix and hypervisor_type.
    Sets self.hostname, self.clustername and self.secondary_ip for
    use in Exec/BuildHooksEnv.

    Raises:
      errors.OpPrereqError: on any failed check

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError, ("Cluster is already initialised")

    hostname_local = socket.gethostname()
    # LookupHostname returns a dict with (at least) the keys used
    # below: 'hostname', 'hostname_full' and 'ip'
    self.hostname = hostname = utils.LookupHostname(hostname_local)
    if not hostname:
      raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" %
                                   hostname_local)

    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
    if not clustername:
      raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')"
                                   % self.op.cluster_name)

    # fping from localhost verifies the resolved IP is configured on
    # this machine
    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
    if result.failed:
      raise errors.OpPrereqError, ("Inconsistency: this host's name resolves"
                                   " to %s,\nbut this ip address does not"
                                   " belong to this host."
                                   " Aborting." % hostname['ip'])

    # the secondary ip is optional; when given and different from the
    # primary it must also belong to this host
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError, ("Invalid secondary ip given")
    if secondary_ip and secondary_ip != hostname['ip']:
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
      if result.failed:
        raise errors.OpPrereqError, ("You gave %s as secondary IP,\n"
                                     "but it does not belong to this host." %
                                     secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError, ("Error: %s" % vgstatus)

    # mac prefix must look like 'aa:bb:cc' (lower-case hex pairs)
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" %
                                   self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
                                   self.op.hypervisor_type)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Performs the actual initialization in order: name file + master
    init script, simple store, node daemon password/SSL cert, master
    IP startup, ssh/known_hosts/etc-hosts setup, and finally the
    cluster configuration file.

    """
    clustername = self.clustername
    hostname = self.hostname

    # adds the cluster name file and master startup script
    _InitClusterInterface(clustername['hostname_full'],
                          clustername['hostname'],
                          clustername['ip'])

    # set up the simple store
    ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname['hostname_full'])

    # set up ssh config and /etc/hosts
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # host key file format is "<type> <key> [comment]"; keep the key part
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname['hostname_full'],
                    hostname['ip'],
                    )

    _UpdateKnownHosts(hostname['hostname_full'],
                      hostname['ip'],
                      sshkey,
                      )

    _InitSSHSetup(hostname['hostname'])

    # init of cluster config file
    cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
                    clustername['hostname'], sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
577 | a8083063 | Iustin Pop | |
578 | a8083063 | Iustin Pop | |
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty, i.e. that no node besides
    the master is still part of the configuration.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMaster()

    nodelist = self.cfg.GetNodeList()
    # an empty list or a list holding only the master is acceptable
    if len(nodelist) > 0 and nodelist != [master]:
      # call-form raise: the "raise E, args" comma form is deprecated
      # and invalid in Python 3
      raise errors.OpPrereqError("There are still %d node(s) in "
                                 "this cluster." % (len(nodelist) - 1))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Backs up the master's SSH identity keys and tells the master node
    to leave the cluster.

    """
    utils.CreateBackup('/root/.ssh/id_dsa')
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
    rpc.call_node_leave_cluster(self.cfg.GetMaster())
|
607 | a8083063 | Iustin Pop | |
608 | a8083063 | Iustin Pop | |
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: the node's volume-group data (falsy if the RPC failed)
      node_result: dict with 'filelist'/'nodelist'/'hypervisor' entries
      remote_version: protocol version reported by the node (falsy if
        the node could not be contacted)
      feedback_fn: callable used to report errors

    Returns:
      True if any problem was found on the node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn(" - ERROR: sw version mismatch: master %s, node(%s) %s" %
                  (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn(" - ERROR: unable to check volume groups on node %s." %
                  (node,))
      bad = True
    else:
      # _HasValidVG returns an error message string, or None if the VG
      # is acceptable
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn(" - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn(" - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn(" - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn(" - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn(" - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop rebinds the 'node' parameter; after it
        # runs, 'node' names the last failing peer, not the node under
        # test -- harmless today since 'node' is not used afterwards,
        # but worth renaming
        for node in node_result['nodelist']:
          feedback_fn(" - ERROR: communication with node '%s': %s" %
                      (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn(" - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Returns:
      True if the instance verified cleanly, False otherwise -- note
      the polarity is inverted compared to the other _Verify* helpers.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn(" - ERROR: instance %s not in instance list %s" %
                  (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # map of node -> LVs the instance should have there
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn(" - ERROR: volume %s missing on node %s" %
                      (volume, node))
          bad = True

    # a non-'down' instance must actually run on its primary node
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn(" - ERROR: instance %s not running on node %s" %
                    (instance, node_current))
        bad = True

    # ...and must not run anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn(" - ERROR: instance %s should not run on node %s" %
                      (instance, node))
          bad = True

    return not bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn(" - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad


  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any unknown running instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn(" - ERROR: instance %s on node %s should not exist" %
                      (runninginstance, node))
          bad = True
    return bad

  def _VerifyNodeConfigFiles(self, ismaster, node, file_list, feedback_fn):
    """Verify the list of node config files

    Master-only config files must exist exactly on the master; common
    node config files must exist on every node.  Returns True if any
    problem was found.

    """
    bad = False
    for file_name in constants.MASTER_CONFIGFILES:
      if ismaster and file_name not in file_list:
        feedback_fn(" - ERROR: master config file %s missing from master"
                    " node %s" % (file_name, node))
        bad = True
      elif not ismaster and file_name in file_list:
        feedback_fn(" - ERROR: master config file %s should not exist"
                    " on non-master node %s" % (file_name, node))
        bad = True

    for file_name in constants.NODE_CONFIGFILES:
      if file_name not in file_list:
        feedback_fn(" - ERROR: config file %s missing from node %s" %
                    (file_name, node))
        bad = True

    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 1 if any problem was found, 0 otherwise (the integer is the
    operation's exit status).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.cfg.GetMaster()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = constants.CLUSTER_CONF_FILES
    local_checksums = utils.FingerprintFiles(file_names)

    # fan out all the RPC queries up front, then verify node by node
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_configfile = rpc.call_configfile_list(nodelist)
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result
      # node_configfile
      nodeconfigfile = all_configfile[node]

      if not nodeconfigfile:
        feedback_fn(" - ERROR: connection to %s failed" % (node))
        bad = True
        continue

      bad = bad or self._VerifyNodeConfigFiles(node==master, node,
                                               nodeconfigfile, feedback_fn)

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a non-dict answer means the RPC to that node failed
      if type(volumeinfo) != dict:
        feedback_fn(" - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn(" - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    # accumulated map of node -> LVs that should exist, built while
    # verifying each instance and reused for orphan detection below
    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
882 | a8083063 | Iustin Pop | |
883 | a8083063 | Iustin Pop | |
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Repeatedly queries the instance's primary node for the mirror status
  of all disks, printing progress, until no device reports a completion
  percentage anymore (sync finished) or, with oneshot, after one poll.

  Args:
    cfgw: the configuration object, used to set the disks' node IDs
    instance: the instance whose disks are being synchronized
    oneshot: if True, poll only once instead of waiting for completion
    unlock: if True, release the 'cmd' lock while sleeping between polls

  Returns:
    True if no disk ended up degraded, False otherwise.

  Raises:
    errors.RemoteError: if the node yields no data ten times in a row

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        # call-form raise: the "raise E, args" comma form is deprecated
        # and invalid in Python 3
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    # enumerate instead of range(len(...)): i is still needed to pair
    # each status tuple with its disk for the messages below
    for i, mstat in enumerate(rstats):
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      # a degraded device with no completion percentage will not heal
      # by waiting, so it counts towards the final failure result
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep until the next poll, optionally dropping the command lock
    # so other operations can proceed meanwhile
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
945 | a8083063 | Iustin Pop | |
946 | a8083063 | Iustin Pop | |
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  Recursively inspects the given block device and all its children on
  the given node.  Mirror status is only queried where the device is
  expected to be assembled: on the primary node, or on a secondary if
  the device type assembles there.

  """
  cfgw.SetDiskID(dev, node)

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    dev_info = rpc.call_blockdev_find(node, dev)
    if dev_info:
      # the sixth field of the find result flags a degraded device
      healthy = healthy and (not dev_info[5])
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False

  # 'and' short-circuits, so once a problem is found the remaining
  # children are not queried (matching the original behaviour)
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return healthy
|
967 | a8083063 | Iustin Pop | |
968 | a8083063 | Iustin Pop | |
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns:
      The per-node OS diagnosis data gathered via RPC.

    Raises:
      errors.OpExecError: if the RPC layer signals complete failure

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # identity test: only the literal False failure marker should abort,
    # not an arbitrary falsy result (the old '== False' would also match
    # e.g. an integer 0)
    if node_data is False:
      # call-form raise: the "raise E, args" comma form is deprecated
      # and invalid in Python 3
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
|
992 | a8083063 | Iustin Pop | |
993 | a8083063 | Iustin Pop | |
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allows itself to run.

    Returns:
      (env dict, pre-hook node list, post-hook node list) -- the node
      being removed is excluded from both hook node lists.

    """
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return {"NODE_NAME": self.op.node_name}, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
    - the node exists in the configuration
    - it does not have primary or secondary instances
    - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # raise instead of the old "log and return 1": returning from
      # CheckPrereq does not abort the LU, so Exec() would then run with
      # self.node unset; raising matches the contract documented above
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMaster()
    if node.name == masternode:
      # call-form raise: the "raise E, args" comma form is deprecated
      # and invalid in Python 3
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # normalize to the expanded node name for Exec and the hooks env
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Stops the node daemon, removes the node's configuration files and
    finally drops the node from the cluster configuration.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)
|
1063 | a8083063 | Iustin Pop | |
1064 | a8083063 | Iustin Pop | |
1065 | a8083063 | Iustin Pop | class LUQueryNodes(NoHooksLU): |
1066 | a8083063 | Iustin Pop | """Logical unit for querying nodes.
|
1067 | a8083063 | Iustin Pop |
|
1068 | a8083063 | Iustin Pop | """
|
1069 | a8083063 | Iustin Pop | _OP_REQP = ["output_fields"]
|
1070 | a8083063 | Iustin Pop | |
1071 | a8083063 | Iustin Pop | def CheckPrereq(self): |
1072 | a8083063 | Iustin Pop | """Check prerequisites.
|
1073 | a8083063 | Iustin Pop |
|
1074 | a8083063 | Iustin Pop | This checks that the fields required are valid output fields.
|
1075 | a8083063 | Iustin Pop |
|
1076 | a8083063 | Iustin Pop | """
|
1077 | a8083063 | Iustin Pop | self.static_fields = frozenset(["name", "pinst", "sinst", "pip", "sip"]) |
1078 | a8083063 | Iustin Pop | self.dynamic_fields = frozenset(["dtotal", "dfree", |
1079 | a8083063 | Iustin Pop | "mtotal", "mnode", "mfree"]) |
1080 | a8083063 | Iustin Pop | self.all_fields = self.static_fields | self.dynamic_fields |
1081 | a8083063 | Iustin Pop | |
1082 | a8083063 | Iustin Pop | if not self.all_fields.issuperset(self.op.output_fields): |
1083 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Unknown output fields selected: %s" |
1084 | a8083063 | Iustin Pop | % ",".join(frozenset(self.op.output_fields). |
1085 | a8083063 | Iustin Pop | difference(self.all_fields)))
|
1086 | a8083063 | Iustin Pop | |
1087 | a8083063 | Iustin Pop | |
1088 | a8083063 | Iustin Pop | def Exec(self, feedback_fn): |
1089 | a8083063 | Iustin Pop | """Computes the list of nodes and their attributes.
|
1090 | a8083063 | Iustin Pop |
|
1091 | a8083063 | Iustin Pop | """
|
1092 | a8083063 | Iustin Pop | nodenames = utils.NiceSort(self.cfg.GetNodeList())
|
1093 | a8083063 | Iustin Pop | nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames] |
1094 | a8083063 | Iustin Pop | |
1095 | a8083063 | Iustin Pop | |
1096 | a8083063 | Iustin Pop | # begin data gathering
|
1097 | a8083063 | Iustin Pop | |
1098 | a8083063 | Iustin Pop | if self.dynamic_fields.intersection(self.op.output_fields): |
1099 | a8083063 | Iustin Pop | live_data = {} |
1100 | a8083063 | Iustin Pop | node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
|
1101 | a8083063 | Iustin Pop | for name in nodenames: |
1102 | a8083063 | Iustin Pop | nodeinfo = node_data.get(name, None)
|
1103 | a8083063 | Iustin Pop | if nodeinfo:
|
1104 | a8083063 | Iustin Pop | live_data[name] = { |
1105 | a8083063 | Iustin Pop | "mtotal": utils.TryConvert(int, nodeinfo['memory_total']), |
1106 | a8083063 | Iustin Pop | "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']), |
1107 | a8083063 | Iustin Pop | "mfree": utils.TryConvert(int, nodeinfo['memory_free']), |
1108 | a8083063 | Iustin Pop | "dtotal": utils.TryConvert(int, nodeinfo['vg_size']), |
1109 | a8083063 | Iustin Pop | "dfree": utils.TryConvert(int, nodeinfo['vg_free']), |
1110 | a8083063 | Iustin Pop | } |
1111 | a8083063 | Iustin Pop | else:
|
1112 | a8083063 | Iustin Pop | live_data[name] = {} |
1113 | a8083063 | Iustin Pop | else:
|
1114 | a8083063 | Iustin Pop | live_data = dict.fromkeys(nodenames, {})
|
1115 | a8083063 | Iustin Pop | |
1116 | a8083063 | Iustin Pop | node_to_primary = dict.fromkeys(nodenames, 0) |
1117 | a8083063 | Iustin Pop | node_to_secondary = dict.fromkeys(nodenames, 0) |
1118 | a8083063 | Iustin Pop | |
1119 | a8083063 | Iustin Pop | if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields: |
1120 | a8083063 | Iustin Pop | instancelist = self.cfg.GetInstanceList()
|
1121 | a8083063 | Iustin Pop | |
1122 | a8083063 | Iustin Pop | for instance in instancelist: |
1123 | a8083063 | Iustin Pop | instanceinfo = self.cfg.GetInstanceInfo(instance)
|
1124 | a8083063 | Iustin Pop | node_to_primary[instanceinfo.primary_node] += 1
|
1125 | a8083063 | Iustin Pop | for secnode in instanceinfo.secondary_nodes: |
1126 | a8083063 | Iustin Pop | node_to_secondary[secnode] += 1
|
1127 | a8083063 | Iustin Pop | |
1128 | a8083063 | Iustin Pop | # end data gathering
|
1129 | a8083063 | Iustin Pop | |
1130 | a8083063 | Iustin Pop | output = [] |
1131 | a8083063 | Iustin Pop | for node in nodelist: |
1132 | a8083063 | Iustin Pop | node_output = [] |
1133 | a8083063 | Iustin Pop | for field in self.op.output_fields: |
1134 | a8083063 | Iustin Pop | if field == "name": |
1135 | a8083063 | Iustin Pop | val = node.name |
1136 | a8083063 | Iustin Pop | elif field == "pinst": |
1137 | a8083063 | Iustin Pop | val = node_to_primary[node.name] |
1138 | a8083063 | Iustin Pop | elif field == "sinst": |
1139 | a8083063 | Iustin Pop | val = node_to_secondary[node.name] |
1140 | a8083063 | Iustin Pop | elif field == "pip": |
1141 | a8083063 | Iustin Pop | val = node.primary_ip |
1142 | a8083063 | Iustin Pop | elif field == "sip": |
1143 | a8083063 | Iustin Pop | val = node.secondary_ip |
1144 | a8083063 | Iustin Pop | elif field in self.dynamic_fields: |
1145 | a8083063 | Iustin Pop | val = live_data[node.name].get(field, "?")
|
1146 | a8083063 | Iustin Pop | else:
|
1147 | a8083063 | Iustin Pop | raise errors.ParameterError, field
|
1148 | a8083063 | Iustin Pop | val = str(val)
|
1149 | a8083063 | Iustin Pop | node_output.append(val) |
1150 | a8083063 | Iustin Pop | output.append(node_output) |
1151 | a8083063 | Iustin Pop | |
1152 | a8083063 | Iustin Pop | return output
|
1153 | a8083063 | Iustin Pop | |
1154 | a8083063 | Iustin Pop | |
def _CheckNodesDirs(node_list, paths):
  """Verify if the given nodes have the same files.

  Args:
    node_list: the list of node names to check
    paths: the list of directories to checksum and compare

  Returns:
    list of (node, different_file, message); if empty, the files are in sync

  """
  # gather all regular files living directly under the given directories
  file_names = []
  for directory in paths:
    for entry in os.listdir(directory):
      full_path = os.path.join(directory, entry)
      if os.path.isfile(full_path):
        file_names.append(full_path)

  local_checksums = utils.FingerprintFiles(file_names)

  bad_results = []
  all_node_results = rpc.call_node_verify(node_list,
                                          {'filelist': file_names})
  for node_name in node_list:
    node_result = all_node_results.get(node_name, False)
    if not node_result or 'filelist' not in node_result:
      # the node did not answer (or answered garbage): flag everything
      bad_results.append((node_name, "'all files'",
                          "node communication error"))
      continue
    remote_checksums = node_result['filelist']
    for fname in local_checksums:
      if fname not in remote_checksums:
        bad_results.append((node_name, fname, "missing file"))
      elif remote_checksums[fname] != local_checksums[fname]:
        bad_results.append((node_name, fname, "wrong checksum"))

  return bad_results
|
1189 | a8083063 | Iustin Pop | |
1190 | a8083063 | Iustin Pop | |
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
      - the new node is not already in the config
      - it is resolvable
      - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.LookupHostname(node_name)
    if not dns_data:
      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)

    node = dns_data['hostname']
    primary_ip = self.op.primary_ip = dns_data['ip']
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # no secondary given: the node is single-homed, secondary == primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # make sure neither of the new node's addresses clashes with an
    # address already used by a configured node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(cfg.GetMaster())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    command = ["fping", "-q", primary_ip]
    result = utils.RunCmd(command)
    if result.failed:
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
      result = utils.RunCmd(command)
      if result.failed:
        raise errors.OpPrereqError("Node secondary ip not reachable by ping")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so refuse
    # anything outside this safe character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # remove first the root's known_hosts file
    utils.RemoveFile("/root/.ssh/known_hosts")
    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity; give the node daemon some time to come up
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    keyarray = []
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # verify the node can actually reach its own claimed secondary ip
      result = ssh.SSHCall(node, "root",
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
      if result.failed:
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMaster())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = [constants.MASTER_CRON_FILE,
               constants.MASTER_INITD_SCRIPT,
               constants.CLUSTER_NAME_FILE]
    to_copy.extend(ss.GetFileList())
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
|
1404 | a8083063 | Iustin Pop | |
1405 | a8083063 | Iustin Pop | |
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # this LU deliberately runs on a non-master node
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = socket.gethostname()

    self.old_master = self.cfg.GetMaster()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # best effort: failures are logged but the failover proceeds, so the
    # admin can fix the old/new master state manually
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)

    self.cfg.SetMaster(self.new_master)
1468 | a8083063 | Iustin Pop | |
1469 | a8083063 | Iustin Pop | |
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cfg = self.cfg
    inst_list = [cfg.GetInstanceInfo(iname)
                 for iname in cfg.GetInstanceList()]

    return {
      "name": cfg.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": cfg.GetMaster(),
      "architecture": (platform.architecture()[0], platform.machine()),
      "instances": [(inst.name, inst.primary_node) for inst in inst_list],
      "nodes": cfg.GetNodeList(),
      }
|
1503 | a8083063 | Iustin Pop | |
1504 | a8083063 | Iustin Pop | |
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
    if self.op.nodes:
      nodes = self.op.nodes
    else:
      # an empty node list means "all nodes"
      nodes = self.cfg.GetNodeList()
    self.nodes = []
    for node in nodes:
      nname = self.cfg.ExpandNodeName(node)
      if nname is None:
        raise errors.OpPrereqError("Node '%s' is unknown." % node)
      self.nodes.append(nname)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    The file named by self.op.filename is copied via ssh to every node
    in self.nodes except this host itself; failed copies are logged but
    do not abort the operation.

    """
    filename = self.op.filename

    myname = socket.gethostname()

    for node in self.nodes:
      if node == myname:
        # no point in copying the file onto ourselves
        continue
      if not ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
|
1550 | a8083063 | Iustin Pop | |
1551 | a8083063 | Iustin Pop | |
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    This LU needs no checks before running.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # the configuration object knows how to serialise itself
    return self.cfg.DumpConfig()
1569 | a8083063 | Iustin Pop | |
1570 | a8083063 | Iustin Pop | |
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    if self.op.nodes:
      nodes = self.op.nodes
    else:
      # an empty node list means "all nodes"
      nodes = self.cfg.GetNodeList()
    self.nodes = []
    for node in nodes:
      nname = self.cfg.ExpandNodeName(node)
      if nname is None:
        raise errors.OpPrereqError("Node '%s' is unknown." % node)
      self.nodes.append(nname)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    Returns:
      list of (node, command, output, exit_code) tuples, one per node

    """
    data = []
    for node in self.nodes:
      result = utils.RunCmd(["ssh", node, self.op.command])
      data.append((node, result.cmd, result.output, result.exit_code))

    return data
|
1604 | a8083063 | Iustin Pop | |
1605 | a8083063 | Iustin Pop | |
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns:
      the disk mapping info from _AssembleInstanceDisks

    Raises:
      errors.OpExecError if any block device could not be assembled

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
|
1635 | a8083063 | Iustin Pop | |
1636 | a8083063 | Iustin Pop | |
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
      in an error return from the function

  Returns:
    false if the operation failed
    list of (host, instance_visible_name, node_visible_name) if the operation
    suceeded with the mapping from node devices to instance devices
  """
  primary = instance.primary_node
  disks_ok = True
  device_info = []

  for disk in instance.disks:
    primary_result = None
    for node_name, per_node_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(per_node_disk, node_name)
      on_primary = node_name == primary
      assemble_result = rpc.call_blockdev_assemble(node_name, per_node_disk,
                                                   on_primary)
      if not assemble_result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (disk.iv_name, node_name, on_primary))
        # a secondary-node failure is only fatal when the caller cares
        if on_primary or not ignore_secondaries:
          disks_ok = False
      if on_primary:
        primary_result = assemble_result
    device_info.append((primary, disk.iv_name, primary_result))

  return disks_ok, device_info
|
1671 | a8083063 | Iustin Pop | |
1672 | a8083063 | Iustin Pop | |
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    Refuses to shut down the block devices while the instance is
    reported as running on its primary node.

    Raises:
      OpExecError: if the primary node cannot be contacted or the
        instance is still running.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the RPC failed for that node
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1708 | a8083063 | Iustin Pop | |
1709 | a8083063 | Iustin Pop | |
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node do not affect
  the return value; errors on secondary nodes always do.
  (The previous docstring stated the opposite of what the code does.)

  Returns:
    True if every required shutdown succeeded, False otherwise.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a failure only counts if it is on a secondary node, or on the
        # primary node when the caller did not ask to ignore it
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1729 | a8083063 | Iustin Pop | |
1730 | a8083063 | Iustin Pop | |
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      "FORCE": self.op.force,
      }
    nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that the
    bridges its NICs need exist on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
      raise errors.OpPrereqError("one or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, instance.primary_node))

    self.instance = instance
    # canonicalise the name in the opcode to the configured one
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Verifies free memory on the primary node, assembles the disks and
    starts the instance; on failure the disks are shut down again.

    Raises:
      OpExecError: if the node cannot be contacted, memory is
        insufficient, the disks cannot be assembled, or the start
        itself fails.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional in the opcode
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    # with force, secondary-node assembly errors are tolerated
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=force)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      if not force:
        logger.Error("If the message above refers to a secondary node,"
                     " you can retry the operation using '--force'.")
      raise errors.OpExecError("Disk consistency error")

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # clean up the just-assembled disks before failing
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1815 | a8083063 | Iustin Pop | |
1816 | a8083063 | Iustin Pop | |
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      }
    nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed shutdown RPC is only logged; the instance is still marked
    down in the configuration and its disks are deactivated.

    """
    instance = self.instance
    node_current = instance.primary_node
    if not rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
1864 | a8083063 | Iustin Pop | |
1865 | a8083063 | Iustin Pop | |
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      }
    nl = ([self.cfg.GetMaster(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its block devices and finally
    deletes it from the cluster configuration.

    Raises:
      OpExecError: if the instance could not be shut down on its
        primary node.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                               (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    _RemoveDisks(instance, self.cfg)

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
1921 | a8083063 | Iustin Pop | |
1922 | a8083063 | Iustin Pop | |
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  # NOTE: was "OP_REQP"; every other LU declares "_OP_REQP", so the
  # required-parameter check never saw this list — fixed for consistency
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.static_fields = frozenset(["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge"])
    # dynamic fields require live data from the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    self.all_fields = self.static_fields | self.dynamic_fields

    if not self.all_fields.issuperset(self.op.output_fields):
      raise errors.OpPrereqError("Unknown output fields selected: %s"
                                 % ",".join(frozenset(self.op.output_fields).
                                            difference(self.all_fields)))

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns:
      A list of rows (one per instance, sorted by name), each row being
      the list of string values for the requested output fields.

    """
    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # live data needed: ask every involved primary node
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result is False:
          # False (as opposed to an empty answer) marks an RPC failure
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = ",".join(instance.secondary_nodes) or "-"
        elif field == "admin_state":
          if instance.status == "down":
            val = "no"
          else:
            val = "yes"
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          else:
            if live_data.get(instance.name):
              val = "running"
            else:
              val = "stopped"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        else:
          # should be unreachable: CheckPrereq validated the field list
          raise errors.ParameterError(field)
        val = str(val)
        iout.append(val)
      output.append(iout)

    return output
2025 | a8083063 | Iustin Pop | |
2026 | a8083063 | Iustin Pop | |
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    nl = [self.cfg.GetMaster()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    (secondary) node has enough free memory and that the required
    bridges exist on it.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check memory requirements on the secondary node
    target_node = instance.secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # was "% nodeinfo", which printed the whole RPC result dict
      # instead of the node name
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance on the target node, where the instance
    # will actually run after the failover (this used to check the
    # primary node, contradicting the error message below)
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("one or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # best-effort: a failed shutdown is logged but the failover
      # continues, so the operator must ensure the source node is down
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                   " anyway. Please make sure node %s is down" %
                   (instance.name, source_node, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2152 | a8083063 | Iustin Pop | |
def _CreateBlockDevOnPrimary(cfg, node, device):
  """Create a tree of block devices on the primary node.

  This always creates all devices: children are created first
  (depth-first), then the device itself.

  Returns:
    True if the whole tree was created, False on the first failure.

  """
  # recurse into the children before touching the parent device
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, child):
      return False

  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size, True)
  if not created_id:
    return False
  # remember the physical id only if the device had none yet
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2173 | a8083063 | Iustin Pop | |
def _CreateBlockDevOnSecondary(cfg, node, device, force):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  Returns:
    True if everything that had to be created was, False otherwise.

  """
  # once a device requires creation on the secondary, its whole
  # subtree is created as well
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, child, force):
      return False

  # nothing to create at this level: we only recursed for the children
  if not force:
    return True

  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size, False)
  if not created_id:
    return False
  # remember the physical id only if the device had none yet
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2200 | a8083063 | Iustin Pop | |
def _GenerateMDDRBDBranch(cfg, vgname, primary, secondary, size, base):
  """Generate a drbd device complete with its children.

  """
  # Reserve a DRBD network port and make the LV names unique per port.
  port = cfg.AllocatePort()
  lv_base = "%s_%s" % (base, port)
  data_lv = objects.Disk(dev_type="lvm", size=size,
                         logical_id=(vgname, "%s.data" % lv_base))
  # The metadata volume has a fixed 128 MB size.
  meta_lv = objects.Disk(dev_type="lvm", size=128,
                         logical_id=(vgname, "%s.meta" % lv_base))
  return objects.Disk(dev_type="drbd", size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv])
|
2215 | a8083063 | Iustin Pop | |
2216 | a8083063 | Iustin Pop | |
def _GenerateDiskTemplate(cfg, vgname, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  if template_name == "diskless":
    return []

  if template_name == "plain":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    # One plain LV per visible disk (sda=os, sdb=swap).
    sda = objects.Disk(dev_type="lvm", size=disk_sz,
                       logical_id=(vgname, "%s.os" % instance_name),
                       iv_name="sda")
    sdb = objects.Disk(dev_type="lvm", size=swap_sz,
                       logical_id=(vgname, "%s.swap" % instance_name),
                       iv_name="sdb")
    return [sda, sdb]

  if template_name == "local_raid1":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    # Two local LVs mirrored by an md raid1 device, for each of sda/sdb.
    sda_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
                          logical_id=(vgname, "%s.os_m1" % instance_name))
    sda_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
                          logical_id=(vgname, "%s.os_m2" % instance_name))
    md_sda = objects.Disk(dev_type="md_raid1", iv_name="sda",
                          size=disk_sz, children=[sda_m1, sda_m2])
    sdb_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
                          logical_id=(vgname, "%s.swap_m1" % instance_name))
    sdb_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
                          logical_id=(vgname, "%s.swap_m2" % instance_name))
    md_sdb = objects.Disk(dev_type="md_raid1", iv_name="sdb",
                          size=swap_sz, children=[sdb_m1, sdb_m2])
    return [md_sda, md_sdb]

  if template_name == "remote_raid1":
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # One drbd branch per visible disk, each wrapped in an md raid1 device.
    # Branch generation order matters: it consumes ports from the config.
    drbd_sda = _GenerateMDDRBDBranch(cfg, vgname,
                                     primary_node, remote_node, disk_sz,
                                     "%s-sda" % instance_name)
    md_sda = objects.Disk(dev_type="md_raid1", iv_name="sda",
                          children=[drbd_sda], size=disk_sz)
    drbd_sdb = _GenerateMDDRBDBranch(cfg, vgname,
                                     primary_node, remote_node, swap_sz,
                                     "%s-sdb" % instance_name)
    md_sdb = objects.Disk(dev_type="md_raid1", iv_name="sdb",
                          children=[drbd_sdb], size=swap_sz)
    return [md_sda, md_sdb]

  raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
|
2275 | a8083063 | Iustin Pop | |
2276 | a8083063 | Iustin Pop | |
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    # Create on all secondary nodes before the primary node.
    #HARDCODE
    for snode in instance.secondary_nodes:
      ok = _CreateBlockDevOnSecondary(cfg, snode, disk, False)
      if not ok:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, disk):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2304 | a8083063 | Iustin Pop | |
2305 | a8083063 | Iustin Pop | |
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    # Walk every (node, disk) pair of the device tree and remove each
    # component, logging but not aborting on individual failures.
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_removed = False
  return all_removed
|
2333 | a8083063 | Iustin Pop | |
2334 | a8083063 | Iustin Pop | |
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Validates the creation parameters (mode, nodes, disk template, free
  space, OS, name/IP, bridge) in CheckPrereq and then, in Exec, creates
  the disks, registers the instance in the config, optionally waits for
  mirror sync, runs the OS create/import scripts and starts the
  instance if requested.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.op.pnode,
      "INSTANCE_SECONDARIES": " ".join(self.secondaries),
      "DISK_TEMPLATE": self.op.disk_template,
      "MEM_SIZE": self.op.mem_size,
      "DISK_SIZE": self.op.disk_size,
      "SWAP_SIZE": self.op.swap_size,
      "VCPUS": self.op.vcpus,
      "BRIDGE": self.op.bridge,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGE"] = self.src_image
    if self.inst_ip:
      env["INSTANCE_IP"] = self.inst_ip

    nl = ([self.cfg.GetMaster(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Raises errors.OpPrereqError for invalid user input and
    errors.ProgrammerError for internal inconsistencies; on success it
    stores the normalized parameters on self (pnode, secondaries,
    inst_ip, instance_status, src_image when importing).

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      # fixed typo: was "uknown"
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template == constants.DT_REMOTE_RAID1:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" % self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # was "% nodeinfo", which printed the whole result dict instead
        # of the offending node's name
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
    if not isinstance(os_obj, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # instance verification
    hostname1 = utils.LookupHostname(self.op.instance_name)
    if not hostname1:
      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
                                 self.op.instance_name)

    self.op.instance_name = instance_name = hostname1['hostname']
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1['ip']
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    # a reachable IP means the address is already taken by another host
    command = ["fping", "-q", hostname1['ip']]
    result = utils.RunCmd(command)
    if not result.failed:
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                 (hostname1['ip'], instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg, self.cfg.GetVGName(),
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj)
    elif iobj.disk_template == "remote_raid1":
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                           src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
2613 | a8083063 | Iustin Pop | |
2614 | a8083063 | Iustin Pop | |
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    primary = instance.primary_node

    # Ask the primary node which instances it is actually running.
    running = rpc.call_instance_list([primary])[primary]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % primary)

    if instance.name not in running:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name,
                                                        primary))

    hyper = hypervisor.GetHypervisor()
    return primary, hyper.GetShellCommandForConsole(instance.name)
|
2657 | a8083063 | Iustin Pop | |
2658 | a8083063 | Iustin Pop | |
2659 | a8083063 | Iustin Pop | class LUAddMDDRBDComponent(LogicalUnit): |
2660 | a8083063 | Iustin Pop | """Adda new mirror member to an instance's disk.
|
2661 | a8083063 | Iustin Pop |
|
2662 | a8083063 | Iustin Pop | """
|
2663 | a8083063 | Iustin Pop | HPATH = "mirror-add"
|
2664 | a8083063 | Iustin Pop | HTYPE = constants.HTYPE_INSTANCE |
2665 | a8083063 | Iustin Pop | _OP_REQP = ["instance_name", "remote_node", "disk_name"] |
2666 | a8083063 | Iustin Pop | |
2667 | a8083063 | Iustin Pop | def BuildHooksEnv(self): |
2668 | a8083063 | Iustin Pop | """Build hooks env.
|
2669 | a8083063 | Iustin Pop |
|
2670 | a8083063 | Iustin Pop | This runs on the master, the primary and all the secondaries.
|
2671 | a8083063 | Iustin Pop |
|
2672 | a8083063 | Iustin Pop | """
|
2673 | a8083063 | Iustin Pop | env = { |
2674 | a8083063 | Iustin Pop | "INSTANCE_NAME": self.op.instance_name, |
2675 | a8083063 | Iustin Pop | "NEW_SECONDARY": self.op.remote_node, |
2676 | a8083063 | Iustin Pop | "DISK_NAME": self.op.disk_name, |
2677 | a8083063 | Iustin Pop | } |
2678 | a8083063 | Iustin Pop | nl = [self.cfg.GetMaster(), self.instance.primary_node, |
2679 | a8083063 | Iustin Pop | self.op.remote_node,] + list(self.instance.secondary_nodes) |
2680 | a8083063 | Iustin Pop | return env, nl, nl
|
2681 | a8083063 | Iustin Pop | |
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and the target node exist, that the
    node is not the instance's primary, that the instance uses the
    remote_raid1 layout, and that the named disk exists and has at
    most one slave device; the instance, node and disk objects are
    stored on self for Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

    # expand a possibly-short node name to its full form
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError, ("The specified node is the primary node of"
                                   " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError, ("Instance's disk layout is not"
                                   " remote_raid1.")
    # locate the disk by iv_name; the for/else raises when no disk matches,
    # otherwise 'disk' keeps the matching object after the break
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
                                   " instance." % self.op.disk_name)
    # a raid1 device with two slaves is already complete; adding a third
    # mirror member is not supported
    if len(disk.children) > 1:
      raise errors.OpPrereqError, ("The device already has two slave"
                                   " devices.\n"
                                   "This would create a 3-disk raid1"
                                   " which we don't allow.")
    self.disk = disk
|
2719 | a8083063 | Iustin Pop | |
2720 | a8083063 | Iustin Pop | def Exec(self, feedback_fn): |
2721 | a8083063 | Iustin Pop | """Add the mirror component
|
2722 | a8083063 | Iustin Pop |
|
2723 | a8083063 | Iustin Pop | """
|
2724 | a8083063 | Iustin Pop | disk = self.disk
|
2725 | a8083063 | Iustin Pop | instance = self.instance
|
2726 | a8083063 | Iustin Pop | |
2727 | a8083063 | Iustin Pop | remote_node = self.remote_node
|
2728 | 72d6c464 | Michael Hanselmann | new_drbd = _GenerateMDDRBDBranch(self.cfg, self.cfg.GetVGName(), |
2729 | 72d6c464 | Michael Hanselmann | instance.primary_node, remote_node, |
2730 | 72d6c464 | Michael Hanselmann | disk.size, "%s-%s" %
|
2731 | a8083063 | Iustin Pop | (instance.name, self.op.disk_name))
|
2732 | a8083063 | Iustin Pop | |
2733 | a8083063 | Iustin Pop | logger.Info("adding new mirror component on secondary")
|
2734 | a8083063 | Iustin Pop | #HARDCODE
|
2735 | a8083063 | Iustin Pop | if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False): |
2736 | a8083063 | Iustin Pop | raise errors.OpExecError, ("Failed to create new component on secondary" |
2737 | a8083063 | Iustin Pop | " node %s" % remote_node)
|
2738 | a8083063 | Iustin Pop | |
2739 | a8083063 | Iustin Pop | logger.Info("adding new mirror component on primary")
|
2740 | a8083063 | Iustin Pop | #HARDCODE
|
2741 | a8083063 | Iustin Pop | if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd): |
2742 | a8083063 | Iustin Pop | # remove secondary dev
|
2743 | a8083063 | Iustin Pop | self.cfg.SetDiskID(new_drbd, remote_node)
|
2744 | a8083063 | Iustin Pop | rpc.call_blockdev_remove(remote_node, new_drbd) |
2745 | a8083063 | Iustin Pop | raise errors.OpExecError, ("Failed to create volume on primary") |
2746 | a8083063 | Iustin Pop | |
2747 | a8083063 | Iustin Pop | # the device exists now
|
2748 | a8083063 | Iustin Pop | # call the primary node to add the mirror to md
|
2749 | a8083063 | Iustin Pop | logger.Info("adding new mirror component to md")
|
2750 | a8083063 | Iustin Pop | if not rpc.call_blockdev_addchild(instance.primary_node, |
2751 | a8083063 | Iustin Pop | disk, new_drbd): |
2752 | a8083063 | Iustin Pop | logger.Error("Can't add mirror compoment to md!")
|
2753 | a8083063 | Iustin Pop | self.cfg.SetDiskID(new_drbd, remote_node)
|
2754 | a8083063 | Iustin Pop | if not rpc.call_blockdev_remove(remote_node, new_drbd): |
2755 | a8083063 | Iustin Pop | logger.Error("Can't rollback on secondary")
|
2756 | a8083063 | Iustin Pop | self.cfg.SetDiskID(new_drbd, instance.primary_node)
|
2757 | a8083063 | Iustin Pop | if not rpc.call_blockdev_remove(instance.primary_node, new_drbd): |
2758 | a8083063 | Iustin Pop | logger.Error("Can't rollback on primary")
|
2759 | a8083063 | Iustin Pop | raise errors.OpExecError, "Can't add mirror component to md array" |
2760 | a8083063 | Iustin Pop | |
2761 | a8083063 | Iustin Pop | disk.children.append(new_drbd) |
2762 | a8083063 | Iustin Pop | |
2763 | a8083063 | Iustin Pop | self.cfg.AddInstance(instance)
|
2764 | a8083063 | Iustin Pop | |
2765 | a8083063 | Iustin Pop | _WaitForSync(self.cfg, instance)
|
2766 | a8083063 | Iustin Pop | |
2767 | a8083063 | Iustin Pop | return 0 |
2768 | a8083063 | Iustin Pop | |
2769 | a8083063 | Iustin Pop | |
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  The DRBD child identified by ``disk_name``/``disk_id`` is detached
  from the MD device on the primary node and then removed from both
  nodes it lives on.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    nl = [self.cfg.GetMaster(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, locates the disk
    and the DRBD child to remove, and refuses to remove the mirror's
    last component.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: 'disk' stays bound to the matching device on break
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # logical_id[2] holds the DRBD port/id of the child device
    for child in disk.children:
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0:2] are the two nodes of the DRBD pair; pick the one
    # that is not the primary as the old secondary (for the hooks env)
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    Detaches the child from the MD device, then removes the block
    device from both of its nodes (failures there are only logged).

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechild(instance.primary_node,
                                         disk, child):
      raise errors.OpExecError("Can't remove child from mirror.")

    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
2854 | a8083063 | Iustin Pop | |
2855 | a8083063 | Iustin Pop | |
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  For every disk of a remote_raid1 instance, a new DRBD branch is
  created on the target node, attached to the MD device, synced, and
  only then is the old branch detached and removed.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    nl = [self.cfg.GetMaster(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and uses
    remote_raid1 with exactly one secondary; ``remote_node`` is
    optional and defaults to the current secondary.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is None:
      # no target given: rebuild the mirror in place on the current secondary
      remote_node = instance.secondary_nodes[0]
    else:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    self.op.remote_node = remote_node

  def Exec(self, feedback_fn):
    """Replace the disks of an instance.

    Phase 1: create and attach a new DRBD branch per disk and wait for
    sync.  Phase 2: verify none of the devices is degraded.  Phase 3:
    detach and remove the old branches (failures there are logged and
    skipped, leaving manual cleanup).

    """
    instance = self.instance
    # iv_name -> (md device, old child, new child), used by the later phases
    iv_names = {}
    # start of work
    remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      new_drbd = _GenerateMDDRBDBranch(cfg, self.cfg.GetVGName(),
                                       instance.primary_node, remote_node, size,
                                       "%s-%s" % (instance.name, dev.iv_name))
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
                                        new_drbd):
        logger.Error("Can't add mirror component to md!")
        # rollback: drop the new device from both nodes, secondary first
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the is_degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechild(instance.primary_node,
                                           dev, child):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3001 | a8083063 | Iustin Pop | |
3002 | a8083063 | Iustin Pop | |
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      wanted = []
      for iname in self.op.instances:
        inst = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(iname))
        if inst is None:
          raise errors.OpPrereqError("No such instance name '%s'" % iname)
        wanted.append(inst)
      self.wanted_instances = wanted
    else:
      # no names given: report on every configured instance
      self.wanted_instances = [self.cfg.GetInstanceInfo(iname)
                               for iname in self.cfg.GetInstanceList()]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Queries the device on the primary (and, when known, the secondary)
    node and recurses into its children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type == "drbd":
      # we change the snode then (otherwise we use the one passed in):
      # the secondary is whichever end of logical_id is not the primary
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_sstatus = None
    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)

    dev_children = []
    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # runtime state as reported by the node
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      # desired state as recorded in the configuration
      config_state = "up"
      if instance.status == "down":
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        }

    return result
3102 | a8083063 | Iustin Pop | |
3103 | a8083063 | Iustin Pop | |
class LUQueryNodeData(NoHooksLU):
  """Logical unit for querying node data.

  """
  _OP_REQP = ["nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional node list against the existing names.

    """
    if not isinstance(self.op.nodes, list):
      raise errors.OpPrereqError("Invalid argument type 'nodes'")
    if self.op.nodes:
      wanted = []
      for nname in self.op.nodes:
        node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(nname))
        if node is None:
          raise errors.OpPrereqError("No such node name '%s'" % nname)
        wanted.append(node)
      self.wanted_nodes = wanted
    else:
      # no names given: report on every configured node
      self.wanted_nodes = [self.cfg.GetNodeInfo(nname)
                           for nname in self.cfg.GetNodeList()]

  def Exec(self, feedback_fn):
    """Compute and return the list of nodes.

    Returns one tuple per wanted node: (name, primary_ip, secondary_ip,
    primary instance names, secondary instance names).

    """
    ilist = [self.cfg.GetInstanceInfo(iname)
             for iname in self.cfg.GetInstanceList()]
    result = []
    for node in self.wanted_nodes:
      pri_insts = [inst.name for inst in ilist
                   if inst.primary_node == node.name]
      sec_insts = [inst.name for inst in ilist
                   if node.name in inst.secondary_nodes]
      result.append((node.name, node.primary_ip, node.secondary_ip,
                     pri_insts, sec_insts))
    return result
3147 | a8083063 | Iustin Pop | |
3148 | a8083063 | Iustin Pop | |
3149 | a8083063 | Iustin Pop | class LUSetInstanceParms(LogicalUnit): |
3150 | a8083063 | Iustin Pop | """Modifies an instances's parameters.
|
3151 | a8083063 | Iustin Pop |
|
3152 | a8083063 | Iustin Pop | """
|
3153 | a8083063 | Iustin Pop | HPATH = "instance-modify"
|
3154 | a8083063 | Iustin Pop | HTYPE = constants.HTYPE_INSTANCE |
3155 | a8083063 | Iustin Pop | _OP_REQP = ["instance_name"]
|
3156 | a8083063 | Iustin Pop | |
3157 | a8083063 | Iustin Pop | def BuildHooksEnv(self): |
3158 | a8083063 | Iustin Pop | """Build hooks env.
|
3159 | a8083063 | Iustin Pop |
|
3160 | a8083063 | Iustin Pop | This runs on the master, primary and secondaries.
|
3161 | a8083063 | Iustin Pop |
|
3162 | a8083063 | Iustin Pop | """
|
3163 | a8083063 | Iustin Pop | env = { |
3164 | a8083063 | Iustin Pop | "INSTANCE_NAME": self.op.instance_name, |
3165 | a8083063 | Iustin Pop | } |
3166 | a8083063 | Iustin Pop | if self.mem: |
3167 | a8083063 | Iustin Pop | env["MEM_SIZE"] = self.mem |
3168 | a8083063 | Iustin Pop | if self.vcpus: |
3169 | a8083063 | Iustin Pop | env["VCPUS"] = self.vcpus |
3170 | a8083063 | Iustin Pop | if self.do_ip: |
3171 | a8083063 | Iustin Pop | env["INSTANCE_IP"] = self.ip |
3172 | a8083063 | Iustin Pop | if self.bridge: |
3173 | a8083063 | Iustin Pop | env["BRIDGE"] = self.bridge |
3174 | a8083063 | Iustin Pop | |
3175 | a8083063 | Iustin Pop | nl = [self.cfg.GetMaster(),
|
3176 | a8083063 | Iustin Pop | self.instance.primary_node] + list(self.instance.secondary_nodes) |
3177 | a8083063 | Iustin Pop | |
3178 | a8083063 | Iustin Pop | return env, nl, nl
|
3179 | a8083063 | Iustin Pop | |
3180 | a8083063 | Iustin Pop | def CheckPrereq(self): |
3181 | a8083063 | Iustin Pop | """Check prerequisites.
|
3182 | a8083063 | Iustin Pop |
|
3183 | a8083063 | Iustin Pop | This only checks the instance list against the existing names.
|
3184 | a8083063 | Iustin Pop |
|
3185 | a8083063 | Iustin Pop | """
|
3186 | a8083063 | Iustin Pop | self.mem = getattr(self.op, "mem", None) |
3187 | a8083063 | Iustin Pop | self.vcpus = getattr(self.op, "vcpus", None) |
3188 | a8083063 | Iustin Pop | self.ip = getattr(self.op, "ip", None) |
3189 | a8083063 | Iustin Pop | self.bridge = getattr(self.op, "bridge", None) |
3190 | a8083063 | Iustin Pop | if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4: |
3191 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("No changes submitted") |
3192 | a8083063 | Iustin Pop | if self.mem is not None: |
3193 | a8083063 | Iustin Pop | try:
|
3194 | a8083063 | Iustin Pop | self.mem = int(self.mem) |
3195 | a8083063 | Iustin Pop | except ValueError, err: |
3196 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Invalid memory size: %s" % str(err)) |
3197 | a8083063 | Iustin Pop | if self.vcpus is not None: |
3198 | a8083063 | Iustin Pop | try:
|
3199 | a8083063 | Iustin Pop | self.vcpus = int(self.vcpus) |
3200 | a8083063 | Iustin Pop | except ValueError, err: |
3201 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Invalid vcpus number: %s" % str(err)) |
3202 | a8083063 | Iustin Pop | if self.ip is not None: |
3203 | a8083063 | Iustin Pop | self.do_ip = True |
3204 | a8083063 | Iustin Pop | if self.ip.lower() == "none": |
3205 | a8083063 | Iustin Pop | self.ip = None |
3206 | a8083063 | Iustin Pop | else:
|
3207 | a8083063 | Iustin Pop | if not utils.IsValidIP(self.ip): |
3208 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Invalid IP address '%s'." % self.ip) |
3209 | a8083063 | Iustin Pop | else:
|
3210 | a8083063 | Iustin Pop | self.do_ip = False |
3211 | a8083063 | Iustin Pop | |
3212 | a8083063 | Iustin Pop | instance = self.cfg.GetInstanceInfo(
|
3213 | a8083063 | Iustin Pop | self.cfg.ExpandInstanceName(self.op.instance_name)) |
3214 | a8083063 | Iustin Pop | if instance is None: |
3215 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("No such instance name '%s'" % |
3216 | a8083063 | Iustin Pop | self.op.instance_name)
|
3217 | a8083063 | Iustin Pop | self.op.instance_name = instance.name
|
3218 | a8083063 | Iustin Pop | self.instance = instance
|
3219 | a8083063 | Iustin Pop | return
|
3220 | a8083063 | Iustin Pop | |
3221 | a8083063 | Iustin Pop | def Exec(self, feedback_fn): |
3222 | a8083063 | Iustin Pop | """Modifies an instance.
|
3223 | a8083063 | Iustin Pop |
|
3224 | a8083063 | Iustin Pop | All parameters take effect only at the next restart of the instance.
|
3225 | a8083063 | Iustin Pop | """
|
3226 | a8083063 | Iustin Pop | result = [] |
3227 | a8083063 | Iustin Pop | instance = self.instance
|
3228 | a8083063 | Iustin Pop | if self.mem: |
3229 | a8083063 | Iustin Pop | instance.memory = self.mem
|
3230 | a8083063 | Iustin Pop | result.append(("mem", self.mem)) |
3231 | a8083063 | Iustin Pop | if self.vcpus: |
3232 | a8083063 | Iustin Pop | instance.vcpus = self.vcpus
|
3233 | a8083063 | Iustin Pop | result.append(("vcpus", self.vcpus)) |
3234 | a8083063 | Iustin Pop | if self.do_ip: |
3235 | a8083063 | Iustin Pop | instance.nics[0].ip = self.ip |
3236 | a8083063 | Iustin Pop | result.append(("ip", self.ip)) |
3237 | a8083063 | Iustin Pop | if self.bridge: |
3238 | a8083063 | Iustin Pop | instance.nics[0].bridge = self.bridge |
3239 | a8083063 | Iustin Pop | result.append(("bridge", self.bridge)) |
3240 | a8083063 | Iustin Pop | |
3241 | a8083063 | Iustin Pop | self.cfg.AddInstance(instance)
|
3242 | a8083063 | Iustin Pop | |
3243 | a8083063 | Iustin Pop | return result
|
3244 | a8083063 | Iustin Pop | |
3245 | a8083063 | Iustin Pop | |
3246 | a8083063 | Iustin Pop | class LUQueryExports(NoHooksLU): |
3247 | a8083063 | Iustin Pop | """Query the exports list
|
3248 | a8083063 | Iustin Pop |
|
3249 | a8083063 | Iustin Pop | """
|
3250 | a8083063 | Iustin Pop | _OP_REQP = [] |
3251 | a8083063 | Iustin Pop | |
3252 | a8083063 | Iustin Pop | def CheckPrereq(self): |
3253 | a8083063 | Iustin Pop | """Check that the nodelist contains only existing nodes.
|
3254 | a8083063 | Iustin Pop |
|
3255 | a8083063 | Iustin Pop | """
|
3256 | a8083063 | Iustin Pop | nodes = getattr(self.op, "nodes", None) |
3257 | a8083063 | Iustin Pop | if not nodes: |
3258 | a8083063 | Iustin Pop | self.op.nodes = self.cfg.GetNodeList() |
3259 | a8083063 | Iustin Pop | else:
|
3260 | a8083063 | Iustin Pop | expnodes = [self.cfg.ExpandNodeName(node) for node in nodes] |
3261 | a8083063 | Iustin Pop | if expnodes.count(None) > 0: |
3262 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("At least one of the given nodes %s" |
3263 | a8083063 | Iustin Pop | " is unknown" % self.op.nodes) |
3264 | a8083063 | Iustin Pop | self.op.nodes = expnodes
|
3265 | a8083063 | Iustin Pop | |
3266 | a8083063 | Iustin Pop | def Exec(self, feedback_fn): |
3267 | a8083063 | Iustin Pop | |
3268 | a8083063 | Iustin Pop | """Compute the list of all the exported system images.
|
3269 | a8083063 | Iustin Pop |
|
3270 | a8083063 | Iustin Pop | Returns:
|
3271 | a8083063 | Iustin Pop | a dictionary with the structure node->(export-list)
|
3272 | a8083063 | Iustin Pop | where export-list is a list of the instances exported on
|
3273 | a8083063 | Iustin Pop | that node.
|
3274 | a8083063 | Iustin Pop |
|
3275 | a8083063 | Iustin Pop | """
|
3276 | a8083063 | Iustin Pop | return rpc.call_export_list(self.op.nodes) |
3277 | a8083063 | Iustin Pop | |
3278 | a8083063 | Iustin Pop | |
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  # hooks directory/path name under which this LU's hooks run
  HPATH = "instance-export"
  # hook type: this LU operates on a single instance
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before CheckPrereq runs
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3286 | a8083063 | Iustin Pop | |
3287 | a8083063 | Iustin Pop | def BuildHooksEnv(self): |
3288 | a8083063 | Iustin Pop | """Build hooks env.
|
3289 | a8083063 | Iustin Pop |
|
3290 | a8083063 | Iustin Pop | This will run on the master, primary node and target node.
|
3291 | a8083063 | Iustin Pop |
|
3292 | a8083063 | Iustin Pop | """
|
3293 | a8083063 | Iustin Pop | env = { |
3294 | a8083063 | Iustin Pop | "INSTANCE_NAME": self.op.instance_name, |
3295 | a8083063 | Iustin Pop | "EXPORT_NODE": self.op.target_node, |
3296 | a8083063 | Iustin Pop | "EXPORT_DO_SHUTDOWN": self.op.shutdown, |
3297 | a8083063 | Iustin Pop | } |
3298 | a8083063 | Iustin Pop | nl = [self.cfg.GetMaster(), self.instance.primary_node, |
3299 | a8083063 | Iustin Pop | self.op.target_node]
|
3300 | a8083063 | Iustin Pop | return env, nl, nl
|
3301 | a8083063 | Iustin Pop | |
3302 | a8083063 | Iustin Pop | def CheckPrereq(self): |
3303 | a8083063 | Iustin Pop | """Check prerequisites.
|
3304 | a8083063 | Iustin Pop |
|
3305 | a8083063 | Iustin Pop | This checks that the instance name is a valid one.
|
3306 | a8083063 | Iustin Pop |
|
3307 | a8083063 | Iustin Pop | """
|
3308 | a8083063 | Iustin Pop | instance_name = self.cfg.ExpandInstanceName(self.op.instance_name) |
3309 | a8083063 | Iustin Pop | self.instance = self.cfg.GetInstanceInfo(instance_name) |
3310 | a8083063 | Iustin Pop | if self.instance is None: |
3311 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Instance '%s' not found" % |
3312 | a8083063 | Iustin Pop | self.op.instance_name)
|
3313 | a8083063 | Iustin Pop | |
3314 | a8083063 | Iustin Pop | # node verification
|
3315 | a8083063 | Iustin Pop | dst_node_short = self.cfg.ExpandNodeName(self.op.target_node) |
3316 | a8083063 | Iustin Pop | self.dst_node = self.cfg.GetNodeInfo(dst_node_short) |
3317 | a8083063 | Iustin Pop | |
3318 | a8083063 | Iustin Pop | if self.dst_node is None: |
3319 | a8083063 | Iustin Pop | raise errors.OpPrereqError, ("Destination node '%s' is uknown." % |
3320 | a8083063 | Iustin Pop | self.op.target_node)
|
3321 | a8083063 | Iustin Pop | self.op.target_node = self.dst_node.name |
3322 | a8083063 | Iustin Pop | |
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Snapshots the instance's "sda" disk on its primary node, copies the
    snapshot to the target node, finalizes the export there and then
    removes any older export of the same instance found on other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op, feedback_fn)

    vgname = self.cfg.GetVGName()

    # Disk objects describing the LVM snapshots taken below; these still
    # have to be copied to the target node and cleaned up afterwards
    snap_disks = []

    try:
      for disk in instance.disks:
        # only the "sda" disk is snapshotted/exported; others are skipped
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # best-effort: log and continue, the export will simply lack
            # this disk
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
                                   logical_id=(vgname, new_dev_name),
                                   physical_id=(vgname, new_dev_name),
                                   iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if we
      # were the ones who shut it down above
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op, feedback_fn)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot from
    # the source node; failures are logged but do not abort the export
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                      instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    # write the export metadata on the target node
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
      for node in exportlist:
        if instance.name in exportlist[node]:
          # keep only the export we just made: remove any older copy
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))