1 |
|
#
|
2 |
|
#
|
3 |
|
|
4 |
|
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
|
5 |
|
#
|
6 |
|
# This program is free software; you can redistribute it and/or modify
|
7 |
|
# it under the terms of the GNU General Public License as published by
|
8 |
|
# the Free Software Foundation; either version 2 of the License, or
|
9 |
|
# (at your option) any later version.
|
10 |
|
#
|
11 |
|
# This program is distributed in the hope that it will be useful, but
|
12 |
|
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
|
# General Public License for more details.
|
15 |
|
#
|
16 |
|
# You should have received a copy of the GNU General Public License
|
17 |
|
# along with this program; if not, write to the Free Software
|
18 |
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
|
# 02110-1301, USA.
|
20 |
|
|
21 |
|
|
22 |
|
"""Module implementing the master-side code."""
|
23 |
|
|
24 |
|
# pylint: disable=W0201,C0302
|
25 |
|
|
26 |
|
# W0201 since most LU attributes are defined in CheckPrereq or similar
|
27 |
|
# functions
|
28 |
|
|
29 |
|
# C0302: since we have waaaay too many lines in this module
|
30 |
|
|
31 |
|
import os
|
32 |
|
import os.path
|
33 |
|
import time
|
34 |
|
import re
|
35 |
|
import logging
|
36 |
|
import copy
|
37 |
|
import OpenSSL
|
38 |
|
import socket
|
39 |
|
import tempfile
|
40 |
|
import shutil
|
41 |
|
import itertools
|
42 |
|
import operator
|
43 |
|
|
44 |
|
from ganeti import ssh
|
45 |
|
from ganeti import utils
|
46 |
|
from ganeti import errors
|
47 |
|
from ganeti import hypervisor
|
48 |
|
from ganeti import locking
|
49 |
|
from ganeti import constants
|
50 |
|
from ganeti import objects
|
51 |
|
from ganeti import ssconf
|
52 |
|
from ganeti import uidpool
|
53 |
|
from ganeti import compat
|
54 |
|
from ganeti import masterd
|
55 |
|
from ganeti import netutils
|
56 |
|
from ganeti import query
|
57 |
|
from ganeti import qlang
|
58 |
|
from ganeti import opcodes
|
59 |
|
from ganeti import ht
|
60 |
|
from ganeti import rpc
|
61 |
|
from ganeti import runtime
|
62 |
|
from ganeti import pathutils
|
63 |
|
from ganeti import vcluster
|
64 |
|
from ganeti import network
|
65 |
|
from ganeti.masterd import iallocator
|
66 |
|
|
67 |
|
import ganeti.masterd.instance # pylint: disable=W0611
|
68 |
|
|
69 |
|
|
70 |
|
# Groups of instance admin states, used by LUs to check whether an
# instance is in an acceptable state for a given operation.
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]

#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = \
  frozenset(INSTANCE_DOWN + [constants.ADMINST_OFFLINE])
|
79 |
|
|
80 |
|
|
81 |
|
class ResultWithJobs:
  """Container pairing an LU result with follow-up jobs.

  When an instance of this class is returned from L{LogicalUnit.Exec},
  L{mcpu._ProcessResult} recognizes it, submits every job listed in the
  C{jobs} attribute and merges the resulting job IDs into the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Store the jobs and any extra return values.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcode.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.other = kwargs
    self.jobs = jobs
|
101 |
|
|
102 |
|
|
103 |
|
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  # Hooks path for this LU; None means "run no hooks" (see BuildHooksEnv)
  HPATH = None
  # Hooks object type for this LU; None means "run no hooks"
  HTYPE = None
  # Whether this LU must hold the Big Ganeti Lock exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc_runner):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor driving this LU; also supplies
        the Log/LogWarning/LogInfo/LogStep callbacks aliased below
    @param op: the opcode to execute; validated at the end of __init__
    @param context: object providing the cluster configuration (C{cfg})
        and the Ganeti lock manager (C{glm})
    @param rpc_runner: object stored as C{self.rpc} for RPC calls

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    # readability alias
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc_runner

    # Dictionaries used to declare locking needs to mcpu
    self.needed_locks = None
    # Per-level flags: non-zero/True means locks at that level are shared
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.opportunistic_locks = dict.fromkeys(locking.LEVELS, False)

    self.add_locks = {}
    self.remove_locks = {}

    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}

    # logging
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute: default to 0 when the opcode
    # carries no usable debug_level
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets; when set (a list), CheckPrereq/Exec delegate to them
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    # Default implementation intentionally does nothing
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that
        level (note that in this case C{DeclareLocks} won't be called
        at all for that level)
      - if you need locks at a level, but you can't calculate it in
        this function, initialise that level with an empty list and do
        further processing in L{LogicalUnit.DeclareLocks} (see that
        function's docstring)
      - don't put anything for the BGL level
      - if you want all locks at a level use L{locking.ALL_SET} as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of L{ganeti.locking.LEVELS}

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    # When tasklets are in use, delegate the prerequisite check to each
    # of them in order; otherwise the default is a no-op.
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    # When tasklets are in use, execute them in order; otherwise the LU
    # itself must override this method.
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and could
    # be a function warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      # Calling this helper after instance-level locks were already
      # declared would silently overwrite them; fail fast instead.
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False,
                          level=locking.LEVEL_NODE):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances
    @param level: Which lock level to use for locking nodes

    """
    assert level in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # Either replace or append, depending on the declared recalculation
    # mode; any other mode is a programming error.
    if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
      self.needed_locks[level] = wanted_nodes
    elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
      self.needed_locks[level].extend(wanted_nodes)
    else:
      raise errors.ProgrammerError("Unknown recalculation mode")

    # The recalculation request is consumed; remove it so repeated calls
    # for the same level fail the assertion above.
    del self.recalculate_locks[level]
|
421 |
|
|
422 |
|
|
423 |
|
class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Base class for logical units that never run hooks.

  Deriving from this class saves each hook-less LU from having to
  stub out the hook-related methods itself, reducing duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksNodes(self):
    """Fail loudly: hook nodes must never be built for a NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")

  def BuildHooksEnv(self):
    """Fail loudly: a hooks environment must never be built for a NoHooksLU.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")
|
446 |
|
|
447 |
|
|
448 |
|
class Tasklet:
  """Base class for tasklets.

  A tasklet is a small, self-contained subcomponent of a logical unit.
  An LU may be built entirely out of tasklets or combine them with
  legacy code. All locking happens in the owning LU; tasklets know
  nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Convenience aliases into the owning LU
    self.rpc = lu.rpc
    self.cfg = lu.cfg

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if
    it hasn't been done before.

    """

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError
|
492 |
|
|
493 |
|
|
494 |
|
class _QueryBase:
  """Base for query utility classes.

  Subclasses implement the abstract methods below and set L{FIELDS} to
  their field definitions; this class drives the common name expansion,
  locking bookkeeping and result formatting.

  """
  #: Attribute holding field definitions
  FIELDS = None

  #: Field to sort by
  SORT_FIELD = "name"

  def __init__(self, qfilter, fields, use_locking):
    """Initializes this class.

    @param qfilter: query filter, passed to L{query.Query}
    @param fields: names of the fields requested by the caller
    @type use_locking: bool
    @param use_locking: whether the query should be run under locks

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
                             namefield=self.SORT_FIELD)
    # What data (and which item names, if any) the query asks for
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    # Both are filled in later by ExpandNames/DeclareLocks implementations
    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    @param lu: the logical unit on whose behalf the query runs
    @param all_names: all existing item names, used when not locking
    @param lock_level: lock level to read owned lock names from

    """
    # Under locking, the owned locks are exactly the items we may report
    if self.do_locking:
      names = lu.owned_locks(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    # Items may have disappeared between name expansion and now
    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)
|
584 |
|
|
585 |
|
|
586 |
|
def _ShareAll():
  """Build a share-locks declaration covering every lock level.

  @rtype: dict
  @return: mapping of each level in L{locking.LEVELS} to 1 (shared)

  """
  return dict((level, 1) for level in locking.LEVELS)
|
591 |
|
|
592 |
|
|
593 |
|
def _AnnotateDiskParams(instance, devs, cfg):
  """Thin convenience wrapper around L{rpc.AnnotateDiskParams}.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @returns The annotated disk copies
  @see L{rpc.AnnotateDiskParams}

  """
  disk_params = cfg.GetInstanceDiskParams(instance)
  return rpc.AnnotateDiskParams(instance.disk_template, devs, disk_params)
|
606 |
|
|
607 |
|
|
608 |
|
def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
                              cur_group_uuid):
  """Verify that node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance name as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_nodes: iterable of string
  @param owned_nodes: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for name in instances:
    inst = instances[name]

    # All of the instance's nodes must still be covered by our node locks
    assert owned_nodes.issuperset(inst.all_nodes), \
      "Instance %s's nodes changed while we kept the lock" % name

    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)

    # When a specific group is given, the instance must touch it
    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (name, cur_group_uuid)
|
632 |
|
|
633 |
|
|
634 |
|
def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
|
635 |
|
primary_only=False):
|
636 |
|
"""Checks if the owned node groups are still correct for an instance.
|
637 |
|
|
638 |
|
@type cfg: L{config.ConfigWriter}
|
639 |
|
@param cfg: The cluster configuration
|
640 |
|
@type instance_name: string
|
641 |
|
@param instance_name: Instance name
|
642 |
|
@type owned_groups: set or frozenset
|
643 |
|
@param owned_groups: List of currently owned node groups
|
644 |
|
@type primary_only: boolean
|
645 |
|
@param primary_only: Whether to check node groups for only the primary node
|
646 |
|
|
647 |
|
"""
|
648 |
|
inst_groups = cfg.GetInstanceNodeGroups(instance_name, primary_only)
|
649 |
|
|
650 |
|
if not owned_groups.issuperset(inst_groups):
|
651 |
|
raise errors.OpPrereqError("Instance %s's node groups changed since"
|
652 |
|
" locks were acquired, current groups are"
|
653 |
|
" are '%s', owning groups '%s'; retry the"
|
654 |
|
" operation" %
|
655 |
|
(instance_name,
|
656 |
|
utils.CommaJoin(inst_groups),
|
657 |
|
utils.CommaJoin(owned_groups)),
|
658 |
|
errors.ECODE_STATE)
|
659 |
|
|
660 |
|
return inst_groups
|
661 |
|
|
662 |
|
|
663 |
|
def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
|
664 |
|
"""Checks if the instances in a node group are still correct.
|
665 |
|
|
666 |
|
@type cfg: L{config.ConfigWriter}
|
667 |
|
@param cfg: The cluster configuration
|
668 |
|
@type group_uuid: string
|
669 |
|
@param group_uuid: Node group UUID
|
670 |
|
@type owned_instances: set or frozenset
|
671 |
|
@param owned_instances: List of currently owned instances
|
672 |
|
|
673 |
|
"""
|
674 |
|
wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
|
675 |
|
if owned_instances != wanted_instances:
|
676 |
|
raise errors.OpPrereqError("Instances in node group '%s' changed since"
|
677 |
|
" locks were acquired, wanted '%s', have '%s';"
|
678 |
|
" retry the operation" %
|
679 |
|
(group_uuid,
|
680 |
|
utils.CommaJoin(wanted_instances),
|
681 |
|
utils.CommaJoin(owned_instances)),
|
682 |
|
errors.ECODE_STATE)
|
683 |
|
|
684 |
|
return wanted_instances
|
685 |
|
|
686 |
|
|
687 |
|
def _SupportsOob(cfg, node):
  """Tells whether a node supports out-of-band management.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  ndparams = cfg.GetNdParams(node)
  return ndparams[constants.ND_OOB_PROGRAM]
|
698 |
|
|
699 |
|
|
700 |
|
def _IsExclusiveStorageEnabledNode(cfg, node):
  """Tells whether exclusive_storage is in effect on a node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  ndparams = cfg.GetNdParams(node)
  return ndparams[constants.ND_EXCLUSIVE_STORAGE]
|
712 |
|
|
713 |
|
|
714 |
|
def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
  """Tells whether exclusive_storage is in effect for a node, by name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type nodename: string
  @param nodename: The node
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given name

  """
  node_info = cfg.GetNodeInfo(nodename)
  if node_info is None:
    raise errors.OpPrereqError("Invalid node name %s" % nodename,
                               errors.ECODE_NOENT)
  return _IsExclusiveStorageEnabledNode(cfg, node_info)
|
731 |
|
|
732 |
|
|
733 |
|
def _CopyLockList(names):
  """Return a copy of a list of lock names.

  The special value L{locking.ALL_SET} is passed through unchanged;
  anything else is shallow-copied via slicing.

  """
  return locking.ALL_SET if names == locking.ALL_SET else names[:]
|
743 |
|
|
744 |
|
|
745 |
|
def _GetWantedNodes(lu, nodes):
  """Expand and validate a list of node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the expanded names when C{nodes} was given, otherwise all
      node names, nicely sorted

  """
  if not nodes:
    return utils.NiceSort(lu.cfg.GetNodeList())
  return [_ExpandNodeName(lu.cfg, name) for name in nodes]
|
761 |
|
|
762 |
|
|
763 |
|
def _GetWantedInstances(lu, instances):
  """Expand and validate a list of instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the expanded names when C{instances} was given, otherwise
      all instance names, nicely sorted
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    return [_ExpandInstanceName(lu.cfg, name) for name in instances]
  return utils.NiceSort(lu.cfg.GetInstanceList())
|
781 |
|
|
782 |
|
|
783 |
|
def _GetUpdatedParams(old_params, update_dict,
|
784 |
|
use_default=True, use_none=False):
|
785 |
|
"""Return the new version of a parameter dictionary.
|
786 |
|
|
787 |
|
@type old_params: dict
|
788 |
|
@param old_params: old parameters
|
789 |
|
@type update_dict: dict
|
790 |
|
@param update_dict: dict containing new parameter values, or
|
791 |
|
constants.VALUE_DEFAULT to reset the parameter to its default
|
792 |
|
value
|
793 |
|
@param use_default: boolean
|
794 |
|
@type use_default: whether to recognise L{constants.VALUE_DEFAULT}
|
795 |
|
values as 'to be deleted' values
|
796 |
|
@param use_none: boolean
|
797 |
|
@type use_none: whether to recognise C{None} values as 'to be
|
798 |
|
deleted' values
|
799 |
|
@rtype: dict
|
800 |
|
@return: the new parameter dictionary
|
801 |
|
|
802 |
|
"""
|
803 |
|
params_copy = copy.deepcopy(old_params)
|
804 |
|
for key, val in update_dict.iteritems():
|
805 |
|
if ((use_default and val == constants.VALUE_DEFAULT) or
|
806 |
|
(use_none and val is None)):
|
807 |
|
try:
|
808 |
|
del params_copy[key]
|
809 |
|
except KeyError:
|
810 |
|
pass
|
811 |
|
else:
|
812 |
|
params_copy[key] = val
|
813 |
|
return params_copy
|
814 |
|
|
815 |
|
|
816 |
|
def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
|
817 |
|
"""Return the new version of an instance policy.
|
818 |
|
|
819 |
|
@param group_policy: whether this policy applies to a group and thus
|
820 |
|
we should support removal of policy entries
|
821 |
|
|
822 |
|
"""
|
823 |
|
ipolicy = copy.deepcopy(old_ipolicy)
|
824 |
|
for key, value in new_ipolicy.items():
|
825 |
|
if key not in constants.IPOLICY_ALL_KEYS:
|
826 |
|
raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
|
827 |
|
errors.ECODE_INVAL)
|
828 |
|
if (not value or value == [constants.VALUE_DEFAULT] or
|
829 |
|
value == constants.VALUE_DEFAULT):
|
830 |
|
if group_policy:
|
831 |
|
if key in ipolicy:
|
832 |
|
del ipolicy[key]
|
833 |
|
else:
|
834 |
|
raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
|
835 |
|
" on the cluster'" % key,
|
836 |
|
errors.ECODE_INVAL)
|
837 |
|
else:
|
838 |
|
if key in constants.IPOLICY_PARAMETERS:
|
839 |
|
# FIXME: we assume all such values are float
|
840 |
|
try:
|
841 |
|
ipolicy[key] = float(value)
|
842 |
|
except (TypeError, ValueError), err:
|
843 |
|
raise errors.OpPrereqError("Invalid value for attribute"
|
844 |
|
" '%s': '%s', error: %s" %
|
845 |
|
(key, value, err), errors.ECODE_INVAL)
|
846 |
|
elif key == constants.ISPECS_MINMAX:
|
847 |
|
for minmax in value:
|
848 |
|
for k in minmax.keys():
|
849 |
|
utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
|
850 |
|
ipolicy[key] = value
|
851 |
|
elif key == constants.ISPECS_STD:
|
852 |
|
if group_policy:
|
853 |
|
msg = "%s cannot appear in group instance specs" % key
|
854 |
|
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
|
855 |
|
ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
|
856 |
|
use_none=False, use_default=False)
|
857 |
|
utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
|
858 |
|
else:
|
859 |
|
# FIXME: we assume all others are lists; this should be redone
|
860 |
|
# in a nicer way
|
861 |
|
ipolicy[key] = list(value)
|
862 |
|
try:
|
863 |
|
objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
|
864 |
|
except errors.ConfigurationError, err:
|
865 |
|
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
|
866 |
|
errors.ECODE_INVAL)
|
867 |
|
return ipolicy
|
868 |
|
|
869 |
|
|
870 |
|
def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def _MergeOne(old_sub, new_sub):
    # Merge a single sub-dict and enforce the expected value types
    merged = _GetUpdatedParams(old_sub, new_sub)
    utils.ForceDictType(merged, type_check)
    return merged

  result = copy.deepcopy(base)
  for key, value in updates.items():
    result[key] = _MergeOne(base.get(key, {}), value)
  return result
|
888 |
|
|
889 |
|
|
890 |
|
def _MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict, or None if the opcode carried
      no hypervisor state

  """
  if not op_input:
    return None

  unknown_hvs = set(op_input) - constants.HYPER_TYPES
  if unknown_hvs:
    raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                               " %s" % utils.CommaJoin(unknown_hvs),
                               errors.ECODE_INVAL)
  if obj_input is None:
    obj_input = {}
  return _UpdateAndVerifySubDict(obj_input, op_input,
                                 constants.HVSTS_PARAMETER_TYPES)
|
910 |
|
|
911 |
|
|
912 |
|
def _MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict, or None if the opcode carried
      no disk state

  """
  if not op_input:
    return None

  unknown_dst = set(op_input) - constants.DS_VALID_TYPES
  if unknown_dst:
    raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                               utils.CommaJoin(unknown_dst),
                               errors.ECODE_INVAL)
  if obj_input is None:
    obj_input = {}
  type_check = constants.DSS_PARAMETER_TYPES
  return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                            type_check))
              for key, value in op_input.items())
|
933 |
|
|
934 |
|
|
935 |
|
def _ReleaseLocks(lu, level, names=None, keep=None):
|
936 |
|
"""Releases locks owned by an LU.
|
937 |
|
|
938 |
|
@type lu: L{LogicalUnit}
|
939 |
|
@param level: Lock level
|
940 |
|
@type names: list or None
|
941 |
|
@param names: Names of locks to release
|
942 |
|
@type keep: list or None
|
943 |
|
@param keep: Names of locks to retain
|
944 |
|
|
945 |
|
"""
|
946 |
|
assert not (keep is not None and names is not None), \
|
947 |
|
"Only one of the 'names' and the 'keep' parameters can be given"
|
948 |
|
|
949 |
|
if names is not None:
|
950 |
|
should_release = names.__contains__
|
951 |
|
elif keep:
|
952 |
|
should_release = lambda name: name not in keep
|
953 |
|
else:
|
954 |
|
should_release = None
|
955 |
|
|
956 |
|
owned = lu.owned_locks(level)
|
957 |
|
if not owned:
|
958 |
|
# Not owning any lock at this level, do nothing
|
959 |
|
pass
|
960 |
|
|
961 |
|
elif should_release:
|
962 |
|
retain = []
|
963 |
|
release = []
|
964 |
|
|
965 |
|
# Determine which locks to release
|
966 |
|
for name in owned:
|
967 |
|
if should_release(name):
|
968 |
|
release.append(name)
|
969 |
|
else:
|
970 |
|
retain.append(name)
|
971 |
|
|
972 |
|
assert len(lu.owned_locks(level)) == (len(retain) + len(release))
|
973 |
|
|
974 |
|
# Release just some locks
|
975 |
|
lu.glm.release(level, names=release)
|
976 |
|
|
977 |
|
assert frozenset(lu.owned_locks(level)) == frozenset(retain)
|
978 |
|
else:
|
979 |
|
# Release everything
|
980 |
|
lu.glm.release(level)
|
981 |
|
|
982 |
|
assert not lu.glm.is_owned(level), "No locks should be owned"
|
983 |
|
|
984 |
|
|
985 |
|
def _MapInstanceDisksToNodes(instances):
|
986 |
|
"""Creates a map from (node, volume) to instance name.
|
987 |
|
|
988 |
|
@type instances: list of L{objects.Instance}
|
989 |
|
@rtype: dict; tuple of (node name, volume name) as key, instance name as value
|
990 |
|
|
991 |
|
"""
|
992 |
|
return dict(((node, vol), inst.name)
|
993 |
|
for inst in instances
|
994 |
|
for (node, vols) in inst.MapLVsByNode().items()
|
995 |
|
for vol in vols)
|
996 |
|
|
997 |
|
|
998 |
|
def _RunPostHook(lu, node_name):
|
999 |
|
"""Runs the post-hook for an opcode on a single node.
|
1000 |
|
|
1001 |
|
"""
|
1002 |
|
hm = lu.proc.BuildHooksManager(lu)
|
1003 |
|
try:
|
1004 |
|
hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
|
1005 |
|
except Exception, err: # pylint: disable=W0703
|
1006 |
|
lu.LogWarning("Errors occurred running hooks on %s: %s",
|
1007 |
|
node_name, err)
|
1008 |
|
|
1009 |
|
|
1010 |
|
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @raise errors.OpPrereqError: if any selected field matches neither set

  """
  valid_fields = utils.FieldSet()
  valid_fields.Extend(static)
  valid_fields.Extend(dynamic)

  unknown = valid_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown), errors.ECODE_INVAL)
|
1027 |
|
|
1028 |
|
|
1029 |
|
def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
|
1030 |
|
"""Make sure that none of the given paramters is global.
|
1031 |
|
|
1032 |
|
If a global parameter is found, an L{errors.OpPrereqError} exception is
|
1033 |
|
raised. This is used to avoid setting global parameters for individual nodes.
|
1034 |
|
|
1035 |
|
@type params: dictionary
|
1036 |
|
@param params: Parameters to check
|
1037 |
|
@type glob_pars: dictionary
|
1038 |
|
@param glob_pars: Forbidden parameters
|
1039 |
|
@type kind: string
|
1040 |
|
@param kind: Kind of parameters (e.g. "node")
|
1041 |
|
@type bad_levels: string
|
1042 |
|
@param bad_levels: Level(s) at which the parameters are forbidden (e.g.
|
1043 |
|
"instance")
|
1044 |
|
@type good_levels: strings
|
1045 |
|
@param good_levels: Level(s) at which the parameters are allowed (e.g.
|
1046 |
|
"cluster or group")
|
1047 |
|
|
1048 |
|
"""
|
1049 |
|
used_globals = glob_pars.intersection(params)
|
1050 |
|
if used_globals:
|
1051 |
|
msg = ("The following %s parameters are global and cannot"
|
1052 |
|
" be customized at %s level, please modify them at"
|
1053 |
|
" %s level: %s" %
|
1054 |
|
(kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
|
1055 |
|
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
|
1056 |
|
|
1057 |
|
|
1058 |
|
def _CheckNodeOnline(lu, node, msg=None):
|
1059 |
|
"""Ensure that a given node is online.
|
1060 |
|
|
1061 |
|
@param lu: the LU on behalf of which we make the check
|
1062 |
|
@param node: the node to check
|
1063 |
|
@param msg: if passed, should be a message to replace the default one
|
1064 |
|
@raise errors.OpPrereqError: if the node is offline
|
1065 |
|
|
1066 |
|
"""
|
1067 |
|
if msg is None:
|
1068 |
|
msg = "Can't use offline node"
|
1069 |
|
if lu.cfg.GetNodeInfo(node).offline:
|
1070 |
|
raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
|
1071 |
|
|
1072 |
|
|
1073 |
|
def _CheckNodeNotDrained(lu, node):
|
1074 |
|
"""Ensure that a given node is not drained.
|
1075 |
|
|
1076 |
|
@param lu: the LU on behalf of which we make the check
|
1077 |
|
@param node: the node to check
|
1078 |
|
@raise errors.OpPrereqError: if the node is drained
|
1079 |
|
|
1080 |
|
"""
|
1081 |
|
if lu.cfg.GetNodeInfo(node).drained:
|
1082 |
|
raise errors.OpPrereqError("Can't use drained node %s" % node,
|
1083 |
|
errors.ECODE_STATE)
|
1084 |
|
|
1085 |
|
|
1086 |
|
def _CheckNodeVmCapable(lu, node):
|
1087 |
|
"""Ensure that a given node is vm capable.
|
1088 |
|
|
1089 |
|
@param lu: the LU on behalf of which we make the check
|
1090 |
|
@param node: the node to check
|
1091 |
|
@raise errors.OpPrereqError: if the node is not vm capable
|
1092 |
|
|
1093 |
|
"""
|
1094 |
|
if not lu.cfg.GetNodeInfo(node).vm_capable:
|
1095 |
|
raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
|
1096 |
|
errors.ECODE_STATE)
|
1097 |
|
|
1098 |
|
|
1099 |
|
def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if force_variant:
    # Variant mismatches are explicitly ignored
    return
  _CheckOSVariant(result.payload, os_name)
|
1115 |
|
|
1116 |
|
|
1117 |
|
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if result.payload:
    return
  msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
         " please fix and re-run this command" % secondary_ip)
  if prereq:
    raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
  raise errors.OpExecError(msg)
|
1142 |
|
|
1143 |
|
|
1144 |
|
def _CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  @param nresult: remote verification results for one node (dict keyed
      by C{constants.NV_*})
  @type exclusive_storage: bool
  @param exclusive_storage: whether exclusive-storage checks must be run
  @return: tuple of (list of error messages, exclusive-storage PV info
      or None)

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []
  # check that ':' is not present in PV names, since it's a
  # special character for lvcreate (denotes the range of PEs to
  # use on the PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)
|
1171 |
|
|
1172 |
|
|
1173 |
|
def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  The secret is read strictly (the file must contain exactly one line).

  """
  secret_file = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  return utils.ReadOneLineFile(secret_file, strict=True)
|
1179 |
|
|
1180 |
|
|
1181 |
|
def _CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param req_states: the admin states the instance must be in
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP in req_states:
    # "Up" is an acceptable state, no need to verify the instance is down
    return

  pnode = instance.primary_node
  if lu.cfg.GetNodeInfo(pnode).offline:
    lu.LogWarning("Primary node offline, ignoring check that instance"
                  " is down")
    return

  # Ask the primary node whether the instance is actually running
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)
  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, msg), errors.ECODE_STATE)
|
1210 |
|
|
1211 |
|
|
1212 |
|
def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    # Unset or auto-computed values are never range violations
    return None
  # A missing limit defaults to the value itself, i.e. no constraint
  upper = ispecs[constants.ISPECS_MAX].get(name, value)
  lower = ispecs[constants.ISPECS_MIN].get(name, value)
  if lower <= value <= upper:
    return None
  if qualifier:
    fqn = "%s/%s" % (name, qualifier)
  else:
    fqn = name
  return ("%s value %s is not in range [%s, %s]" %
          (fqn, value, lower, upper))
|
1235 |
|
|
1236 |
|
|
1237 |
|
def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                 nic_count, disk_sizes, spindle_use,
                                 disk_template,
                                 _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list of no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  # The specs must fit one of the allowed min/max pairs; report the
  # violations of the pair the specs come closest to satisfying.
  # Use a real list (not a lazy filter object) so len() below is safe
  # regardless of the Python version.
  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = [err
            for err in (_compute_fn(name, qualifier, minmax, value)
                        for (name, qualifier, value) in test_settings)
            if err]
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs
|
1290 |
|
|
1291 |
|
|
1292 |
|
def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                     _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  # Fill the backend parameters with the cluster defaults first
  be_full = cfg.GetClusterInfo().FillBE(instance)
  disks = instance.disks
  return _compute_fn(ipolicy,
                     be_full[constants.BE_MAXMEM],
                     be_full[constants.BE_VCPUS],
                     len(disks),
                     len(instance.nics),
                     [disk.size for disk in disks],
                     be_full[constants.BE_SPINDLE_USE],
                     instance.disk_template)
|
1317 |
|
|
1318 |
|
|
1319 |
|
def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  spec = instance_spec.get
  return _compute_fn(ipolicy,
                     spec(constants.ISPEC_MEM_SIZE, None),
                     spec(constants.ISPEC_CPU_COUNT, None),
                     spec(constants.ISPEC_DISK_COUNT, 0),
                     spec(constants.ISPEC_NIC_COUNT, 0),
                     spec(constants.ISPEC_DISK_SIZE, []),
                     spec(constants.ISPEC_SPINDLE_USE, None),
                     disk_template)
|
1343 |
|
|
1344 |
|
|
1345 |
|
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=_ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  # Staying in the same group can never introduce new violations
  if current_group != target_group:
    return _compute_fn(ipolicy, instance, cfg)
  return []
|
1364 |
|
|
1365 |
|
|
1366 |
|
def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                            _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if not res:
    return
  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(res))
  if ignore:
    # Violations are only reported, not enforced
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
|
1390 |
|
|
1391 |
|
|
1392 |
|
def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violates the new ipolicy but
      did not before

  """
  violating_after = _ComputeViolatingInstances(new_ipolicy, instances, cfg)
  violating_before = _ComputeViolatingInstances(old_ipolicy, instances, cfg)
  return violating_after - violating_before
|
1406 |
|
|
1407 |
|
|
1408 |
|
def _ExpandItemName(fn, name, kind):
|
1409 |
|
"""Expand an item name.
|
1410 |
|
|
1411 |
|
@param fn: the function to use for expansion
|
1412 |
|
@param name: requested item name
|
1413 |
|
@param kind: text description ('Node' or 'Instance')
|
1414 |
|
@return: the resolved (full) name
|
1415 |
|
@raise errors.OpPrereqError: if the item is not found
|
1416 |
|
|
1417 |
|
"""
|
1418 |
|
full_name = fn(name)
|
1419 |
|
if full_name is None:
|
1420 |
|
raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
|
1421 |
|
errors.ECODE_NOENT)
|
1422 |
|
return full_name
|
1423 |
|
|
1424 |
|
|
1425 |
|
def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  expander = cfg.ExpandNodeName
  return _ExpandItemName(expander, name, "Node")
|
1428 |
|
|
1429 |
|
|
1430 |
|
def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  expander = cfg.ExpandInstanceName
  return _ExpandItemName(expander, name, "Instance")
|
1433 |
|
|
1434 |
|
|
1435 |
|
def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
|
1436 |
|
mac_prefix, tags):
|