#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import time
import logging

from ganeti import utils
from ganeti import errors
from ganeti import locking
from ganeti import constants
from ganeti import compat
from ganeti import query
from ganeti import qlang

from ganeti.cmdlib.base import ResultWithJobs, LogicalUnit, NoHooksLU, \
  Tasklet, _QueryBase
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_ONLINE, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, \
  _ExpandInstanceName, _ExpandItemName, \
  _ExpandNodeName, _ShareAll, _CheckNodeGroupInstances, _GetWantedNodes, \
  _GetWantedInstances, _RunPostHook, _RedistributeAncillaryFiles, \
  _MergeAndVerifyHvState, _MergeAndVerifyDiskState, _GetUpdatedIPolicy, \
  _ComputeNewInstanceViolations, _GetUpdatedParams, _CheckOSParams, \
  _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
  _ComputeIPolicyInstanceViolation, _AnnotateDiskParams, _SupportsOob, \
  _ComputeIPolicySpecViolation, _GetDefaultIAllocator, \
  _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes, \
  _CheckInstanceNodeGroups, _CheckParamsNotGlobal, \
  _IsExclusiveStorageEnabledNode, _CheckInstanceState, \
  _CheckIAllocatorOrNode, _FindFaultyInstanceDisks, _CheckNodeOnline
from ganeti.cmdlib.instance_utils import _AssembleInstanceDisks, \
  _BuildInstanceHookEnvByObject, _GetClusterDomainSecret, \
  _CheckNodeNotDrained, _RemoveDisks, _ShutdownInstanceDisks, \
  _StartInstanceDisks, _RemoveInstance

from ganeti.cmdlib.cluster import LUClusterActivateMasterIp, \
  LUClusterDeactivateMasterIp, LUClusterConfigQuery, LUClusterDestroy, \
  LUClusterPostInit, LUClusterQuery, LUClusterRedistConf, LUClusterRename, \
  LUClusterRepairDiskSizes, LUClusterSetParams, LUClusterVerify, \
  LUClusterVerifyConfig, LUClusterVerifyGroup, LUClusterVerifyDisks
from ganeti.cmdlib.group import LUGroupAdd, LUGroupAssignNodes, \
  LUGroupQuery, LUGroupSetParams, LUGroupRemove, LUGroupRename, \
  LUGroupEvacuate, LUGroupVerifyDisks
from ganeti.cmdlib.node import LUNodeAdd, LUNodeSetParams, \
  LUNodePowercycle, LUNodeEvacuate, LUNodeMigrate, LUNodeModifyStorage, \
  LUNodeQuery, LUNodeQueryvols, LUNodeQueryStorage, LUNodeRemove, \
  LURepairNodeStorage
from ganeti.cmdlib.instance import LUInstanceCreate, LUInstanceRename, \
  LUInstanceRemove, LUInstanceMove, LUInstanceQuery, LUInstanceQueryData, \
  LUInstanceRecreateDisks, LUInstanceGrowDisk, LUInstanceReplaceDisks, \
  LUInstanceActivateDisks, LUInstanceDeactivateDisks, LUInstanceStartup, \
  LUInstanceShutdown, LUInstanceReinstall, LUInstanceReboot, \
  LUInstanceConsole, LUInstanceFailover, LUInstanceMigrate, \
  LUInstanceMultiAlloc, LUInstanceSetParams, LUInstanceChangeGroup
from ganeti.cmdlib.backup import LUBackupQuery, LUBackupPrepare, \
  LUBackupExport, LUBackupRemove
from ganeti.cmdlib.query import LUQuery, LUQueryFields
from ganeti.cmdlib.operating_system import LUOsDiagnose
from ganeti.cmdlib.tags import LUTagsGet, LUTagsSearch, LUTagsSet, LUTagsDel
from ganeti.cmdlib.network import LUNetworkAdd, LUNetworkRemove, \
  LUNetworkSetParams, LUNetworkQuery, LUNetworkConnect, LUNetworkDisconnect
from ganeti.cmdlib.test import LUTestDelay, LUTestJqueue, LUTestAllocator
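
# Most of the LU* classes and _-prefixed helpers imported above are not
# referenced in this file itself: they are re-exported so that code
# importing ganeti.cmdlib keeps working after the split into submodules.
# In particular, the opcode processor (mcpu) looks logical units up by
# name in this package's namespace when dispatching opcodes.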


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
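  # The commands listed in _SKIP_MASTER are never run against the master
  # node: they would cut power to the very node executing this job.
  # CheckPrereq enforces this below.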

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1

    if not self.op.node_names:
      # Acquire node allocation lock only if all nodes are affected
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)
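    # The offline requirement above appears intended as a guard: powering
    # off a node the cluster still considers online would pull instances
    # and storage away without warning.  The opcode's ignore_status field
    # overrides the check.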

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []
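    # Each node contributes one entry to "ret": the node name first, then
    # the command's (status, payload) pair appended in the loop below.
    # Illustrative shape (not verbatim from the source):
    #   [[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, payload)],
    #    [(constants.RS_NORMAL, "node2"), (constants.RS_UNAVAIL, None)]]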

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does"
                               " not match actual power state (%s)"),
                              node.powered, node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

      if (self.op.command == constants.OOB_POWER_ON and
          idx < len(self.nodes) - 1):
        time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
      constants.OOB_POWER_ON,
      constants.OOB_POWER_OFF,
      constants.OOB_POWER_CYCLE,
      ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class _ExtStorageQuery(_QueryBase):
  FIELDS = query.EXTSTORAGE_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByProvider(rlist):
    """Remaps a per-node return list into a per-provider per-node dictionary

    @param rlist: a map with node names as keys and ExtStorage objects as values

    @rtype: dict
    @return: a dictionary with extstorage providers as keys and as
        value another map, with nodes as keys and tuples of
        (path, status, diagnose, parameters) as values, eg::

          {"provider1": {"node1": [("/usr/lib/...", True, "", [])],
                         "node2": [("/srv/...", False, "missing file", [])],
                         "node3": [("/srv/...", True, "", [])],
                        },
          }

    """
    all_es = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all providers invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, params) in nr.payload:
        if name not in all_es:
          # build a dict of nodes for this provider, containing an empty
          # list for each node that answered the RPC
          all_es[name] = {}
          for nname in good_nodes:
            all_es[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_es[name][node_name].append((path, status, diagnose, params))
    return all_es

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.name
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))

    data = {}

    nodegroup_list = lu.cfg.GetNodeGroupList()

    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #   { nodegroup1: status,
      #     nodegroup2: status }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)

        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []

        for node in nodegroup_nodes:
          if node in valid_nodes:
            if es_data[node] != []:
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)

        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True

      # Compute the provider's parameters
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          break

        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)

      params = list(parameters)

      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)

      data[es_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]
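    # Note on the final filter: _GetNames may include names the caller
    # asked for even when no node reported them, so providers without any
    # data are silently dropped from the result here.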


class LUExtStorageDiagnose(NoHooksLU):
  """Logical unit for ExtStorage diagnose/query.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
                               self.op.output_fields, False)
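    # MakeSimpleFilter turns the requested names into a query filter
    # (None when no names were given, i.e. "return everything"); the
    # trailing False disables locking, consistent with
    # _ExtStorageQuery.ExpandNames above.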

  def ExpandNames(self):
    self.eq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.eq.OldStyleQuery(self)
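    # OldStyleQuery (inherited from _QueryBase) renders the query result
    # as plain rows for the legacy query interface.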


class LURestrictedCommand(NoHooksLU):
  """Logical unit for executing restricted commands.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.nodes:
      self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.nodes,
      }
    self.share_locks = {
      locking.LEVEL_NODE: not self.op.use_locking,
      }
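    # Note the inversion: with use_locking=True the node locks are
    # acquired exclusively, otherwise in shared mode.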

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Execute restricted command and return output.

    """
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    # Check if correct locks are held
    assert set(self.op.nodes).issubset(owned_nodes)

    rpcres = self.rpc.call_restricted_command(self.op.nodes, self.op.command)

    result = []

    for node_name in self.op.nodes:
      nres = rpcres[node_name]
      if nres.fail_msg:
        msg = ("Command '%s' on node '%s' failed: %s" %
               (self.op.command, node_name, nres.fail_msg))
        result.append((False, msg))
      else:
        result.append((True, nres.payload))

    return result
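    # One (success, payload-or-error-message) pair per node, in the same
    # order as self.op.nodes.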