Revision 814386b7

b/Makefile.am
 	lib/cmdlib/operating_system.py \
 	lib/cmdlib/tags.py \
 	lib/cmdlib/network.py \
+	lib/cmdlib/misc.py \
 	lib/cmdlib/test.py

 hypervisor_PYTHON = \
b/lib/cmdlib/__init__.py
 
 # C0302: since we have waaaay too many lines in this module
 
-import time
-import logging
-
-from ganeti import utils
-from ganeti import errors
-from ganeti import locking
-from ganeti import constants
-from ganeti import compat
-from ganeti import query
-from ganeti import qlang
-
 from ganeti.cmdlib.base import ResultWithJobs, LogicalUnit, NoHooksLU, \
   Tasklet, _QueryBase
 from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_ONLINE, \
......
 from ganeti.cmdlib.tags import LUTagsGet, LUTagsSearch, LUTagsSet, LUTagsDel
 from ganeti.cmdlib.network import LUNetworkAdd, LUNetworkRemove, \
   LUNetworkSetParams, LUNetworkQuery, LUNetworkConnect, LUNetworkDisconnect
+from ganeti.cmdlib.misc import LUOobCommand, LUExtStorageDiagnose, \
+  LURestrictedCommand
 from ganeti.cmdlib.test import LUTestDelay, LUTestJqueue, LUTestAllocator
[removed below this point: the LUOobCommand, _ExtStorageQuery,
LUExtStorageDiagnose and LURestrictedCommand class definitions, which move
verbatim into the new lib/cmdlib/misc.py shown next]
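Each deletion in the hunk above is balanced by the new import of the same
names, so the public API of the ganeti.cmdlib package is unchanged by the
move. A minimal sketch of this move-and-re-export pattern, under hypothetical
names (mypkg, Thing) rather than the real Ganeti modules:

# mypkg/misc.py -- the implementation moves verbatim into a submodule
class Thing(object):
  """Formerly defined directly in mypkg/__init__.py."""


# mypkg/__init__.py -- re-export the moved name so existing callers of
# "from mypkg import Thing" keep working while __init__.py shrinks
from mypkg.misc import Thing


# client code -- direct consumers can instead switch to the explicit
# location, as lib/cmdlib/query.py does further down in this revision
from mypkg.misc import Thing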
b/lib/cmdlib/misc.py
(new file; every line below is an addition)

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Miscellaneous logical units that don't fit into any category."""

import logging
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, _QueryBase
from ganeti.cmdlib.common import _GetWantedNodes, _SupportsOob


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1

    if not self.op.node_names:
      # Acquire node allocation lock only if all nodes are affected
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

          if (self.op.command == constants.OOB_POWER_ON and
              idx < len(self.nodes) - 1):
            time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
      constants.OOB_POWER_ON,
      constants.OOB_POWER_OFF,
      constants.OOB_POWER_CYCLE,
      ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class _ExtStorageQuery(_QueryBase):
  FIELDS = query.EXTSTORAGE_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByProvider(rlist):
    """Remaps a per-node return list into a per-provider per-node dictionary

    @param rlist: a map with node names as keys and ExtStorage objects as values

    @rtype: dict
    @return: a dictionary with extstorage providers as keys and as
        value another map, with nodes as keys and tuples of
        (path, status, diagnose, parameters) as values, eg::

          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
                         "node2": [(/srv/..., False, "missing file")]
                         "node3": [(/srv/..., True, "", [])]
          }

    """
    all_es = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, params) in nr.payload:
        if name not in all_es:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_es[name] = {}
          for nname in good_nodes:
            all_es[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_es[name][node_name].append((path, status, diagnose, params))
    return all_es

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.name
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))

    data = {}

    nodegroup_list = lu.cfg.GetNodeGroupList()

    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #      { nodegroup1: status
      #        nodegroup2: status
      #      }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)

        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []

        for node in nodegroup_nodes:
          if node in valid_nodes:
            if es_data[node] != []:
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)

        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True

      # Compute the provider's parameters
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          break

        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)

      params = list(parameters)

      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)

      data[es_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]


class LUExtStorageDiagnose(NoHooksLU):
  """Logical unit for ExtStorage diagnose/query.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
                               self.op.output_fields, False)

  def ExpandNames(self):
    self.eq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.eq.OldStyleQuery(self)


class LURestrictedCommand(NoHooksLU):
  """Logical unit for executing restricted commands.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.nodes:
      self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.nodes,
      }
    self.share_locks = {
      locking.LEVEL_NODE: not self.op.use_locking,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Execute restricted command and return output.

    """
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    # Check if correct locks are held
    assert set(self.op.nodes).issubset(owned_nodes)

    rpcres = self.rpc.call_restricted_command(self.op.nodes, self.op.command)

    result = []

    for node_name in self.op.nodes:
      nres = rpcres[node_name]
      if nres.fail_msg:
        msg = ("Command '%s' on node '%s' failed: %s" %
               (self.op.command, node_name, nres.fail_msg))
        result.append((False, msg))
      else:
        result.append((True, nres.payload))

    return result
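A reading aid, not part of the commit: the least obvious piece of the new
module is _DiagnoseByProvider, which inverts the per-node RPC result into a
per-provider, per-node map. A self-contained sketch of that remap; FakeResult
and the sample node/provider names are made up for illustration:

# Stub standing in for the real RPC result objects (fail_msg/payload).
class FakeResult(object):
  def __init__(self, payload, fail_msg=None):
    self.payload = payload    # list of (name, path, status, diagnose, params)
    self.fail_msg = fail_msg  # non-None means the RPC itself failed

rlist = {
  "node1": FakeResult([("prov1", "/usr/lib/prov1", True, "", [["a", "help"]])]),
  "node2": FakeResult(None, fail_msg="node daemon unreachable"),
}

# Same remap as _DiagnoseByProvider: nodes whose RPC failed are dropped,
# and every surviving provider gets an (initially empty) entry per good node.
all_es = {}
good_nodes = [n for n in rlist if not rlist[n].fail_msg]
for node_name, nr in rlist.items():
  if nr.fail_msg or not nr.payload:
    continue
  for (name, path, status, diagnose, params) in nr.payload:
    if name not in all_es:
      all_es[name] = dict((n, []) for n in good_nodes)
    params = [tuple(v) for v in params]  # [name, help] -> (name, help)
    all_es[name][node_name].append((path, status, diagnose, params))

print(all_es)
# {'prov1': {'node1': [('/usr/lib/prov1', True, '', [('a', 'help')])]}}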
b/lib/cmdlib/query.py
 from ganeti import constants
 from ganeti import errors
 from ganeti import query
-from ganeti.cmdlib import _ExtStorageQuery
 from ganeti.cmdlib.backup import _ExportQuery
 from ganeti.cmdlib.base import NoHooksLU
 from ganeti.cmdlib.cluster import _ClusterQuery
 from ganeti.cmdlib.group import _GroupQuery
 from ganeti.cmdlib.instance import _InstanceQuery
+from ganeti.cmdlib.misc import _ExtStorageQuery
 from ganeti.cmdlib.network import _NetworkQuery
 from ganeti.cmdlib.node import _NodeQuery
 from ganeti.cmdlib.operating_system import _OsQuery
