Revision 9e5442ce — lib/cmdlib.py (diff adding LUQueryNodeStorage)
2306 | 2306 |
return output |
2307 | 2307 |
|
2308 | 2308 |
|
2309 |
class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  # "node" is synthesized by this LU itself; all other fields come from
  # the per-storage-type dynamic field list
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    """Validate storage type and output fields; declare node locks.

    Raises:
      errors.OpPrereqError: if the requested storage type is unknown

    """
    storage_type = self.op.storage_type

    if storage_type not in constants.VALID_STORAGE_FIELDS:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type)

    dynamic_fields = constants.VALID_STORAGE_FIELDS[storage_type]

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*dynamic_fields),
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # This is a read-only query, so shared node locks are sufficient
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # "name" is an optional opcode parameter (filter on a single storage
    # unit name); default to None, meaning "list all units"
    self.op.name = getattr(self.op, "name", None)

    # The nodes to query are exactly those we hold locks on
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per storage unit, each row holding the
    values of self.op.output_fields in order.

    """
    # Special case for file storage: the backend needs the storage
    # directory as an extra argument
    if self.op.storage_type == constants.ST_FILE:
      st_args = [self.cfg.GetFileStorageDir()]
    else:
      st_args = []

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node as it's only known to the LU; single-pass
    # filter instead of the quadratic while/remove loop
    fields = [field for field in fields if field != "node"]

    field_idx = dict((name, idx) for (idx, name) in enumerate(fields))
    name_idx = field_idx[constants.SF_NAME]

    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      # Offline nodes are skipped silently
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      # Index rows by storage unit name so output can be sorted per node
      rows = dict((row[name_idx], row) for row in nresult.payload)

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == "node":
            # Synthesized field: the node this row came from
            val = node
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result
|
2309 | 2409 |
class LUAddNode(LogicalUnit): |
2310 | 2410 |
"""Logical unit for adding node to the cluster. |
2311 | 2411 |
|
Also available in: Unified diff