+ # TODO: Lock names are not always hostnames. Should QFF_HOSTNAME be used?
+ (_MakeField("name", "Name", QFT_TEXT, "Lock name"), None, 0,
+ lambda ctx, (name, mode, owners, pending): name),
+ (_MakeField("mode", "Mode", QFT_OTHER,
+ "Mode in which the lock is currently acquired"
+ " (exclusive or shared)"),
+ LQ_MODE, 0, lambda ctx, (name, mode, owners, pending): mode),
+ (_MakeField("owner", "Owner", QFT_OTHER, "Current lock owner(s)"),
+ LQ_OWNER, 0, _GetLockOwners),
+ (_MakeField("pending", "Pending", QFT_OTHER,
+ "Threads waiting for the lock"),
+ LQ_PENDING, 0, _GetLockPending),
+ ], [])
+
+
+class GroupQueryData:
+  """Data container for node group data queries.
+
+  """
+  def __init__(self, cluster, groups, group_to_nodes, group_to_instances,
+               want_diskparams):
+    """Initializes this class.
+
+    @type cluster: L{objects.Cluster}
+    @param cluster: Cluster object
+    @type groups: list
+    @param groups: List of node group objects
+    @type group_to_nodes: dict; group UUID as key
+    @param group_to_nodes: Per-group list of nodes
+    @type group_to_instances: dict; group UUID as key
+    @param group_to_instances: Per-group list of (primary) instances
+    @type want_diskparams: bool
+    @param want_diskparams: Whether disk parameters should be calculated
+
+    """
+    self.groups = groups
+    self.group_to_nodes = group_to_nodes
+    self.group_to_instances = group_to_instances
+    self.cluster = cluster
+    self.want_diskparams = want_diskparams
+
+    # Used for individual rows; set by L{__iter__} for the group currently
+    # being processed so per-field getters can read the merged values
+    self.group_ipolicy = None
+    self.ndparams = None
+    self.group_dp = None
+
+  def __iter__(self):
+    """Iterate over all node groups.
+
+    This function has side-effects and only one instance of the resulting
+    generator should be used at a time.
+
+    """
+    for group in self.groups:
+      # Merge cluster-level defaults with per-group overrides before
+      # yielding, so row getters see the effective (filled) parameters
+      self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
+      self.ndparams = self.cluster.SimpleFillND(group.ndparams)
+      if self.want_diskparams:
+        self.group_dp = self.cluster.SimpleFillDP(group.diskparams)
+      else:
+        # Disk parameters were not requested; leave unavailable
+        self.group_dp = None
+      yield group
+
+
+# Node group fields that map directly to an attribute of the group object:
+# field name as key, (title, field kind, description) as value
+_GROUP_SIMPLE_FIELDS = {
+  "alloc_policy": ("AllocPolicy", QFT_TEXT, "Allocation policy for group"),
+  "name": ("Group", QFT_TEXT, "Group name"),
+  "serial_no": ("SerialNo", QFT_NUMBER, _SERIAL_NO_DOC % "Group"),
+  "uuid": ("UUID", QFT_TEXT, "Group UUID"),
+  }
+
+
+def _BuildGroupFields():
+  """Builds list of fields for node group queries.
+
+  """
+  # Add simple fields (direct attribute lookups on the group object)
+  fields = [(_MakeField(name, title, kind, doc), GQ_CONFIG, 0,
+             _GetItemAttr(name))
+            for (name, (title, kind, doc)) in _GROUP_SIMPLE_FIELDS.items()]
+
+  # Both closures below take a getter returning a per-group-UUID dict from
+  # the query context (a L{GroupQueryData} instance)
+  def _GetLength(getter):
+    # Number of entries for this group's UUID
+    return lambda ctx, group: len(getter(ctx)[group.uuid])
+
+  def _GetSortedList(getter):
+    # Entries for this group's UUID, nicely sorted
+    return lambda ctx, group: utils.NiceSort(getter(ctx)[group.uuid])
+
+  group_to_nodes = operator.attrgetter("group_to_nodes")
+  group_to_instances = operator.attrgetter("group_to_instances")
+
+  # Add fields for nodes
+  fields.extend([
+    (_MakeField("node_cnt", "Nodes", QFT_NUMBER, "Number of nodes"),
+     GQ_NODE, 0, _GetLength(group_to_nodes)),
+    (_MakeField("node_list", "NodeList", QFT_OTHER, "List of nodes"),
+     GQ_NODE, 0, _GetSortedList(group_to_nodes)),
+    ])
+
+  # Add fields for instances
+  fields.extend([
+    (_MakeField("pinst_cnt", "Instances", QFT_NUMBER,
+                "Number of primary instances"),
+     GQ_INST, 0, _GetLength(group_to_instances)),
+    (_MakeField("pinst_list", "InstanceList", QFT_OTHER,
+                "List of primary instances"),
+     GQ_INST, 0, _GetSortedList(group_to_instances)),
+    ])
+
+  # Other fields; "merged" variants read the values filled in by
+  # L{GroupQueryData.__iter__}, "custom" variants read the raw group object
+  fields.extend([
+    (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
+     lambda ctx, group: list(group.GetTags())),
+    (_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
+                "Instance policy limitations (merged)"),
+     GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
+    (_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
+                "Custom instance policy limitations"),
+     GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
+    (_MakeField("custom_ndparams", "CustomNDParams", QFT_OTHER,
+                "Custom node parameters"),
+     GQ_CONFIG, 0, _GetItemAttr("ndparams")),
+    (_MakeField("ndparams", "NDParams", QFT_OTHER,
+                "Node parameters"),
+     GQ_CONFIG, 0, lambda ctx, _: ctx.ndparams),
+    (_MakeField("diskparams", "DiskParameters", QFT_OTHER,
+                "Disk parameters (merged)"),
+     GQ_DISKPARAMS, 0, lambda ctx, _: ctx.group_dp),
+    (_MakeField("custom_diskparams", "CustomDiskParameters", QFT_OTHER,
+                "Custom disk parameters"),
+     GQ_CONFIG, 0, _GetItemAttr("diskparams")),
+    ])
+
+  # ND parameters
+  fields.extend(_BuildNDFields(True))
+
+  fields.extend(_GetItemTimestampFields(GQ_CONFIG))
+
+  return _PrepareFieldList(fields, [])
+
+
+class OsInfo(objects.ConfigObject):
+  """Data container for an operating system as used by OS queries.
+
+  """
+  __slots__ = [
+    "name",
+    "valid",
+    "hidden",
+    "blacklisted",
+    "variants",
+    "api_versions",
+    "parameters",
+    "node_status",
+    ]
+
+
+def _BuildOsFields():
+  """Builds list of fields for operating system queries.
+
+  Query items are L{OsInfo} objects; all fields are plain attribute lookups,
+  some wrapped with a sorting converter.
+
+  """
+  fields = [
+    (_MakeField("name", "Name", QFT_TEXT, "Operating system name"),
+     None, 0, _GetItemAttr("name")),
+    (_MakeField("valid", "Valid", QFT_BOOL,
+                "Whether operating system definition is valid"),
+     None, 0, _GetItemAttr("valid")),
+    (_MakeField("hidden", "Hidden", QFT_BOOL,
+                "Whether operating system is hidden"),
+     None, 0, _GetItemAttr("hidden")),
+    (_MakeField("blacklisted", "Blacklisted", QFT_BOOL,
+                "Whether operating system is blacklisted"),
+     None, 0, _GetItemAttr("blacklisted")),
+    # Variants are name-sorted for stable output
+    (_MakeField("variants", "Variants", QFT_OTHER,
+                "Operating system variants"),
+     None, 0, _ConvWrap(utils.NiceSort, _GetItemAttr("variants"))),
+    (_MakeField("api_versions", "ApiVersions", QFT_OTHER,
+                "Operating system API versions"),
+     None, 0, _ConvWrap(sorted, _GetItemAttr("api_versions"))),
+    # Parameters are pairs; sort by their first element (C{compat.fst})
+    (_MakeField("parameters", "Parameters", QFT_OTHER,
+                "Operating system parameters"),
+     None, 0, _ConvWrap(compat.partial(utils.NiceSort, key=compat.fst),
+                        _GetItemAttr("parameters"))),
+    (_MakeField("node_status", "NodeStatus", QFT_OTHER,
+                "Status from node"),
+     None, 0, _GetItemAttr("node_status")),
+    ]
+
+  return _PrepareFieldList(fields, [])
+
+
+# The "ctx" parameter is unused, hence the pylint disable
+def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613
+  """Return L{_FS_UNAVAIL} if job is None.
+
+  When listing specific jobs (e.g. "gnt-job list 1 2 3"), a job may not be
+  found, in which case this function converts it to L{_FS_UNAVAIL}.
+
+  @param fn: Function called with the job object if it is available
+
+  """
+  if job is None:
+    return _FS_UNAVAIL
+  else:
+    return fn(job)
+
+
+def _JobUnavail(inner):
+  """Wrapper for L{_JobUnavailInner}.
+
+  @param inner: Function applied to the job when it is available
+  @return: Field getter compatible with C{(ctx, (job_id, job))} items
+
+  """
+  return compat.partial(_JobUnavailInner, inner)
+
+
+def _PerJobOpInner(fn, job):
+  """Executes a function per opcode in a job.
+
+  @return: List with C{fn}'s result for each entry in C{job.ops}
+    (Python 2 C{map} returns a list)
+
+  """
+  return map(fn, job.ops)
+
+
+def _PerJobOp(fn):
+  """Wrapper for L{_PerJobOpInner}.
+
+  Combines the per-opcode application with L{_JobUnavail}, so a missing job
+  yields L{_FS_UNAVAIL} instead of an error.
+
+  """
+  return _JobUnavail(compat.partial(_PerJobOpInner, fn))
+
+
+def _JobTimestampInner(fn, job):
+  """Converts unavailable timestamp to L{_FS_UNAVAIL}.
+
+  @param fn: Function extracting the timestamp from the job object
+
+  """
+  timestamp = fn(job)
+
+  if timestamp is None:
+    # Timestamp not (yet) set, e.g. a job that has not started
+    return _FS_UNAVAIL
+  else:
+    return timestamp
+
+
+def _JobTimestamp(fn):
+  """Wrapper for L{_JobTimestampInner}.
+
+  Also guards against a missing job via L{_JobUnavail}.
+
+  """
+  return _JobUnavail(compat.partial(_JobTimestampInner, fn))
+
+
+def _BuildJobFields():
+  """Builds list of fields for job queries.
+
+  Query items are C{(job_id, job)} tuples; C{job} may be C{None} for jobs
+  that could not be found, which the C{_Job*} wrappers turn into
+  L{_FS_UNAVAIL}.
+
+  """
+  fields = [
+    (_MakeField("id", "ID", QFT_TEXT, "Job ID"),
+     None, 0, lambda _, (job_id, job): job_id),
+    (_MakeField("status", "Status", QFT_TEXT, "Job status"),
+     None, 0, _JobUnavail(lambda job: job.CalcStatus())),
+    (_MakeField("priority", "Priority", QFT_NUMBER,
+                ("Current job priority (%s to %s)" %
+                 (constants.OP_PRIO_LOWEST, constants.OP_PRIO_HIGHEST))),
+     None, 0, _JobUnavail(lambda job: job.CalcPriority())),
+    # Per-opcode fields; each produces one list entry per opcode in the job
+    (_MakeField("ops", "OpCodes", QFT_OTHER, "List of all opcodes"),
+     None, 0, _PerJobOp(lambda op: op.input.__getstate__())),
+    (_MakeField("opresult", "OpCode_result", QFT_OTHER,
+                "List of opcodes results"),
+     None, 0, _PerJobOp(operator.attrgetter("result"))),
+    (_MakeField("opstatus", "OpCode_status", QFT_OTHER,
+                "List of opcodes status"),
+     None, 0, _PerJobOp(operator.attrgetter("status"))),
+    (_MakeField("oplog", "OpCode_log", QFT_OTHER,
+                "List of opcode output logs"),
+     None, 0, _PerJobOp(operator.attrgetter("log"))),
+    (_MakeField("opstart", "OpCode_start", QFT_OTHER,
+                "List of opcode start timestamps (before acquiring locks)"),
+     None, 0, _PerJobOp(operator.attrgetter("start_timestamp"))),
+    (_MakeField("opexec", "OpCode_exec", QFT_OTHER,
+                "List of opcode execution start timestamps (after acquiring"
+                " locks)"),
+     None, 0, _PerJobOp(operator.attrgetter("exec_timestamp"))),
+    (_MakeField("opend", "OpCode_end", QFT_OTHER,
+                "List of opcode execution end timestamps"),
+     None, 0, _PerJobOp(operator.attrgetter("end_timestamp"))),
+    (_MakeField("oppriority", "OpCode_prio", QFT_OTHER,
+                "List of opcode priorities"),
+     None, 0, _PerJobOp(operator.attrgetter("priority"))),
+    # Job-level timestamps; unset timestamps become L{_FS_UNAVAIL}
+    (_MakeField("received_ts", "Received", QFT_OTHER,
+                "Timestamp of when job was received"),
+     None, 0, _JobTimestamp(operator.attrgetter("received_timestamp"))),
+    (_MakeField("start_ts", "Start", QFT_OTHER,
+                "Timestamp of job start"),
+     None, 0, _JobTimestamp(operator.attrgetter("start_timestamp"))),
+    (_MakeField("end_ts", "End", QFT_OTHER,
+                "Timestamp of job end"),
+     None, 0, _JobTimestamp(operator.attrgetter("end_timestamp"))),
+    (_MakeField("summary", "Summary", QFT_OTHER,
+                "List of per-opcode summaries"),
+     None, 0, _PerJobOp(lambda op: op.input.Summary())),
+    ]
+
+  return _PrepareFieldList(fields, [])
+
+
+# The context parameter is unused, hence the pylint disable
+def _GetExportName(_, (node_name, expname)): # pylint: disable=W0613
+  """Returns an export name if available.
+
+  @param expname: Export name or C{None} if not available
+
+  """
+  if expname is None:
+    return _FS_UNAVAIL
+  else:
+    return expname
+
+
+def _BuildExportFields():
+  """Builds list of fields for exports.
+
+  Query items are C{(node_name, expname)} tuples.
+
+  """
+  fields = [
+    (_MakeField("node", "Node", QFT_TEXT, "Node name"),
+     None, QFF_HOSTNAME, lambda _, (node_name, expname): node_name),
+    (_MakeField("export", "Export", QFT_TEXT, "Export name"),
+     None, 0, _GetExportName),
+    ]
+
+  return _PrepareFieldList(fields, [])
+
+
+# Cluster version fields whose values are constants known at import time:
+# field name as key, (title, field kind, value, description) as value
+_CLUSTER_VERSION_FIELDS = {
+  "software_version": ("SoftwareVersion", QFT_TEXT, constants.RELEASE_VERSION,
+                       "Software version"),
+  "protocol_version": ("ProtocolVersion", QFT_NUMBER,
+                       constants.PROTOCOL_VERSION,
+                       "RPC protocol version"),
+  "config_version": ("ConfigVersion", QFT_NUMBER, constants.CONFIG_VERSION,
+                     "Configuration format version"),
+  "os_api_version": ("OsApiVersion", QFT_NUMBER, max(constants.OS_API_VERSIONS),
+                     "API version for OS template scripts"),
+  "export_version": ("ExportVersion", QFT_NUMBER, constants.EXPORT_VERSION,
+                     "Import/export file format version"),
+  }
+
+
+# Cluster fields that map directly to an attribute of the cluster object:
+# field name as key, (title, field kind, query field flags, description)
+# as value
+_CLUSTER_SIMPLE_FIELDS = {
+  "cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
+  "master_node": ("Master", QFT_TEXT, QFF_HOSTNAME, "Master node name"),
+  "volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
+  }
+
+
+class ClusterQueryData:
+  """Data container for cluster queries.
+
+  """
+  def __init__(self, cluster, drain_flag, watcher_pause):
+    """Initializes this class.
+
+    @type cluster: L{objects.Cluster}
+    @param cluster: Instance of cluster object
+    @type drain_flag: bool
+    @param drain_flag: Whether job queue is drained
+    @type watcher_pause: number
+    @param watcher_pause: Until when watcher is paused (Unix timestamp)
+
+    """
+    self._cluster = cluster
+    self.drain_flag = drain_flag
+    self.watcher_pause = watcher_pause
+
+  def __iter__(self):
+    """Iterate over the single cluster object.
+
+    """
+    return iter([self._cluster])
+
+
+def _ClusterWatcherPause(ctx, _):
+  """Returns until when watcher is paused (if available).
+
+  @param ctx: L{ClusterQueryData} instance; C{watcher_pause} of C{None}
+    means the watcher is not paused and yields L{_FS_UNAVAIL}
+
+  """
+  if ctx.watcher_pause is None:
+    return _FS_UNAVAIL
+  else:
+    return ctx.watcher_pause
+
+
+def _BuildClusterFields():
+ """Builds list of fields for cluster information.
+
+ """
+ fields = [
+ (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), CQ_CONFIG, 0,
+ lambda ctx, cluster: list(cluster.GetTags())),
+ (_MakeField("architecture", "ArchInfo", QFT_OTHER,
+ "Architecture information"), None, 0,
+ lambda ctx, _: runtime.GetArchInfo()),
+ (_MakeField("drain_flag", "QueueDrained", QFT_BOOL,
+ "Flag whether job queue is drained"), CQ_QUEUE_DRAINED, 0,
+ lambda ctx, _: ctx.drain_flag),
+ (_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
+ "Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
+ _ClusterWatcherPause),
+ ]
+
+ # Simple fields
+ fields.extend([
+ (_MakeField(name, title, kind, doc), CQ_CONFIG, flags, _GetItemAttr(name))
+ for (name, (title, kind, flags, doc)) in _CLUSTER_SIMPLE_FIELDS.items()