elif method == luxi.REQ_QUERY_JOBS:
(job_ids, fields) = args
if isinstance(job_ids, (tuple, list)) and job_ids:
- msg = ", ".join(job_ids)
+ msg = utils.CommaJoin(job_ids)
else:
msg = str(job_ids)
logging.info("Received job query request for %s", msg)
# nothing to do
return
logging.debug("Will activate disks for instances %s",
- ", ".join(offline_disk_instances))
+ utils.CommaJoin(offline_disk_instances))
# we submit only one job, and wait for it. not optimal, but spams
# less the job queue
job = [opcodes.OpActivateInstanceDisks(instance_name=name)
missing.append(bridge)
if missing:
- _Fail("Missing bridges %s", ", ".join(missing))
+ _Fail("Missing bridges %s", utils.CommaJoin(missing))
def GetInstanceList(hypervisor_list):
use_locking=False)
offline = [row[0] for row in result if row[1]]
if offline and not nowarn:
- ToStderr("Note: skipping offline node(s): %s" % ", ".join(offline))
+ ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
return [row[0] for row in result if not row[1]]
if self.verbose:
ok_jobs = [row[1] for row in self.jobs if row[0]]
if ok_jobs:
- ToStdout("Submitted jobs %s", ", ".join(ok_jobs))
+ ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
for submit_status, jid, name in self.jobs:
if not submit_status:
ToStderr("Failed to submit job for %s: %s", name, jid)
if used_globals:
msg = ("The following hypervisor parameters are global and cannot"
" be customized at instance level, please modify them at"
- " cluster level: %s" % ", ".join(used_globals))
+ " cluster level: %s" % utils.CommaJoin(used_globals))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
mod_list = lu.cfg.MaintainCandidatePool(exceptions)
if mod_list:
lu.LogInfo("Promoted nodes to master candidate role: %s",
- ", ".join(node.name for node in mod_list))
+ utils.CommaJoin(node.name for node in mod_list))
for name in mod_list:
lu.context.ReaddNode(name)
mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
# warn that the instance lives on offline nodes
_ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
"instance lives on offline node(s) %s",
- ", ".join(inst_nodes_offline))
+ utils.CommaJoin(inst_nodes_offline))
feedback_fn("* Verifying orphan volumes")
self._VerifyOrphanVolumes(node_vol_should, node_volume)
self.op.pnode = ial.nodes[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
- ", ".join(ial.nodes))
+ utils.CommaJoin(ial.nodes))
if ial.required_nodes == 2:
self.op.snode = ial.nodes[1]
return
feedback_fn("Replacing disk(s) %s for %s" %
- (", ".join([str(i) for i in self.disks]), self.instance.name))
+ (utils.CommaJoin(self.disks), self.instance.name))
activate_disks = (not self.instance.admin_up)
for pnum in keys:
pdata = ports[pnum]
if len(pdata) > 1:
- txt = ", ".join(["%s/%s" % val for val in pdata])
+ txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
# highest used tcp port check
for ip, owners in ips.items():
if len(owners) > 1:
result.append("IP address %s is used by multiple owners: %s" %
- (ip, ", ".join(owners)))
+ (ip, utils.CommaJoin(owners)))
return result
config_errors = self._UnlockedVerifyConfig()
if config_errors:
errmsg = ("Configuration data is not consistent: %s" %
- (", ".join(config_errors)))
+ (utils.CommaJoin(config_errors)))
logging.critical(errmsg)
if feedback_fn:
feedback_fn(errmsg)
self._temporary_ids.DropECReservations(ec_id)
self._temporary_macs.DropECReservations(ec_id)
self._temporary_secrets.DropECReservations(ec_id)
-
self._RenameFilesUnlocked(rename_files)
logging.debug("Successfully archived job(s) %s",
- ", ".join(job.id for job in archive_jobs))
+ utils.CommaJoin(job.id for job in archive_jobs))
return len(archive_jobs)
not self._upper_owned(LEVEL_CLUSTER)), (
"Cannot release the Big Ganeti Lock while holding something"
" at upper levels (%r)" %
- (", ".join(["%s=%r" % (LEVEL_NAMES[i], self._list_owned(i))
- for i in self.__keyring.keys()]), ))
+ (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self._list_owned(i))
+ for i in self.__keyring.keys()]), ))
# Release will complain if we don't own the locks already
return self.__keyring[level].release(names)
@return: a string with the formatted results
"""
- return ", ".join(["'%s'" % val for val in names])
+ return ", ".join([str(val) for val in names])
def BytesToMebibyte(value):
result["architecture"][0], result["architecture"][1])
if result["tags"]:
- tags = ", ".join(utils.NiceSort(result["tags"]))
+ tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
else:
tags = "(none)"
ToStdout("Tags: %s", tags)
ToStdout("Default hypervisor: %s", result["default_hypervisor"])
- ToStdout("Enabled hypervisors: %s", ", ".join(result["enabled_hypervisors"]))
+ ToStdout("Enabled hypervisors: %s",
+ utils.CommaJoin(result["enabled_hypervisors"]))
ToStdout("Hypervisor parameters:")
_PrintGroupedParams(result["hvparams"])
## instance["auto_balance"])
buf.write(" Nodes:\n")
buf.write(" - primary: %s\n" % instance["pnode"])
- buf.write(" - secondaries: %s\n" % ", ".join(instance["snodes"]))
+ buf.write(" - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
buf.write(" Operating system: %s\n" % instance["os"])
if instance.has_key("network_port"):
buf.write(" Allocated network port: %s\n" % instance["network_port"])
" hv/NAME, be/memory, be/vcpus, be/auto_balance,"
" hypervisor."
" The default field"
- " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS),
+ " list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS),
),
'reinstall': (
ReinstallInstance, [ArgInstance()],
def result_helper(value):
  """Format a result field in a nice way.

  @param value: the result field to format; tuples and lists are
      rendered as "[a, b, c]" (elements stringified and comma-joined
      by L{utils.CommaJoin}), anything else via C{str()}
  @rtype: str
  @return: the human-readable representation of C{value}

  """
  if isinstance(value, (tuple, list)):
    # CommaJoin str()s each element itself, so no inner conversion needed
    return "[%s]" % utils.CommaJoin(value)
  else:
    return str(value)
" (see the man page for details): id, status, op_list,"
" op_status, op_result."
" The default field"
- " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS)),
+ " list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS)),
'archive': (
ArchiveJobs, [ArgJobId(min=1)], [],
"<job-id> [<job-id> ...]", "Archive specified jobs"),
"[nodes...]",
"Lists the nodes in the cluster. The available fields are (see the man"
" page for details): %s. The default field list is (in order): %s." %
- (", ".join(_LIST_HEADERS), ", ".join(_LIST_DEF_FIELDS))),
+ (utils.CommaJoin(_LIST_HEADERS), utils.CommaJoin(_LIST_DEF_FIELDS))),
'modify': (
SetNodeParams, ARGS_ONE_NODE,
[FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT],
[NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT],
"[<node_name>...]", "List physical volumes on node(s). The available"
" fields are (see the man page for details): %s." %
- (", ".join(_LIST_STOR_HEADERS))),
+ (utils.CommaJoin(_LIST_STOR_HEADERS))),
'modify-storage': (
ModifyStorage,
[ArgNode(min=1, max=1),
first_os_variants = []
first_os_msg = ("%s (path: %s) [variants: %s]" %
(_OsStatus(first_os_status, first_os_msg),
- first_os_path, ', '.join(first_os_variants)))
+ first_os_path, utils.CommaJoin(first_os_variants)))
if first_os_status:
nodes_valid[node_name] = first_os_msg
else:
ToStdout("OS: %s [global status: %s]", os_name, status)
if os_variants:
- ToStdout(" Variants: [%s]" % ', '.join(os_variants))
+ ToStdout(" Variants: [%s]" % utils.CommaJoin(os_variants))
_OutputPerNodeOSStatus(nodes_valid)
_OutputPerNodeOSStatus(nodes_bad)
ToStdout("")
"""
self.ClearFeedbackBuf()
job_ids = [cli.SendJob(row[0], cl=self.cl) for row in jobs]
- Log("Submitted job ID(s) %s" % ", ".join(job_ids), indent=1)
+ Log("Submitted job ID(s) %s" % utils.CommaJoin(job_ids), indent=1)
results = []
for jid, (_, iname) in zip(job_ids, jobs):
Log("waiting for job %s for %s" % (jid, iname), indent=2)