Revision 3a24c527

b/scripts/gnt-backup
@@ -43,13 +43,13 @@
   """
   exports = GetClient().QueryExports(opts.nodes)
   for node in exports:
-    print ("Node: %s" % node)
-    print ("Exports:")
+    ToStdout("Node: %s", node)
+    ToStdout("Exports:")
     if isinstance(exports[node], list):
       for instance_name in exports[node]:
-        print ("\t%s" % instance_name)
+        ToStdout("\t%s", instance_name)
     else:
-      print ("  Could not get exports list")
+      ToStdout("  Could not get exports list")


 def ExportInstance(opts, args):

b/scripts/gnt-cluster
@@ -21,7 +21,6 @@

 import sys
 from optparse import make_option
-import pprint
 import os.path

 from ganeti.cli import *
@@ -43,7 +42,7 @@

   """
   if not opts.lvm_storage and opts.vg_name:
-    print ("Options --no-lvm-storage and --vg-name conflict.")
+    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
     return 1

   vg_name = opts.vg_name
@@ -119,8 +118,8 @@

   """
   if not opts.yes_do_it:
-    print ("Destroying a cluster is irreversibly. If you really want destroy"
-           " this cluster, supply the --yes-do-it option.")
+    ToStderr("Destroying a cluster is irreversible. If you really want"
+             " destroy this cluster, supply the --yes-do-it option.")
     return 1

   op = opcodes.OpDestroyCluster()
@@ -162,11 +161,11 @@
   """
   op = opcodes.OpQueryClusterInfo()
   result = SubmitOpCode(op)
-  print ("Software version: %s" % result["software_version"])
-  print ("Internode protocol: %s" % result["protocol_version"])
-  print ("Configuration format: %s" % result["config_version"])
-  print ("OS api version: %s" % result["os_api_version"])
-  print ("Export interface: %s" % result["export_version"])
+  ToStdout("Software version: %s", result["software_version"])
+  ToStdout("Internode protocol: %s", result["protocol_version"])
+  ToStdout("Configuration format: %s", result["config_version"])
+  ToStdout("OS api version: %s", result["os_api_version"])
+  ToStdout("Export interface: %s", result["export_version"])
   return 0


@@ -177,7 +176,7 @@
     opts - class with options as members

   """
-  print GetClient().QueryConfigValues(["master_node"])[0]
+  ToStdout("%s", GetClient().QueryConfigValues(["master_node"])[0])
   return 0


@@ -188,27 +187,27 @@
   op = opcodes.OpQueryClusterInfo()
   result = SubmitOpCode(op)

-  print ("Cluster name: %s" % result["name"])
+  ToStdout("Cluster name: %s", result["name"])

-  print ("Master node: %s" % result["master"])
+  ToStdout("Master node: %s", result["master"])

-  print ("Architecture (this node): %s (%s)" %
-         (result["architecture"][0], result["architecture"][1]))
+  ToStdout("Architecture (this node): %s (%s)",
+           result["architecture"][0], result["architecture"][1])

-  print ("Default hypervisor: %s" % result["hypervisor_type"])
-  print ("Enabled hypervisors: %s" % ", ".join(result["enabled_hypervisors"]))
+  ToStdout("Default hypervisor: %s", result["hypervisor_type"])
+  ToStdout("Enabled hypervisors: %s", ", ".join(result["enabled_hypervisors"]))

-  print "Hypervisor parameters:"
+  ToStdout("Hypervisor parameters:")
   for hv_name, hv_dict in result["hvparams"].items():
-    print "  - %s:" % hv_name
+    ToStdout("  - %s:", hv_name)
     for item, val in hv_dict.iteritems():
-      print "      %s: %s" % (item, val)
+      ToStdout("      %s: %s", item, val)

-  print "Cluster parameters:"
+  ToStdout("Cluster parameters:")
   for gr_name, gr_dict in result["beparams"].items():
-    print "  - %s:" % gr_name
+    ToStdout("  - %s:", gr_name)
     for item, val in gr_dict.iteritems():
-      print "      %s: %s" % (item, val)
+      ToStdout("      %s: %s", item, val)

   return 0

@@ -239,8 +238,7 @@
   srun = ssh.SshRunner(cluster_name=cluster_name)
   for node in results:
     if not srun.CopyFileToNode(node, filename):
-      print >> sys.stderr, ("Copy of file %s to node %s failed" %
-                            (filename, node))
+      ToStderr("Copy of file %s to node %s failed", filename, node)

   return 0

@@ -273,10 +271,10 @@

   for name in nodes:
     result = srun.Run(name, "root", command)
-    print ("------------------------------------------------")
-    print ("node: %s" % name)
-    print ("%s" % result.output)
-    print ("return code = %s" % result.exit_code)
+    ToStdout("------------------------------------------------")
+    ToStdout("node: %s", name)
+    ToStdout("%s", result.output)
+    ToStdout("return code = %s", result.exit_code)

   return 0

@@ -313,17 +311,17 @@
   nodes, nlvm, instances, missing = result

   if nodes:
-    print "Nodes unreachable or with bad data:"
+    ToStdout("Nodes unreachable or with bad data:")
     for name in nodes:
-      print "\t%s" % name
+      ToStdout("\t%s", name)
   retcode = constants.EXIT_SUCCESS

   if nlvm:
     for node, text in nlvm.iteritems():
-      print ("Error on node %s: LVM error: %s" %
-             (node, text[-400:].encode('string_escape')))
+      ToStdout("Error on node %s: LVM error: %s",
+               node, text[-400:].encode('string_escape'))
       retcode |= 1
-      print "You need to fix these nodes first before fixing instances"
+      ToStdout("You need to fix these nodes first before fixing instances")

   if instances:
     for iname in instances:
@@ -331,29 +329,28 @@
         continue
       op = opcodes.OpActivateInstanceDisks(instance_name=iname)
       try:
-        print "Activating disks for instance '%s'" % iname
+        ToStdout("Activating disks for instance '%s'", iname)
         SubmitOpCode(op)
       except errors.GenericError, err:
         nret, msg = FormatError(err)
         retcode |= nret
-        print >> sys.stderr, ("Error activating disks for instance %s: %s" %
-                              (iname, msg))
+        ToStderr("Error activating disks for instance %s: %s", iname, msg)

   if missing:
     for iname, ival in missing.iteritems():
       all_missing = utils.all(ival, lambda x: x[0] in nlvm)
       if all_missing:
-        print ("Instance %s cannot be verified as it lives on"
-               " broken nodes" % iname)
+        ToStdout("Instance %s cannot be verified as it lives on"
+                 " broken nodes", iname)
       else:
-        print "Instance %s has missing logical volumes:" % iname
+        ToStdout("Instance %s has missing logical volumes:", iname)
         ival.sort()
         for node, vol in ival:
           if node in nlvm:
-            print ("\tbroken node %s /dev/xenvg/%s" % (node, vol))
+            ToStdout("\tbroken node %s /dev/xenvg/%s", node, vol)
           else:
-            print ("\t%s /dev/xenvg/%s" % (node, vol))
-    print ("You need to run replace_disks for all the above"
+            ToStdout("\t%s /dev/xenvg/%s", node, vol)
+    ToStdout("You need to run replace_disks for all the above"
            " instances, if this message persist after fixing nodes.")
     retcode |= 1

@@ -382,7 +379,7 @@
   result = list(result)
   result.sort()
   for path, tag in result:
-    print "%s %s" % (path, tag)
+    ToStdout("%s %s", path, tag)


 def SetClusterParams(opts, args):
@@ -395,12 +392,12 @@
   if not (not opts.lvm_storage or opts.vg_name or
           opts.enabled_hypervisors or opts.hvparams or
           opts.beparams):
-    print "Please give at least one of the parameters."
+    ToStderr("Please give at least one of the parameters.")
     return 1

   vg_name = opts.vg_name
   if not opts.lvm_storage and opts.vg_name:
-    print ("Options --no-lvm-storage and --vg-name conflict.")
+    ToStdout("Options --no-lvm-storage and --vg-name conflict.")
     return 1

   hvlist = opts.enabled_hypervisors
@@ -433,11 +430,11 @@
     client.SetQueueDrainFlag(drain_flag)
   elif command == "info":
     result = client.QueryConfigValues(["drain_flag"])
-    print "The drain flag is",
     if result[0]:
-      print "set"
+      val = "set"
     else:
-      print "unset"
+      val = "unset"
+    ToStdout("The drain flag is %s" % val)
   return 0

 # this is an option common to more than one command, so we declare

b/scripts/gnt-debug
@@ -30,7 +30,6 @@

 from ganeti.cli import *
 from ganeti import opcodes
-from ganeti import logger
 from ganeti import constants
 from ganeti import utils
 from ganeti import errors
@@ -58,7 +57,7 @@
   op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
   job = opcodes.Job(op_list=op_list)
   jid = SubmitJob(job)
-  print "Job id:", jid
+  ToStdout("Job id: %s", jid)
   query = {
     "object": "jobs",
     "fields": ["status"],
@@ -70,11 +69,11 @@
     jdata = SubmitQuery(query)
     if not jdata:
       # job not found, gone away!
-      print "Job lost!"
+      ToStderr("Job lost!")
       return 1

     status = jdata[0][0]
-    print status
+    ToStdout(status)
     if status in (opcodes.Job.STATUS_SUCCESS, opcodes.Job.STATUS_FAIL):
       break

@@ -86,12 +85,12 @@
   jdata = SubmitQuery(query)
   if not jdata:
     # job not found, gone away!
-    print "Job lost!"
+    ToStderr("Job lost!")
     return 1
-  print jdata[0]
+  ToStdout(jdata[0])
   status, op_list, op_status, op_result = jdata[0]
   for idx, op in enumerate(op_list):
-    print idx, op.OP_ID, op_status[idx], op_result[idx]
+    ToStdout("%s %s %s %s", idx, op.OP_ID, op_status[idx], op_result[idx])
   return 0


@@ -102,7 +101,7 @@
     disks = [{"size": utils.ParseUnit(val), "mode": 'w'}
              for val in opts.disks.split(",")]
   except errors.UnitParseError, err:
-    print >> sys.stderr, "Invalid disks parameter '%s': %s" % (opts.disks, err)
+    ToStderr("Invalid disks parameter '%s': %s", opts.disks, err)
     return 1

   nics = [val.split("/") for val in opts.nics.split(",")]
@@ -132,7 +131,7 @@
                                allocator=opts.allocator,
                                )
   result = SubmitOpCode(op)
-  print result
+  ToStdout("%s" % result)
   return 0


b/scripts/gnt-instance
@@ -29,7 +29,6 @@
 from ganeti.cli import *
 from ganeti import cli
 from ganeti import opcodes
-from ganeti import logger
 from ganeti import constants
 from ganeti import utils
 from ganeti import errors
@@ -252,7 +251,7 @@
                        numfields=numfields, data=output)

   for line in data:
-    logger.ToStdout(line)
+    ToStdout(line)

   return 0

@@ -418,7 +417,7 @@
                                   file_storage_dir=specs['file_storage_dir'],
                                   file_driver=specs['file_driver'])

-    print '%s: %s' % (name, cli.SendJob([op]))
+    ToStdout("%s: %s", name, cli.SendJob([op]))

   return 0

@@ -438,14 +437,14 @@
     result = SubmitOpCode(op)

     if not result:
-      logger.ToStdout("Can't get the OS list")
+      ToStdout("Can't get the OS list")
       return 1

-    logger.ToStdout("Available OS templates:")
+    ToStdout("Available OS templates:")
     number = 0
     choices = []
     for entry in result:
-      logger.ToStdout("%3s: %s" % (number, entry[0]))
+      ToStdout("%3s: %s", number, entry[0])
       choices.append(("%s" % number, entry[0], entry[0]))
       number = number + 1

@@ -454,7 +453,7 @@
                        choices)

     if selected == 'exit':
-      logger.ToStdout("User aborted reinstall, exiting")
+      ToStdout("User aborted reinstall, exiting")
       return 1

     os = selected
@@ -526,7 +525,7 @@
   op = opcodes.OpActivateInstanceDisks(instance_name=instance_name)
   disks_info = SubmitOrSend(op, opts)
   for host, iname, nname in disks_info:
-    print "%s:%s:%s" % (host, iname, nname)
+    ToStdout("%s:%s:%s", host, iname, nname)
   return 0


@@ -581,12 +580,12 @@
                                    force=opts.force,
                                    extra_args=opts.extra_args)
     if multi_on:
-      logger.ToStdout("Starting up %s" % name)
+      ToStdout("Starting up %s", name)
     try:
       SubmitOrSend(op, opts)
     except JobSubmittedException, err:
       _, txt = FormatError(err)
-      logger.ToStdout("%s" % txt)
+      ToStdout("%s", txt)
   return 0


@@ -636,12 +635,12 @@
   for name in inames:
     op = opcodes.OpShutdownInstance(instance_name=name)
     if multi_on:
-      logger.ToStdout("Shutting down %s" % name)
+      ToStdout("Shutting down %s", name)
     try:
       SubmitOrSend(op, opts)
     except JobSubmittedException, err:
       _, txt = FormatError(err)
-      logger.ToStdout("%s" % txt)
+      ToStdout("%s", txt)
   return 0


@@ -721,13 +720,13 @@
   cmd = SubmitOpCode(op)

   if opts.show_command:
-    print utils.ShellQuoteArgs(cmd)
+    ToStdout("%s", utils.ShellQuoteArgs(cmd))
   else:
     try:
       os.execvp(cmd[0], cmd)
     finally:
-      sys.stderr.write("Can't run console command %s with arguments:\n'%s'" %
-                       (cmd, " ".join(argv)))
+      ToStderr("Can't run console command %s with arguments:\n'%s'",
+               cmd, " ".join(argv))
       os._exit(1)


@@ -812,7 +811,7 @@
   op = opcodes.OpQueryInstanceData(instances=args, static=opts.static)
   result = SubmitOpCode(op)
   if not result:
-    logger.ToStdout("No instances.")
+    ToStdout("No instances.")
     return 1

   buf = StringIO()
@@ -883,7 +882,7 @@
     for device in instance["disks"]:
       _FormatBlockDevInfo(buf, device, 1, opts.static)

-  logger.ToStdout(buf.getvalue().rstrip('\n'))
+  ToStdout(buf.getvalue().rstrip('\n'))
   return retcode


@@ -901,7 +900,7 @@
   """
   if not (opts.ip or opts.bridge or opts.mac or
           opts.hypervisor or opts.beparams):
-    logger.ToStdout("Please give at least one of the parameters.")
+    ToStderr("Please give at least one of the parameters.")
     return 1

   if constants.BE_MEMORY in opts.beparams:
@@ -919,11 +918,11 @@
   result = SubmitOrSend(op, opts)

   if result:
-    logger.ToStdout("Modified instance %s" % args[0])
+    ToStdout("Modified instance %s", args[0])
     for param, data in result:
-      logger.ToStdout(" - %-5s -> %s" % (param, data))
-    logger.ToStdout("Please don't forget that these parameters take effect"
-                    " only at the next start of the instance.")
+      ToStdout(" - %-5s -> %s", param, data)
+    ToStdout("Please don't forget that these parameters take effect"
+             " only at the next start of the instance.")
   return 0


b/scripts/gnt-job
@@ -28,7 +28,6 @@

 from ganeti.cli import *
 from ganeti import opcodes
-from ganeti import logger
 from ganeti import constants
 from ganeti import utils
 from ganeti import errors
@@ -103,7 +102,7 @@
                        fields=selected_fields, unitfields=unitfields,
                        numfields=numfields, data=output)
   for line in data:
-    print line
+    ToStdout(line)

   return 0

@@ -146,7 +145,7 @@
   """
   def format(level, text):
     """Display the text indented."""
-    print "%s%s" % ("  " * level, text)
+    ToStdout("%s%s", "  " * level, text)

   def result_helper(value):
     """Format a result field in a nice way."""

b/scripts/gnt-node
@@ -24,7 +24,6 @@

 from ganeti.cli import *
 from ganeti import opcodes
-from ganeti import logger
 from ganeti import utils
 from ganeti import constants
 from ganeti import errors
@@ -52,14 +51,16 @@
     except (errors.OpPrereqError, errors.OpExecError):
       pass
     else:
-      logger.ToStderr("Node %s already in the cluster (as %s)"
-                      " - please use --readd" % (args[0], output[0][0]))
+      ToStderr("Node %s already in the cluster (as %s)"
+               " - please use --readd", args[0], output[0][0])
       return 1

-  logger.ToStderr("-- WARNING -- \n"
-    "Performing this operation is going to replace the ssh daemon keypair\n"
-    "on the target machine (%s) with the ones of the current one\n"
-    "and grant full intra-cluster ssh root access to/from it\n" % node)
+  ToStderr("-- WARNING -- \n"
+           "Performing this operation is going to replace the ssh daemon"
+           " keypair\n"
+           "on the target machine (%s) with the ones of the"
+           " current one\n"
+           "and grant full intra-cluster ssh root access to/from it\n", node)

   bootstrap.SetupNodeDaemon(node, opts.ssh_key_check)

@@ -121,7 +122,7 @@
                        fields=selected_fields, unitfields=unitfields,
                        numfields=numfields, data=output)
   for line in data:
-    logger.ToStdout(line)
+    ToStdout(line)

   return 0

@@ -147,7 +148,7 @@
                                src_node)

   if not sinst:
-    logger.ToStderr("No secondary instances on node %s, exiting." % src_node)
+    ToStderr("No secondary instances on node %s, exiting.", src_node)
     return constants.EXIT_SUCCESS

   sinst = utils.NiceSort(sinst)
@@ -167,23 +168,21 @@
                                 mode=constants.REPLACE_DISK_ALL,
                                 disks=["sda", "sdb"])
     try:
-      logger.ToStdout("Replacing disks for instance %s" % iname)
+      ToStdout("Replacing disks for instance %s", iname)
       SubmitOpCode(op)
-      logger.ToStdout("Instance %s has been relocated" % iname)
+      ToStdout("Instance %s has been relocated", iname)
       good_cnt += 1
     except errors.GenericError, err:
       nret, msg = FormatError(err)
       retcode |= nret
-      logger.ToStderr("Error replacing disks for instance %s: %s" %
-                      (iname, msg))
+      ToStderr("Error replacing disks for instance %s: %s", iname, msg)
       bad_cnt += 1

   if retcode == constants.EXIT_SUCCESS:
-    logger.ToStdout("All %d instance(s) relocated successfully." % good_cnt)
+    ToStdout("All %d instance(s) relocated successfully.", good_cnt)
   else:
-    logger.ToStdout("There were errors during the relocation:\n"
-                    "%d error(s) out of %d instance(s)." %
-                    (bad_cnt, good_cnt + bad_cnt))
+    ToStdout("There were errors during the relocation:\n"
+             "%d error(s) out of %d instance(s).", bad_cnt, good_cnt + bad_cnt)
   return retcode


@@ -199,7 +198,7 @@
   node, pinst = result[0]

   if not pinst:
-    logger.ToStderr("No primary instances on node %s, exiting." % node)
+    ToStderr("No primary instances on node %s, exiting.", node)
     return 0

   pinst = utils.NiceSort(pinst)
@@ -215,22 +214,21 @@
     op = opcodes.OpFailoverInstance(instance_name=iname,
                                     ignore_consistency=opts.ignore_consistency)
     try:
-      logger.ToStdout("Failing over instance %s" % iname)
+      ToStdout("Failing over instance %s", iname)
       SubmitOpCode(op)
-      logger.ToStdout("Instance %s has been failed over" % iname)
+      ToStdout("Instance %s has been failed over", iname)
       good_cnt += 1
     except errors.GenericError, err:
       nret, msg = FormatError(err)
       retcode |= nret
-      logger.ToStderr("Error failing over instance %s: %s" % (iname, msg))
+      ToStderr("Error failing over instance %s: %s", iname, msg)
       bad_cnt += 1

   if retcode == 0:
-    logger.ToStdout("All %d instance(s) failed over successfully." % good_cnt)
+    ToStdout("All %d instance(s) failed over successfully.", good_cnt)
   else:
-    logger.ToStdout("There were errors during the failover:\n"
-                    "%d error(s) out of %d instance(s)." %
-                    (bad_cnt, good_cnt + bad_cnt))
+    ToStdout("There were errors during the failover:\n"
+             "%d error(s) out of %d instance(s).", bad_cnt, good_cnt + bad_cnt)
   return retcode


@@ -244,21 +242,21 @@
   result = SubmitOpCode(op)

   for name, primary_ip, secondary_ip, pinst, sinst in result:
-    logger.ToStdout("Node name: %s" % name)
-    logger.ToStdout("  primary ip: %s" % primary_ip)
-    logger.ToStdout("  secondary ip: %s" % secondary_ip)
+    ToStdout("Node name: %s", name)
+    ToStdout("  primary ip: %s", primary_ip)
+    ToStdout("  secondary ip: %s", secondary_ip)
     if pinst:
-      logger.ToStdout("  primary for instances:")
+      ToStdout("  primary for instances:")
       for iname in pinst:
-        logger.ToStdout("    - %s" % iname)
+        ToStdout("    - %s", iname)
     else:
-      logger.ToStdout("  primary for no instances")
+      ToStdout("  primary for no instances")
     if sinst:
-      logger.ToStdout("  secondary for instances:")
+      ToStdout("  secondary for instances:")
       for iname in sinst:
-        logger.ToStdout("    - %s" % iname)
+        ToStdout("    - %s", iname)
     else:
-      logger.ToStdout("  secondary for no instances")
+      ToStdout("  secondary for no instances")

   return 0

@@ -301,7 +299,7 @@
                        numfields=numfields, data=output)

   for line in data:
-    logger.ToStdout(line)
+    ToStdout(line)

   return 0

b/scripts/gnt-os
@@ -24,7 +24,6 @@

 from ganeti.cli import *
 from ganeti import opcodes
-from ganeti import logger
 from ganeti import objects
 from ganeti import utils
 from ganeti import errors
@@ -39,7 +38,7 @@
   result = SubmitOpCode(op)

   if not result:
-    logger.ToStdout("Can't get the OS list")
+    ToStderr("Can't get the OS list")
     return 1

   if not opts.no_headers:
@@ -51,7 +50,7 @@
                        data=[[row[0]] for row in result if row[1]])

   for line in data:
-    logger.ToStdout(line)
+    ToStdout(line)

   return 0

@@ -65,7 +64,7 @@
   result = SubmitOpCode(op)

   if not result:
-    logger.ToStdout("Can't get the OS list")
+    ToStderr("Can't get the OS list")
     return 1

   has_bad = False
@@ -102,15 +101,14 @@
     def _OutputPerNodeOSStatus(msg_map):
       map_k = utils.NiceSort(msg_map.keys())
       for node_name in map_k:
-        logger.ToStdout("  Node: %s, status: %s" %
-                        (node_name, msg_map[node_name]))
+        ToStdout("  Node: %s, status: %s", node_name, msg_map[node_name])
         for msg in nodes_hidden[node_name]:
-          logger.ToStdout(msg)
+          ToStdout(msg)

-    logger.ToStdout("OS: %s [global status: %s]" % (os_name, status))
+    ToStdout("OS: %s [global status: %s]", os_name, status)
     _OutputPerNodeOSStatus(nodes_valid)
     _OutputPerNodeOSStatus(nodes_bad)
-    logger.ToStdout("")
+    ToStdout("")

   return int(has_bad)

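Note on the pattern: every hunk above routes CLI output through the ToStdout/ToStderr helpers from ganeti.cli, passing the format string and its arguments separately (ToStdout("Node: %s", node)) instead of pre-interpolating with % or using bare print / logger calls. The diff does not show how these helpers are implemented; the following is only a minimal sketch of the calling convention they appear to follow, with names and behaviour assumed rather than taken from the real ganeti.cli module:

  import sys

  def _ToStream(stream, txt, *args):
    # Hypothetical helper: interpolate only when arguments are supplied,
    # so callers may also pass a pre-built string, e.g. ToStdout(line).
    if args:
      txt = txt % args
    stream.write(txt + "\n")
    stream.flush()

  def ToStdout(txt, *args):
    # Sketch of the stdout helper used above (assumed signature).
    _ToStream(sys.stdout, txt, *args)

  def ToStderr(txt, *args):
    # Sketch of the stderr helper used above (assumed signature).
    _ToStream(sys.stderr, txt, *args)

Deferring the interpolation to one place keeps the call sites uniform and lets warnings and errors go to stderr consistently, which is what most of the hunks in this revision change.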