Revision b7a1c816
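
This revision reworks node migration: LUNodeMigrate no longer runs per-instance tasklets itself but submits one OpInstanceMigrate job per primary instance, with gnt-node migrate tracking the resulting jobs through a JobExecutor. On the RAPI side, a new feature string, node-migrate-reqv1, lets clients send mode, iallocator and target_node in a JSON request body, while old query-string requests keep working.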

b/lib/client/gnt_node.py

@@ -364,7 +364,7 @@
   selected_fields = ["name", "pinst_list"]
 
   result = cl.QueryNodes(names=args, fields=selected_fields, use_locking=False)
-  node, pinst = result[0]
+  ((node, pinst), ) = result
 
   if not pinst:
     ToStdout("No primary instances on node %s, exiting." % node)
@@ -372,9 +372,10 @@
 
   pinst = utils.NiceSort(pinst)
 
-  if not force and not AskUser("Migrate instance(s) %s?" %
-                               (",".join("'%s'" % name for name in pinst))):
-    return 2
+  if not (force or
+          AskUser("Migrate instance(s) %s?" %
+                  utils.CommaJoin(utils.NiceSort(pinst)))):
+    return constants.EXIT_CONFIRMATION
 
   # this should be removed once --non-live is deprecated
   if not opts.live and opts.migration_mode is not None:
@@ -385,10 +386,29 @@
     mode = constants.HT_MIGRATION_NONLIVE
   else:
     mode = opts.migration_mode
+
   op = opcodes.OpNodeMigrate(node_name=args[0], mode=mode,
                              iallocator=opts.iallocator,
                              target_node=opts.dst_node)
-  SubmitOpCode(op, cl=cl, opts=opts)
+
+  result = SubmitOpCode(op, cl=cl, opts=opts)
+
+  # Keep track of submitted jobs
+  jex = JobExecutor(cl=cl, opts=opts)
+
+  for (status, job_id) in result[constants.JOB_IDS_KEY]:
+    jex.AddJobId(None, status, job_id)
+
+  results = jex.GetResults()
+  bad_cnt = len([row for row in results if not row[0]])
+  if bad_cnt == 0:
+    ToStdout("All instances migrated successfully.")
+    rcode = constants.EXIT_SUCCESS
+  else:
+    ToStdout("There were %s errors during the node migration.", bad_cnt)
+    rcode = constants.EXIT_FAILURE
+
+  return rcode
 
 
 def ShowNodeConfig(opts, args):
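
Since the opcode now yields one job per instance (see the cmdlib.py change below), its result carries a list of (status, job_id) pairs under constants.JOB_IDS_KEY, which the CLI feeds into a JobExecutor and reduces to an exit code. A minimal, self-contained sketch of that reduction; the stand-in names and sample payloads are hypothetical, only their shape comes from the code above:

    # Hypothetical stand-ins mirroring the shapes used in MigrateNode above.
    JOB_IDS_KEY = "jobs"
    result = {JOB_IDS_KEY: [(True, "37"), (True, "38")]}  # one pair per instance

    # JobExecutor.GetResults() rows are (success, payload) pairs; the CLI
    # counts the failed ones to pick the exit code.
    results = [(True, None), (False, "hypervisor refused migration")]
    bad_cnt = len([row for row in results if not row[0]])
    print("errors during node migration: %d" % bad_cnt)  # -> 1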
b/lib/cmdlib.py

@@ -6653,40 +6653,10 @@
   def ExpandNames(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
 
-    self.needed_locks = {}
-
-    # Create tasklets for migrating instances for all instances on this node
-    names = []
-    tasklets = []
-
-    self.lock_all_nodes = False
-
-    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
-      logging.debug("Migrating instance %s", inst.name)
-      names.append(inst.name)
-
-      tasklets.append(TLMigrateInstance(self, inst.name, cleanup=False))
-
-      if inst.disk_template in constants.DTS_EXT_MIRROR:
-        # We need to lock all nodes, as the iallocator will choose the
-        # destination nodes afterwards
-        self.lock_all_nodes = True
-
-    self.tasklets = tasklets
-
-    # Declare node locks
-    if self.lock_all_nodes:
-      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-    else:
-      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
-
-    # Declare instance locks
-    self.needed_locks[locking.LEVEL_INSTANCE] = names
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
-      self._LockInstancesNodes()
+    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
+    self.needed_locks = {
+      locking.LEVEL_NODE: [self.op.node_name],
+      }
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -6705,6 +6675,30 @@
     nl = [self.cfg.GetMasterNode()]
     return (nl, nl)
 
+  def CheckPrereq(self):
+    pass
+
+  def Exec(self, feedback_fn):
+    # Prepare jobs for migration instances
+    jobs = [
+      [opcodes.OpInstanceMigrate(instance_name=inst.name,
+                                 mode=self.op.mode,
+                                 live=self.op.live,
+                                 iallocator=self.op.iallocator,
+                                 target_node=self.op.target_node)]
+      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
+      ]
+
+    # TODO: Run iallocator in this opcode and pass correct placement options to
+    # OpInstanceMigrate. Since other jobs can modify the cluster between
+    # running the iallocator and the actual migration, a good consistency model
+    # will have to be found.
+
+    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+            frozenset([self.op.node_name]))
+
+    return ResultWithJobs(jobs)
+
 
 class TLMigrateInstance(Tasklet):
   """Tasklet class for instance migration.
b/lib/rapi/client.py

@@ -92,6 +92,7 @@
 _REQ_DATA_VERSION_FIELD = "__version__"
 _INST_CREATE_REQV1 = "instance-create-reqv1"
 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
 _INST_NIC_PARAMS = frozenset(["mac", "ip", "mode", "link"])
 _INST_CREATE_V0_DISK_PARAMS = frozenset(["size"])
 _INST_CREATE_V0_PARAMS = frozenset([
@@ -1289,7 +1290,8 @@
                              ("/%s/nodes/%s/evacuate" %
                               (GANETI_RAPI_VERSION, node)), query, None)
 
-  def MigrateNode(self, node, mode=None, dry_run=False):
+  def MigrateNode(self, node, mode=None, dry_run=False, iallocator=None,
+                  target_node=None):
     """Migrates all primary instances from a node.
 
     @type node: str
@@ -1299,20 +1301,46 @@
         otherwise the hypervisor default will be used
     @type dry_run: bool
     @param dry_run: whether to perform a dry run
+    @type iallocator: string
+    @param iallocator: instance allocator to use
+    @type target_node: string
+    @param target_node: Target node for shared-storage instances
 
     @rtype: string
     @return: job id
 
     """
     query = []
-    if mode is not None:
-      query.append(("mode", mode))
     if dry_run:
       query.append(("dry-run", 1))
 
-    return self._SendRequest(HTTP_POST,
-                             ("/%s/nodes/%s/migrate" %
-                              (GANETI_RAPI_VERSION, node)), query, None)
+    if _NODE_MIGRATE_REQV1 in self.GetFeatures():
+      body = {}
+
+      if mode is not None:
+        body["mode"] = mode
+      if iallocator is not None:
+        body["iallocator"] = iallocator
+      if target_node is not None:
+        body["target_node"] = target_node
+
+      assert len(query) <= 1
+
+      return self._SendRequest(HTTP_POST,
+                               ("/%s/nodes/%s/migrate" %
+                                (GANETI_RAPI_VERSION, node)), query, body)
+    else:
+      # Use old request format
+      if target_node is not None:
+        raise GanetiApiError("Server does not support specifying target node"
+                             " for node migration")
+
+      if mode is not None:
+        query.append(("mode", mode))
+
+      return self._SendRequest(HTTP_POST,
+                               ("/%s/nodes/%s/migrate" %
+                                (GANETI_RAPI_VERSION, node)), query, None)
 
   def GetNodeRole(self, node):
     """Gets the current role for a node.
b/lib/rapi/rlib2.py

@@ -104,6 +104,9 @@
 # Feature string for instance reinstall request version 1
 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
 
+# Feature string for node migration version 1
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+
 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
 _WFJC_TIMEOUT = 10
 
@@ -145,7 +148,7 @@
     """Returns list of optional RAPI features implemented.
 
     """
-    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1, _NODE_MIGRATE_REQV1]
 
 
 class R_2_os(baserlib.R_Generic):
@@ -455,18 +458,29 @@
     """
     node_name = self.items[0]
 
-    if "live" in self.queryargs and "mode" in self.queryargs:
-      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
-                                " be passed")
-    elif "live" in self.queryargs:
-      if self._checkIntVariable("live", default=1):
-        mode = constants.HT_MIGRATION_LIVE
+    if self.queryargs:
+      # Support old-style requests
+      if "live" in self.queryargs and "mode" in self.queryargs:
+        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+                                  " be passed")
+
+      if "live" in self.queryargs:
+        if self._checkIntVariable("live", default=1):
+          mode = constants.HT_MIGRATION_LIVE
+        else:
+          mode = constants.HT_MIGRATION_NONLIVE
       else:
-        mode = constants.HT_MIGRATION_NONLIVE
+        mode = self._checkStringVariable("mode", default=None)
+
+      data = {
+        "mode": mode,
+        }
     else:
-      mode = self._checkStringVariable("mode", default=None)
+      data = self.request_body
 
-    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
+    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
+      "node_name": node_name,
+      })
 
     return baserlib.SubmitJob([op])
 
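The handler keeps both wire formats working: non-empty query arguments are treated as an old-style request and converted into a body-shaped dict, while new-style requests are taken from request_body and validated via baserlib.FillOpcode with node_name forced from the URL. A sketch of the body a new-style client would send; the field values are hypothetical:

    import json

    # JSON body for POST /2/nodes/<node>/migrate in the new format; keys
    # mirror OpNodeMigrate fields, node_name itself comes from the URL.
    body = {
      "mode": "live",        # or omit to use the default migration mode
      "iallocator": "hail",  # let the allocator pick target nodes, or...
      # "target_node": "node9.example.com",  # ...name one explicitly
      }
    print(json.dumps(body))
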
b/test/ganeti.rapi.client_unittest.py

@@ -151,6 +151,7 @@
     self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
     self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
     self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
+    self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
     self.assertEqual(client._INST_NIC_PARAMS, constants.INIC_PARAMS)
     self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
     self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITLOCK)
@@ -835,13 +836,16 @@
                       "node-4", iallocator="hail", remote_node="node-5")
 
   def testMigrateNode(self):
+    self.rapi.AddResponse(serializer.DumpJson([]))
     self.rapi.AddResponse("1111")
     self.assertEqual(1111, self.client.MigrateNode("node-a", dry_run=True))
     self.assertHandler(rlib2.R_2_nodes_name_migrate)
     self.assertItems(["node-a"])
     self.assert_("mode" not in self.rapi.GetLastHandler().queryargs)
     self.assertDryRun()
+    self.assertFalse(self.rapi.GetLastRequestData())
 
+    self.rapi.AddResponse(serializer.DumpJson([]))
     self.rapi.AddResponse("1112")
     self.assertEqual(1112, self.client.MigrateNode("node-a", dry_run=True,
                                                    mode="live"))
@@ -849,6 +853,40 @@
     self.assertItems(["node-a"])
     self.assertQuery("mode", ["live"])
     self.assertDryRun()
+    self.assertFalse(self.rapi.GetLastRequestData())
+
+    self.rapi.AddResponse(serializer.DumpJson([]))
+    self.assertRaises(client.GanetiApiError, self.client.MigrateNode,
+                      "node-c", target_node="foonode")
+    self.assertEqual(self.rapi.CountPending(), 0)
+
+  def testMigrateNodeBodyData(self):
+    self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
+    self.rapi.AddResponse("27539")
+    self.assertEqual(27539, self.client.MigrateNode("node-a", dry_run=False,
+                                                    mode="live"))
+    self.assertHandler(rlib2.R_2_nodes_name_migrate)
+    self.assertItems(["node-a"])
+    self.assertFalse(self.rapi.GetLastHandler().queryargs)
+    self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
+                     { "mode": "live", })
+
+    self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
+    self.rapi.AddResponse("14219")
+    self.assertEqual(14219, self.client.MigrateNode("node-x", dry_run=True,
+                                                    target_node="node9",
+                                                    iallocator="ial"))
+    self.assertHandler(rlib2.R_2_nodes_name_migrate)
+    self.assertItems(["node-x"])
+    self.assertDryRun()
+    self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
+                     { "target_node": "node9", "iallocator": "ial", })
+
+    self.assertEqual(self.rapi.CountPending(), 0)
 
   def testGetNodeRole(self):
     self.rapi.AddResponse("\"master\"")
