Revision 17733ecb qa/rapi-workload.py

--- a/qa/rapi-workload.py
+++ b/qa/rapi-workload.py
@@ -34,6 +34,7 @@
 from ganeti.rapi.client import GanetiApiError, NODE_EVAC_PRI, NODE_EVAC_SEC
 
 import qa_config
+import qa_error
 import qa_node
 import qa_rapi
 
@@ -690,31 +691,64 @@
   Finish(client, client.DeleteInstance, instance_name)
 
 
-def TestJobCancellation(client, node_one, node_two, instance_one, instance_two):
-  """ Test if jobs can be cancelled.
+def ExtractAllNicInformationPossible(nics, replace_macs=True):
+  """ Extracts NIC information as a dictionary.
 
-  @type node_one string
-  @param node_one The name of a node in the cluster.
-  @type node_two string
-  @param node_two The name of a node in the cluster.
-  @type instance_one string
-  @param instance_one An available instance name.
-  @type instance_two string
-  @param instance_two An available instance name.
+  @type nics list of tuples of varying structure
+  @param nics The network interfaces, as received from the instance info RAPI
+              call.
+
+  @rtype list of dict
+  @return Dictionaries of NIC information.
+
+  The NIC information is returned in a different format across versions, and to
+  try and see if the execution of commands is still compatible, this function
+  attempts to grab all the info that it can.
 
   """
 
-  # Just in case, remove all previously present instances
-  RemoveAllInstances(client)
+  desired_entries = [
+    constants.INIC_IP,
+    constants.INIC_MAC,
+    constants.INIC_MODE,
+    constants.INIC_LINK,
+    constants.INIC_VLAN,
+    constants.INIC_NETWORK,
+    constants.INIC_NAME,
+    ]
 
-  # Let us issue a job that is sure to both succeed and last for a while
-  running_job = client.CreateInstance("create", instance_one, "drbd",
-                                      [{"size": "5000"}], [{}],
-                                      os="debian-image", pnode=node_one,
-                                      snode=node_two)
+  nic_dicts = []
+  for nic_index in range(len(nics)):
+    nic_raw_data = nics[nic_index]
 
-  # And immediately afterwards, another very similar one
-  job_to_cancel = client.CreateInstance("create", instance_two, "drbd",
-                                        [{"size": "5000"}], [{}],
-                                        os="debian-image", pnode=node_one,
-                                        snode=node_two)
+    # Fill dictionary with None-s as defaults
+    nic_dict = dict([(key, None) for key in desired_entries])
+
+    try:
+      # The 2.6 format
+      ip, mac, mode, link = nic_raw_data
+    except ValueError:
+      # If there is yet another ValueError here, let it go through as it is
+      # legitimate - we are out of versions
+
+      # The 2.11 format
+      nic_name, _, ip, mac, mode, link, vlan, network, _ = nic_raw_data
+      nic_dict[constants.INIC_VLAN] = vlan
+      nic_dict[constants.INIC_NETWORK] = network
+      nic_dict[constants.INIC_NAME] = nic_name
+
+    # These attributes will be present in either version
+    nic_dict[constants.INIC_IP] = ip
+    nic_dict[constants.INIC_MAC] = mac
+    nic_dict[constants.INIC_MODE] = mode
+    nic_dict[constants.INIC_LINK] = link
+
+    # Very simple mac generation, which should work as the setup cluster should
+    # have no mac prefix restrictions in the default network, and there is a
+    # hard and reasonable limit of only 8 NICs
+    if replace_macs:
+      nic_dict[constants.INIC_MAC] = "00:00:00:00:00:%02x" % nic_index
+
+    nic_dicts.append(nic_dict)
+
+  return nic_dicts
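
The helper above has to accept two different tuple layouts for the same NIC data. The following self-contained sketch shows the same normalization idea with plain string keys standing in for the constants.INIC_* values; the _normalize_nic name and the sample tuples are illustrative only, not part of the revision.

# Sketch only: string keys stand in for constants.INIC_*, and the sample
# tuples below merely mimic the 2.6 and 2.11 layouts unpacked above.
def _normalize_nic(nic_raw_data, nic_index, replace_macs=True):
  nic_dict = dict.fromkeys(
    ["ip", "mac", "mode", "link", "vlan", "network", "name"])
  try:
    # 2.6 layout: (ip, mac, mode, link)
    ip, mac, mode, link = nic_raw_data
  except ValueError:
    # 2.11 layout: nine fields; the second and last are ignored, as above
    name, _, ip, mac, mode, link, vlan, network, _ = nic_raw_data
    nic_dict.update({"vlan": vlan, "network": network, "name": name})
  nic_dict.update({"ip": ip, "mac": mac, "mode": mode, "link": link})
  if replace_macs:
    # Deterministic MACs, mirroring the "00:00:00:00:00:%02x" scheme above
    nic_dict["mac"] = "00:00:00:00:00:%02x" % nic_index
  return nic_dict

# Both layouts collapse to the same dictionary shape:
print(_normalize_nic((None, "aa:00:00:11:22:33", "bridged", "xen-br0"), 0))
print(_normalize_nic(("nic0", None, None, "aa:00:00:11:22:33", "bridged",
                      "xen-br0", None, None, None), 1))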
@@ -721,25 +755,141 @@
 
-  # Try to cancel, which should fail as the job is already running
-  success, msg = client.CancelJob(running_job)
-  if success:
-    print "Job succeeded: this should not have happened as it is running!"
-    print "Message: %s" % msg
 
-  success, msg = client.CancelJob(job_to_cancel)
+def MoveInstance(client, src_instance, dst_instance, src_node, dst_node):
+  """ Moves a single instance, compatible with 2.6.
+
+  @rtype bool
+  @return Whether the instance was moved successfully
+
+  """
+  success, inst_info_all = Finish(client, client.GetInstanceInfo,
+                                  src_instance.name, static=True)
+
+  if not success or src_instance.name not in inst_info_all:
+    raise Exception("Did not find the source instance information!")
+
+  inst_info = inst_info_all[src_instance.name]
+
+  # Try to extract NICs first, as this is the operation most likely to fail
+  try:
+    nic_info = ExtractAllNicInformationPossible(inst_info["nics"])
+  except ValueError:
+    # Without the NIC info, there is very little we can do
+    return False
+
+  NIC_COMPONENTS_26 = [
+    constants.INIC_IP,
+    constants.INIC_MAC,
+    constants.INIC_MODE,
+    constants.INIC_LINK,
+    ]
+
+  nic_converter = lambda old: dict((k, old[k]) for k in NIC_COMPONENTS_26)
+  nics = map(nic_converter, nic_info)
+
+  # Prepare the parameters
+  disks = []
+  for idisk in inst_info["disks"]:
+    odisk = {
+      constants.IDISK_SIZE: idisk["size"],
+      constants.IDISK_MODE: idisk["mode"],
+      }
+
+    spindles = idisk.get("spindles")
+    if spindles is not None:
+      odisk[constants.IDISK_SPINDLES] = spindles
+
+    # Disk name may be present, but must not be supplied in 2.6!
+    disks.append(odisk)
+
+  # With all the parameters properly prepared, try the export
+  success, exp_info = Finish(client, client.PrepareExport,
+                             src_instance.name, constants.EXPORT_MODE_REMOTE)
+
   if not success:
-    print "Job failed: this was unexpected as it was not a dry run"
-    print "Message: %s" % msg
+    # The instance will still have to be deleted
+    return False
+
+  success, _ = Finish(client, client.CreateInstance,
+                      constants.INSTANCE_REMOTE_IMPORT, dst_instance.name,
+                      inst_info["disk_template"], disks, nics,
+                      os=inst_info["os"],
+                      pnode=dst_node.primary,
+                      snode=src_node.primary, # Ignored as no DRBD
+                      start=(inst_info["config_state"] == "up"),
+                      ip_check=False,
+                      iallocator=inst_info.get("iallocator", None),
+                      hypervisor=inst_info["hypervisor"],
+                      source_handshake=exp_info["handshake"],
+                      source_x509_ca=exp_info["x509_ca"],
+                      source_instance_name=inst_info["name"],
+                      beparams=inst_info["be_instance"],
+                      hvparams=inst_info["hv_instance"],
+                      osparams=inst_info["os_instance"])
+
+  return success
+
+
+def CreateInstanceForMoveTest(client, node, instance):
+  """ Creates a single shutdown instance to move about in tests.
+
+  @type node C{_QaNode}
+  @param node A node configuration object.
+  @type instance C{_QaInstance}
+  @param instance An instance configuration object.
+
+  """
+  Finish(client, client.CreateInstance,
+         "create", instance.name, "plain", [{"size": "2000"}], [{}],
+         os="debian-image", pnode=node.primary)
 
-  # And wait for the proper job
-  client.WaitForJobCompletion(running_job)
+  Finish(client, client.ShutdownInstance,
+         instance.name, dry_run=False, no_remember=False)
 
-  # Remove all the leftover instances, success or no success
-  RemoveAllInstances(client)
+
+def Test26InstanceMove(client, node_one, node_two, instance_to_create,
+                       new_instance):
+  """ Tests instance moves using commands that work in 2.6.
+
+  """
+
+  # First create the instance to move
+  CreateInstanceForMoveTest(client, node_one, instance_to_create)
+
+  # The cleanup should be conditional on operation success
+  if MoveInstance(client, instance_to_create, new_instance, node_one, node_two):
+    Finish(client, client.DeleteInstance, new_instance.name)
+  else:
+    Finish(client, client.DeleteInstance, instance_to_create.name)
+
+
+def Test211InstanceMove(client, node_one, node_two, instance_to_create,
+                        new_instance):
+  """ Tests instance moves using the QA-provided move test.
+
+  """
+
+  # First create the instance to move
+  CreateInstanceForMoveTest(client, node_one, instance_to_create)
+
+  instance_to_create.SetDiskTemplate("plain")
+
+  try:
+    qa_rapi.TestInterClusterInstanceMove(instance_to_create, new_instance,
+                                         [node_one], node_two,
+                                         perform_checks=False)
+  except qa_error.Error:
+    # A failure is sad, but requires no special actions to be undertaken
+    pass
+
+  # Try to delete the instance when done - either the move has failed, or
+  # a double move was performed - the instance to delete is one and the same
+  Finish(client, client.DeleteInstance, instance_to_create.name)
 
 
 def TestInstanceMoves(client, node_one, node_two, instance_to_create,
                       new_instance):
-  """ Reuses a part of the QA to test instance moves.
+  """ Performs two types of instance moves, one compatible with 2.6, the other
+  with 2.11.
 
   @type node_one C{_QaNode}
   @param node_one A node configuration object.
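
All of the new helpers funnel their RAPI calls through the file's existing Finish wrapper, which is not shown in this hunk. The sketch below only illustrates the convention those call sites assume (submit a call that returns a job id, wait for the job, hand back a (success, result) pair); the _finish_sketch name is made up and this is not the actual implementation.

def _finish_sketch(client, method, *args, **kwargs):
  # RAPI query/mutation methods return a job id rather than the result itself
  job_id = method(*args, **kwargs)
  client.WaitForJobCompletion(job_id)
  job = client.GetJobStatus(job_id)
  success = (job["status"] == "success")
  # A finished job carries its per-opcode results in the "opresult" list
  result = job["opresult"][0] if success else job["opresult"]
  return success, result

Under that convention, the repeated "success, result = Finish(client, client.Something, ...)" pattern in the new code reads as: run the call as a job, wait for it, and report whether it succeeded.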
@@ -752,22 +902,10 @@
 
   """
 
-  # First create the instance to move
-  Finish(client, client.CreateInstance,
-         "create", instance_to_create.name, "plain", [{"size": "2000"}], [{}],
-         os="debian-image", pnode=node_one.primary)
-
-  Finish(client, client.ShutdownInstance,
-         instance_to_create.name, dry_run=False, no_remember=False)
-
-  instance_to_create.SetDiskTemplate("plain")
-
-  qa_rapi.TestInterClusterInstanceMove(instance_to_create, new_instance,
-                                       [node_one], node_two,
-                                       perform_checks=False)
-
-  # Finally, cleanup
-  RemoveAllInstances(client)
+  Test26InstanceMove(client, node_one, node_two, instance_to_create,
+                     new_instance)
+  Test211InstanceMove(client, node_one, node_two, instance_to_create,
+                      new_instance)
 
 
 def Workload(client):
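
The rewritten TestInstanceMoves keeps the signature its caller already uses, so the Workload driver does not need to change. A hypothetical invocation is sketched below; the node/instance acquisition and release calls are assumptions about how the driver obtains its qa_config objects, not part of this revision.

# Hypothetical wiring only: the acquisition/release calls are assumptions.
def _move_tests_sketch(client):
  node_one, node_two = qa_config.AcquireManyNodes(2)
  instance_to_create = qa_config.AcquireInstance()
  new_instance = qa_config.AcquireInstance()
  try:
    TestInstanceMoves(client, node_one, node_two,
                      instance_to_create, new_instance)
  finally:
    instance_to_create.Release()
    new_instance.Release()
    qa_config.ReleaseManyNodes([node_one, node_two])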
