Revision c19ad1e9

b/snf-cyclades-app/conf/20-snf-cyclades-app-backend.conf
46 46
## than 'max:nic-count' option of Ganeti's ipolicy.
47 47
#GANETI_MAX_NICS_PER_INSTANCE = 8
48 48
#
49
## Maximum number of disks per Ganeti instance. This value must be less than
50
## or equal to the 'max:disk-count' option of Ganeti's ipolicy.
51
#GANETI_MAX_DISKS_PER_INSTANCE = 8
52
#
49 53
## The following setting defines a dictionary with key-value parameters to be
50 54
## passed to each Ganeti ExtStorage provider. The setting defines a mapping
51 55
## from the provider name, e.g. 'archipelago' to a dictionary with the actual
b/snf-cyclades-app/synnefo/app_settings/default/backend.py
46 46
# than 'max:nic-count' option of Ganeti's ipolicy.
47 47
GANETI_MAX_NICS_PER_INSTANCE = 8
48 48

  
49
# Maximum number of disks per Ganeti instance. This value must be less than or
50
# equal to the 'max:disk-count' option of Ganeti's ipolicy.
51
GANETI_MAX_DISKS_PER_INSTANCE = 8
52

  
49 53
# The following setting defines a dictionary with key-value parameters to be
50 54
# passed to each Ganeti ExtStorage provider. The setting defines a mapping from
51 55
# the provider name, e.g. 'archipelago' to a dictionary with the actual
b/snf-cyclades-app/synnefo/logic/servers.py
79 79
    elif (action == "START" and operstate != "STOPPED") or\
80 80
         (action == "STOP" and operstate != "STARTED") or\
81 81
         (action == "RESIZE" and operstate != "STOPPED") or\
82
         (action in ["CONNECT", "DISCONNECT"] and operstate != "STOPPED"
82
         (action in ["CONNECT", "DISCONNECT"]
83
          and operstate != "STOPPED"
84
          and not settings.GANETI_USE_HOTPLUG) or \
85
         (action in ["ATTACH_VOLUME", "DETACH_VOLUME"]
86
          and operstate != "STOPPED"
83 87
          and not settings.GANETI_USE_HOTPLUG):
84 88
        raise faults.BadRequest("Cannot perform '%s' action while server is"
85 89
                                " in '%s' state." % (action, operstate))
......
756 760
    else:
757 761
        raise faults.BadRequest("Network 'uuid' or 'port' attribute"
758 762
                                " is required.")
763

  
764

  
765
@server_command("ATTACH_VOLUME")
def attach_volume(vm, volume):
    """Attach a volume to a server.

    The volume must be in 'AVAILABLE' (or still 'CREATING') status in order
    to be attached. The volume and the server must share the same disk
    template, and the number of volumes already attached to the server must
    be below the 'GANETI_MAX_DISKS_PER_INSTANCE' setting. This function will
    send the corresponding job to the Ganeti backend and update the status
    of the volume to 'ATTACHING'.

    Returns the ID of the submitted Ganeti job.

    """
    # Check volume state: only a detached (or still being created) volume
    # can be attached to a server.
    if volume.status not in ["AVAILABLE", "CREATING"]:
        raise faults.BadRequest("Cannot attach volume while volume is in"
                                " '%s' status." % volume.status)

    # Check that disk templates are the same
    if volume.disk_template != vm.flavor.disk_template:
        msg = ("Volume and server must have the same disk template. Volume has"
               " disk template '%s' while server has '%s'"
               % (volume.disk_template, vm.flavor.disk_template))
        raise faults.BadRequest(msg)

    # Check maximum disks per instance hard limit. Use '>=' rather than '=='
    # so the check still rejects attachment if the setting is ever lowered
    # below the number of volumes already attached.
    if vm.volumes.count() >= settings.GANETI_MAX_DISKS_PER_INSTANCE:
        raise faults.BadRequest("Maximum volumes per server limit reached")

    jobid = backend.attach_volume(vm, volume)

    volume.backendjobid = jobid
    volume.machine = vm
    volume.status = "ATTACHING"
    volume.save()

    # Log after 'volume.machine' has been updated so the logged server ID is
    # the server the volume was attached to; the original code logged the
    # stale, pre-attachment 'machine_id' (typically None).
    log.info("Attached volume '%s' to server '%s'. JobID: '%s'", volume.id,
             volume.machine_id, jobid)

    return jobid
802

  
803

  
804
@server_command("DETACH_VOLUME")
def detach_volume(vm, volume):
    """Detach a volume from a server.

    The volume must be in 'IN_USE' status in order to be detached. Also,
    the root volume of the instance (index=0) can not be detached. This
    function will send the corresponding job to the Ganeti backend and
    update the status of the volume to 'DETACHING'.

    Returns the ID of the submitted Ganeti job.

    """
    # The volume must actually be attached to this server.
    _check_attachment(vm, volume)
    if volume.status != "IN_USE":
        # TODO: Maybe allow other statuses as well ?
        raise faults.BadRequest("Cannot detach volume while volume is in"
                                " '%s' status." % volume.status)
    if volume.index == 0:
        # A Ganeti instance cannot run without its root disk.
        raise faults.BadRequest("Cannot detach the root volume of a server")
    jobid = backend.detach_volume(vm, volume)
    log.info("Detached volume '%s' from server '%s'. JobID: '%s'", volume.id,
             volume.machine_id, jobid)
    volume.backendjobid = jobid
    volume.status = "DETACHING"
    volume.save()
    return jobid
829

  
830

  
831
def _check_attachment(vm, volume):
    """Check that volume is attached to vm.

    Raises BadRequest if the volume is attached to a different server (or to
    no server at all).

    """
    if volume.machine_id != vm.id:
        # Parenthesize the format arguments: the original '% volume.id, vm.id'
        # applied '%' to 'volume.id' only (raising TypeError, "not enough
        # arguments for format string") and passed 'vm.id' as a spurious
        # second positional argument to BadRequest.
        raise faults.BadRequest("Volume '%s' is not attached to server '%s'"
                                % (volume.id, vm.id))
b/snf-cyclades-app/synnefo/volume/volumes.py
4 4
from synnefo.db.models import Volume
5 5
from snf_django.lib.api import faults
6 6
from synnefo.volume import util
7
from synnefo.logic import backend
7
from synnefo.logic import backend, servers
8 8

  
9 9
log = logging.getLogger(__name__)
10 10

  
......
43 43
            msg = ("Cannot take a snapshot while snapshot is in '%s' state"
44 44
                   % source_volume.status)
45 45
            raise faults.BadRequest(msg)
46
        source = Volume.SOURCE_VOLUME_PREFIX + str(source_volume_id)
46
        source = Volume.prefix_source(source_volume_id, source_type="volume")
47 47
        origin = source_volume.backend_volume_uuid
48 48
    elif source_snapshot_id is not None:
49 49
        source_snapshot = util.get_snapshot(user_id, source_snapshot_id,
50 50
                                            exception=faults.BadRequest)
51 51
        # TODO: Check the state of the snapshot!!
52
        source = Volume.prefix_source(source_snapshot_id,
53
                                      source_type="snapshot")
52 54
        origin = source_snapshot["checksum"]
53
        source = Volume.SOURCE_SNAPSHOT_PREFIX + str(source_snapshot_id)
54 55
    elif source_image_id is not None:
55 56
        source_image = util.get_image(user_id, source_image_id,
56 57
                                      exception=faults.BadRequest)
58
        source = Volume.prefix_source(source_image_id, source_type="image")
57 59
        origin = source_image["checksum"]
58
        source = Volume.SOURCE_IMAGE_PREFIX + str(source_image_id)
59 60

  
60 61
    volume = Volume.objects.create(userid=user_id,
61 62
                                   size=size,
......
71 72
        for meta_key, meta_val in metadata.items():
72 73
            volume.metadata.create(key=meta_key, value=meta_val)
73 74

  
74
    # Create the disk in the backend
75
    volume.backendjobid = backend.attach_volume(server, volume)
76
    volume.save()
75
    servers.attach_volume(server, volume)
77 76

  
78 77
    return volume
79 78

  
......
83 82
    """Delete a Volume"""
84 83
    # A volume is deleted by detaching it from the server that is attached.
85 84
    # Deleting a detached volume is not implemented.
86
    if volume.index == 0:
87
        raise faults.BadRequest("Cannot detach the root volume of a server")
88

  
89 85
    if volume.machine_id is not None:
90
        volume.backendjobid = backend.detach_volume(volume.machine, volume)
86
        servers.detach_volume(volume.machine, volume)
91 87
        log.info("Detach volume '%s' from server '%s', job: %s",
92 88
                 volume.id, volume.machine_id, volume.backendjobid)
93 89
    else:

Also available in: Unified diff