Revision 5d805533
b/snf-cyclades-app/synnefo/api/servers.py | ||
---|---|---|
66 | 66 |
(r'^/(\d+)/os-volume_attachments/(\d+)(?:.json)?$', 'demux_volumes_item'), |
67 | 67 |
) |
68 | 68 |
|
69 |
VOLUME_SOURCE_TYPES = [ |
|
70 |
"snapshot", |
|
71 |
"image", |
|
72 |
"volume", |
|
73 |
"blank" |
|
74 |
] |
|
75 |
|
|
69 | 76 |
|
70 | 77 |
def demux(request): |
71 | 78 |
if request.method == 'GET': |
... | ... | |
413 | 420 |
except (KeyError, AssertionError): |
414 | 421 |
raise faults.BadRequest("Malformed request") |
415 | 422 |
|
423 |
volumes = None |
|
424 |
dev_map = server.get("block_device_mapping_v2") |
|
425 |
if dev_map is not None: |
|
426 |
volumes = parse_block_device_mapping(dev_map) |
|
427 |
|
|
416 | 428 |
# Verify that personalities are well-formed |
417 | 429 |
util.verify_personality(personality) |
418 |
# Get image information |
|
419 |
image = util.get_image_dict(image_id, user_id) |
|
420 | 430 |
# Get flavor (ensure it is active) |
421 | 431 |
flavor = util.get_flavor(flavor_id, include_deleted=False) |
422 | 432 |
if not flavor.allow_create: |
... | ... | |
426 | 436 |
# Generate password |
427 | 437 |
password = util.random_password() |
428 | 438 |
|
429 |
vm = servers.create(user_id, name, password, flavor, image, |
|
439 |
vm = servers.create(user_id, name, password, flavor, image_id,
|
|
430 | 440 |
metadata=metadata, personality=personality, |
431 |
networks=networks) |
|
441 |
networks=networks, volumes=volumes)
|
|
432 | 442 |
|
433 | 443 |
server = vm_to_dict(vm, detail=True) |
434 | 444 |
server['status'] = 'BUILD' |
... | ... | |
439 | 449 |
return response |
440 | 450 |
|
441 | 451 |
|
452 |
def parse_block_device_mapping(dev_map): |
|
453 |
"""Parse 'block_device_mapping_v2' attribute""" |
|
454 |
if not isinstance(dev_map, list): |
|
455 |
raise faults.BadRequest("Block Device Mapping is Invalid") |
|
456 |
return [_parse_block_device(device) for device in dev_map] |
|
457 |
|
|
458 |
|
|
459 |
def _parse_block_device(device): |
|
460 |
"""Parse and validate a block device mapping""" |
|
461 |
if not isinstance(device, dict): |
|
462 |
raise faults.BadRequest("Block Device Mapping is Invalid") |
|
463 |
|
|
464 |
# Validate source type |
|
465 |
source_type = device.get("source_type") |
|
466 |
if source_type is None: |
|
467 |
raise faults.BadRequest("Block Device Mapping is Invalid: Invalid" |
|
468 |
" source_type field") |
|
469 |
elif source_type not in VOLUME_SOURCE_TYPES: |
|
470 |
raise faults.BadRequest("Block Device Mapping is Invalid: source_type" |
|
471 |
" must be on of %s" |
|
472 |
% ", ".join(VOLUME_SOURCE_TYPES)) |
|
473 |
|
|
474 |
# Validate source UUID |
|
475 |
uuid = device.get("uuid") |
|
476 |
if uuid is None and source_type != "blank": |
|
477 |
raise faults.BadRequest("Block Device Mapping is Invalid: uuid of" |
|
478 |
" %s is missing" % source_type) |
|
479 |
|
|
480 |
# Validate volume size |
|
481 |
size = device.get("volume_size") |
|
482 |
if size is not None: |
|
483 |
try: |
|
484 |
size = int(size) |
|
485 |
except (TypeError, ValueError): |
|
486 |
raise faults.BadRequest("Block Device Mapping is Invalid: Invalid" |
|
487 |
" size field") |
|
488 |
|
|
489 |
# Validate 'delete_on_termination' |
|
490 |
delete_on_termination = device.get("delete_on_termination") |
|
491 |
if delete_on_termination is not None: |
|
492 |
if not isinstance(delete_on_termination, bool): |
|
493 |
raise faults.BadRequest("Block Device Mapping is Invalid: Invalid" |
|
494 |
" delete_on_termination field") |
|
495 |
else: |
|
496 |
if source_type == "volume": |
|
497 |
delete_on_termination = False |
|
498 |
else: |
|
499 |
delete_on_termination = True |
|
500 |
|
|
501 |
# Unused API Attributes |
|
502 |
# boot_index = device.get("boot_index") |
|
503 |
# destination_type = device.get("destination_type") |
|
504 |
|
|
505 |
return {"source_type": source_type, |
|
506 |
"source_uuid": uuid, |
|
507 |
"size": size, |
|
508 |
"delete_on_termination": delete_on_termination} |
|
509 |
|
|
510 |
|
|
442 | 511 |
@api.api_method(http_method='GET', user_required=True, logger=log) |
443 | 512 |
def get_server_details(request, server_id): |
444 | 513 |
# Normal Response Codes: 200, 203 |
... | ... | |
984 | 1053 |
return {"id": volume.id, |
985 | 1054 |
"volumeId": volume.id, |
986 | 1055 |
"serverId": volume.machine_id, |
987 |
"device": ""} # TODO: What device to return?
|
|
1056 |
"device": ""} # TODO: What device to return? |
|
988 | 1057 |
|
989 | 1058 |
|
990 | 1059 |
@api.api_method(http_method='GET', user_required=True, logger=log) |
b/snf-cyclades-app/synnefo/api/tests/servers.py | ||
---|---|---|
37 | 37 |
from snf_django.utils.testing import (BaseAPITest, mocked_quotaholder, |
38 | 38 |
override_settings) |
39 | 39 |
from synnefo.db.models import (VirtualMachine, VirtualMachineMetadata, |
40 |
IPAddress, NetworkInterface) |
|
40 |
IPAddress, NetworkInterface, Volume)
|
|
41 | 41 |
from synnefo.db import models_factory as mfactory |
42 | 42 |
from synnefo.logic.utils import get_rsapi_state |
43 | 43 |
from synnefo.cyclades_settings import cyclades_services |
... | ... | |
320 | 320 |
'checksum': '1234', |
321 | 321 |
"id": 1, |
322 | 322 |
"name": "test_image", |
323 |
"size": "41242", |
|
323 |
"size": 1024, |
|
324 |
"is_snapshot": False, |
|
324 | 325 |
'disk_format': 'diskdump'} |
325 | 326 |
|
326 | 327 |
|
327 | 328 |
@patch('synnefo.api.util.get_image', fixed_image) |
329 |
@patch('synnefo.volume.util.get_snapshot', fixed_image) |
|
328 | 330 |
@patch('synnefo.logic.rapi_pool.GanetiRapiClient') |
329 | 331 |
class ServerCreateAPITest(ComputeAPITest): |
330 | 332 |
def setUp(self): |
... | ... | |
585 | 587 |
json.dumps(request), 'json') |
586 | 588 |
self.assertEqual(response.status_code, 404) |
587 | 589 |
|
590 |
def test_create_server_with_volumes(self, mrapi): |
|
591 |
user = "test_user" |
|
592 |
mrapi().CreateInstance.return_value = 42 |
|
593 |
# Test creation without any volumes. Server will use flavor+image |
|
594 |
request = deepcopy(self.request) |
|
595 |
request["server"]["block_device_mapping_v2"] = [] |
|
596 |
with mocked_quotaholder(): |
|
597 |
response = self.mypost("servers", user, |
|
598 |
json.dumps(request), 'json') |
|
599 |
self.assertEqual(response.status_code, 202, msg=response.content) |
|
600 |
vm_id = json.loads(response.content)["server"]["id"] |
|
601 |
volume = Volume.objects.get(machine_id=vm_id) |
|
602 |
self.assertEqual(volume.disk_template, self.flavor.disk_template) |
|
603 |
self.assertEqual(volume.size, self.flavor.disk) |
|
604 |
self.assertEqual(volume.source, "image:%s" % fixed_image()["id"]) |
|
605 |
self.assertEqual(volume.delete_on_termination, True) |
|
606 |
self.assertEqual(volume.userid, user) |
|
607 |
|
|
608 |
# Test using an image |
|
609 |
request["server"]["block_device_mapping_v2"] = [ |
|
610 |
{"source_type": "image", |
|
611 |
"uuid": fixed_image()["id"], |
|
612 |
"volume_size": 10, |
|
613 |
"delete_on_termination": False} |
|
614 |
] |
|
615 |
with mocked_quotaholder(): |
|
616 |
response = self.mypost("servers", user, |
|
617 |
json.dumps(request), 'json') |
|
618 |
self.assertEqual(response.status_code, 202, msg=response.content) |
|
619 |
vm_id = json.loads(response.content)["server"]["id"] |
|
620 |
volume = Volume.objects.get(machine_id=vm_id) |
|
621 |
self.assertEqual(volume.disk_template, self.flavor.disk_template) |
|
622 |
self.assertEqual(volume.size, 10) |
|
623 |
self.assertEqual(volume.source, "image:%s" % fixed_image()["id"]) |
|
624 |
self.assertEqual(volume.delete_on_termination, False) |
|
625 |
self.assertEqual(volume.userid, user) |
|
626 |
self.assertEqual(volume.origin, "pithos:" + fixed_image()["checksum"]) |
|
627 |
|
|
628 |
# Test using a snapshot |
|
629 |
request["server"]["block_device_mapping_v2"] = [ |
|
630 |
{"source_type": "snapshot", |
|
631 |
"uuid": fixed_image()["id"], |
|
632 |
"volume_size": 10, |
|
633 |
"delete_on_termination": False} |
|
634 |
] |
|
635 |
with mocked_quotaholder(): |
|
636 |
response = self.mypost("servers", user, |
|
637 |
json.dumps(request), 'json') |
|
638 |
self.assertEqual(response.status_code, 202, msg=response.content) |
|
639 |
vm_id = json.loads(response.content)["server"]["id"] |
|
640 |
volume = Volume.objects.get(machine_id=vm_id) |
|
641 |
self.assertEqual(volume.disk_template, self.flavor.disk_template) |
|
642 |
self.assertEqual(volume.size, 10) |
|
643 |
self.assertEqual(volume.source, "snapshot:%s" % fixed_image()["id"]) |
|
644 |
self.assertEqual(volume.origin, fixed_image()["checksum"]) |
|
645 |
self.assertEqual(volume.delete_on_termination, False) |
|
646 |
self.assertEqual(volume.userid, user) |
|
647 |
|
|
648 |
source_volume = volume |
|
649 |
# Test using source volume |
|
650 |
request["server"]["block_device_mapping_v2"] = [ |
|
651 |
{"source_type": "volume", |
|
652 |
"uuid": source_volume.id, |
|
653 |
"volume_size": source_volume.size, |
|
654 |
"delete_on_termination": False} |
|
655 |
] |
|
656 |
with mocked_quotaholder(): |
|
657 |
response = self.mypost("servers", user, |
|
658 |
json.dumps(request), 'json') |
|
659 |
# This will fail because the volume is not AVAILABLE. |
|
660 |
self.assertBadRequest(response) |
|
661 |
|
|
662 |
# Test using a blank volume |
|
663 |
request["server"]["block_device_mapping_v2"] = [ |
|
664 |
{"source_type": "blank", |
|
665 |
"volume_size": 10, |
|
666 |
"delete_on_termination": True} |
|
667 |
] |
|
668 |
with mocked_quotaholder(): |
|
669 |
response = self.mypost("servers", user, |
|
670 |
json.dumps(request), 'json') |
|
671 |
self.assertBadRequest(response) |
|
672 |
|
|
588 | 673 |
|
589 | 674 |
@patch('synnefo.logic.rapi_pool.GanetiRapiClient') |
590 | 675 |
class ServerDestroyAPITest(ComputeAPITest): |
b/snf-cyclades-app/synnefo/logic/backend.py | ||
---|---|---|
711 | 711 |
kw['name'] = vm.backend_vm_id |
712 | 712 |
# Defined in settings.GANETI_CREATEINSTANCE_KWARGS |
713 | 713 |
|
714 |
kw['disk_template'] = flavor.disk_template
|
|
714 |
kw['disk_template'] = volumes[0].template
|
|
715 | 715 |
disks = [] |
716 | 716 |
for volume in volumes: |
717 | 717 |
disk = {"name": volume.backend_volume_uuid, |
b/snf-cyclades-app/synnefo/logic/commands.py | ||
---|---|---|
1 |
# Copyright 2011, 2012, 2013 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or without |
|
4 |
# modification, are permitted provided that the following conditions |
|
5 |
# are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above copyright |
|
8 |
# notice, this list of conditions and the following disclaimer. |
|
9 |
# |
|
10 |
# 2. Redistributions in binary form must reproduce the above copyright |
|
11 |
# notice, this list of conditions and the following disclaimer in the |
|
12 |
# documentation and/or other materials provided with the distribution. |
|
13 |
# |
|
14 |
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
|
15 |
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
16 |
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
|
17 |
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
|
18 |
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
|
19 |
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
|
20 |
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
|
21 |
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
22 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
|
23 |
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
|
24 |
# SUCH DAMAGE. |
|
25 |
# |
|
26 |
# The views and conclusions contained in the software and documentation are |
|
27 |
# those of the authors and should not be interpreted as representing official |
|
28 |
# policies, either expressed or implied, of GRNET S.A. |
|
29 |
|
|
30 |
import logging |
|
31 |
|
|
32 |
from functools import wraps |
|
33 |
from django.db import transaction |
|
34 |
|
|
35 |
from django.conf import settings |
|
36 |
from snf_django.lib.api import faults |
|
37 |
from synnefo import quotas |
|
38 |
from synnefo.db.models import VirtualMachine |
|
39 |
|
|
40 |
|
|
41 |
log = logging.getLogger(__name__) |
|
42 |
|
|
43 |
|
|
44 |
def validate_server_action(vm, action): |
|
45 |
if vm.deleted: |
|
46 |
raise faults.BadRequest("Server '%s' has been deleted." % vm.id) |
|
47 |
|
|
48 |
# Destroyin a server should always be permitted |
|
49 |
if action == "DESTROY": |
|
50 |
return |
|
51 |
|
|
52 |
# Check that there is no pending action |
|
53 |
pending_action = vm.task |
|
54 |
if pending_action: |
|
55 |
if pending_action == "BUILD": |
|
56 |
raise faults.BuildInProgress("Server '%s' is being build." % vm.id) |
|
57 |
raise faults.BadRequest("Cannot perform '%s' action while there is a" |
|
58 |
" pending '%s'." % (action, pending_action)) |
|
59 |
|
|
60 |
# Check if action can be performed to VM's operstate |
|
61 |
operstate = vm.operstate |
|
62 |
if operstate == "ERROR": |
|
63 |
raise faults.BadRequest("Cannot perform '%s' action while server is" |
|
64 |
" in 'ERROR' state." % action) |
|
65 |
elif operstate == "BUILD" and action != "BUILD": |
|
66 |
raise faults.BuildInProgress("Server '%s' is being build." % vm.id) |
|
67 |
elif (action == "START" and operstate != "STOPPED") or\ |
|
68 |
(action == "STOP" and operstate != "STARTED") or\ |
|
69 |
(action == "RESIZE" and operstate != "STOPPED") or\ |
|
70 |
(action in ["CONNECT", "DISCONNECT"] |
|
71 |
and operstate != "STOPPED" |
|
72 |
and not settings.GANETI_USE_HOTPLUG) or \ |
|
73 |
(action in ["ATTACH_VOLUME", "DETACH_VOLUME"] |
|
74 |
and operstate != "STOPPED" |
|
75 |
and not settings.GANETI_USE_HOTPLUG): |
|
76 |
raise faults.BadRequest("Cannot perform '%s' action while server is" |
|
77 |
" in '%s' state." % (action, operstate)) |
|
78 |
return |
|
79 |
|
|
80 |
|
|
81 |
def server_command(action, action_fields=None): |
|
82 |
"""Handle execution of a server action. |
|
83 |
|
|
84 |
Helper function to validate and execute a server action, handle quota |
|
85 |
commission and update the 'task' of the VM in the DB. |
|
86 |
|
|
87 |
1) Check if action can be performed. If it can, then there must be no |
|
88 |
pending task (with the exception of DESTROY). |
|
89 |
2) Handle previous commission if unresolved: |
|
90 |
* If it is not pending and it to accept, then accept |
|
91 |
* If it is not pending and to reject or is pending then reject it. Since |
|
92 |
the action can be performed only if there is no pending task, then there |
|
93 |
can be no pending commission. The exception is DESTROY, but in this case |
|
94 |
the commission can safely be rejected, and the dispatcher will generate |
|
95 |
the correct ones! |
|
96 |
3) Issue new commission and associate it with the VM. Also clear the task. |
|
97 |
4) Send job to ganeti |
|
98 |
5) Update task and commit |
|
99 |
""" |
|
100 |
def decorator(func): |
|
101 |
@wraps(func) |
|
102 |
@transaction.commit_on_success |
|
103 |
def wrapper(vm, *args, **kwargs): |
|
104 |
user_id = vm.userid |
|
105 |
validate_server_action(vm, action) |
|
106 |
vm.action = action |
|
107 |
|
|
108 |
commission_name = "client: api, resource: %s" % vm |
|
109 |
quotas.handle_resource_commission(vm, action=action, |
|
110 |
action_fields=action_fields, |
|
111 |
commission_name=commission_name) |
|
112 |
vm.save() |
|
113 |
|
|
114 |
# XXX: Special case for server creation! |
|
115 |
if action == "BUILD": |
|
116 |
# Perform a commit, because the VirtualMachine must be saved to |
|
117 |
# DB before the OP_INSTANCE_CREATE job in enqueued in Ganeti. |
|
118 |
# Otherwise, messages will arrive from snf-dispatcher about |
|
119 |
# this instance, before the VM is stored in DB. |
|
120 |
transaction.commit() |
|
121 |
# After committing the locks are released. Refetch the instance |
|
122 |
# to guarantee x-lock. |
|
123 |
vm = VirtualMachine.objects.select_for_update().get(id=vm.id) |
|
124 |
|
|
125 |
# Send the job to Ganeti and get the associated jobID |
|
126 |
try: |
|
127 |
job_id = func(vm, *args, **kwargs) |
|
128 |
except Exception as e: |
|
129 |
if vm.serial is not None: |
|
130 |
# Since the job never reached Ganeti, reject the commission |
|
131 |
log.debug("Rejecting commission: '%s', could not perform" |
|
132 |
" action '%s': %s" % (vm.serial, action, e)) |
|
133 |
transaction.rollback() |
|
134 |
quotas.reject_resource_serial(vm) |
|
135 |
transaction.commit() |
|
136 |
raise |
|
137 |
|
|
138 |
if action == "BUILD" and vm.serial is not None: |
|
139 |
# XXX: Special case for server creation: we must accept the |
|
140 |
# commission because the VM has been stored in DB. Also, if |
|
141 |
# communication with Ganeti fails, the job will never reach |
|
142 |
# Ganeti, and the commission will never be resolved. |
|
143 |
quotas.accept_resource_serial(vm) |
|
144 |
|
|
145 |
log.info("user: %s, vm: %s, action: %s, job_id: %s, serial: %s", |
|
146 |
user_id, vm.id, action, job_id, vm.serial) |
|
147 |
|
|
148 |
# store the new task in the VM |
|
149 |
if job_id is not None: |
|
150 |
vm.task = action |
|
151 |
vm.task_job_id = job_id |
|
152 |
vm.save() |
|
153 |
|
|
154 |
return vm |
|
155 |
return wrapper |
|
156 |
return decorator |
b/snf-cyclades-app/synnefo/logic/management/commands/server-create.py | ||
---|---|---|
116 | 116 |
backend = None |
117 | 117 |
|
118 | 118 |
connection_list = parse_connections(options["connections"]) |
119 |
server = servers.create(user_id, name, password, flavor, image, |
|
119 |
server = servers.create(user_id, name, password, flavor, image["id"],
|
|
120 | 120 |
networks=connection_list, |
121 | 121 |
use_backend=backend) |
122 | 122 |
pprint.pprint_server(server, stdout=self.stdout) |
b/snf-cyclades-app/synnefo/logic/server_attachments.py | ||
---|---|---|
1 |
# Copyright 2011, 2012, 2013 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or without |
|
4 |
# modification, are permitted provided that the following conditions |
|
5 |
# are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above copyright |
|
8 |
# notice, this list of conditions and the following disclaimer. |
|
9 |
# |
|
10 |
# 2. Redistributions in binary form must reproduce the above copyright |
|
11 |
# notice, this list of conditions and the following disclaimer in the |
|
12 |
# documentation and/or other materials provided with the distribution. |
|
13 |
# |
|
14 |
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
|
15 |
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
16 |
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
|
17 |
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
|
18 |
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
|
19 |
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
|
20 |
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
|
21 |
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
22 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
|
23 |
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
|
24 |
# SUCH DAMAGE. |
|
25 |
# |
|
26 |
# The views and conclusions contained in the software and documentation are |
|
27 |
# those of the authors and should not be interpreted as representing official |
|
28 |
# policies, either expressed or implied, of GRNET S.A. |
|
29 |
|
|
30 |
import logging |
|
31 |
|
|
32 |
from snf_django.lib.api import faults |
|
33 |
from django.conf import settings |
|
34 |
from synnefo.logic import backend, commands |
|
35 |
|
|
36 |
log = logging.getLogger(__name__) |
|
37 |
|
|
38 |
|
|
39 |
@commands.server_command("ATTACH_VOLUME") |
|
40 |
def attach_volume(vm, volume): |
|
41 |
"""Attach a volume to a server. |
|
42 |
|
|
43 |
The volume must be in 'AVAILABLE' status in order to be attached. Also, |
|
44 |
number of the volumes that are attached to the server must remain less |
|
45 |
than 'GANETI_MAX_DISKS_PER_INSTANCE' setting. This function will send |
|
46 |
the corresponding job to Ganeti backend and update the status of the |
|
47 |
volume to 'ATTACHING'. |
|
48 |
|
|
49 |
""" |
|
50 |
# Check volume state |
|
51 |
if volume.status not in ["AVAILABLE", "CREATING"]: |
|
52 |
raise faults.BadRequest("Cannot attach volume while volume is in" |
|
53 |
" '%s' status." % volume.status) |
|
54 |
|
|
55 |
# Check that disk templates are the same |
|
56 |
if volume.disk_template != vm.flavor.disk_template: |
|
57 |
msg = ("Volume and server must have the same disk template. Volume has" |
|
58 |
" disk template '%s' while server has '%s'" |
|
59 |
% (volume.disk_template, vm.flavor.disk_template)) |
|
60 |
raise faults.BadRequest(msg) |
|
61 |
|
|
62 |
# Check maximum disk per instance hard limit |
|
63 |
if vm.volumes.count() == settings.GANETI_MAX_DISKS_PER_INSTANCE: |
|
64 |
raise faults.BadRequest("Maximum volumes per server limit reached") |
|
65 |
|
|
66 |
jobid = backend.attach_volume(vm, volume) |
|
67 |
|
|
68 |
log.info("Attached volume '%s' to server '%s'. JobID: '%s'", volume.id, |
|
69 |
volume.machine_id, jobid) |
|
70 |
|
|
71 |
volume.backendjobid = jobid |
|
72 |
volume.machine = vm |
|
73 |
volume.status = "ATTACHING" |
|
74 |
volume.save() |
|
75 |
return jobid |
|
76 |
|
|
77 |
|
|
78 |
@commands.server_command("DETACH_VOLUME") |
|
79 |
def detach_volume(vm, volume): |
|
80 |
"""Detach a volume to a server. |
|
81 |
|
|
82 |
The volume must be in 'IN_USE' status in order to be detached. Also, |
|
83 |
the root volume of the instance (index=0) can not be detached. This |
|
84 |
function will send the corresponding job to Ganeti backend and update the |
|
85 |
status of the volume to 'DETACHING'. |
|
86 |
|
|
87 |
""" |
|
88 |
|
|
89 |
_check_attachment(vm, volume) |
|
90 |
if volume.status != "IN_USE": |
|
91 |
#TODO: Maybe allow other statuses as well ? |
|
92 |
raise faults.BadRequest("Cannot detach volume while volume is in" |
|
93 |
" '%s' status." % volume.status) |
|
94 |
if volume.index == 0: |
|
95 |
raise faults.BadRequest("Cannot detach the root volume of a server") |
|
96 |
jobid = backend.detach_volume(vm, volume) |
|
97 |
log.info("Detached volume '%s' from server '%s'. JobID: '%s'", volume.id, |
|
98 |
volume.machine_id, jobid) |
|
99 |
volume.backendjobid = jobid |
|
100 |
volume.status = "DETACHING" |
|
101 |
volume.save() |
|
102 |
return jobid |
|
103 |
|
|
104 |
|
|
105 |
def _check_attachment(vm, volume): |
|
106 |
"""Check that volume is attached to vm.""" |
|
107 |
if volume.machine_id != vm.id: |
|
108 |
raise faults.BadRequest("Volume '%s' is not attached to server '%s'" |
|
109 |
% volume.id, vm.id) |
b/snf-cyclades-app/synnefo/logic/servers.py | ||
---|---|---|
30 | 30 |
import logging |
31 | 31 |
|
32 | 32 |
from socket import getfqdn |
33 |
from functools import wraps |
|
34 | 33 |
from django import dispatch |
35 | 34 |
from django.db import transaction |
36 | 35 |
from django.utils import simplejson as json |
37 | 36 |
|
38 | 37 |
from snf_django.lib.api import faults |
39 | 38 |
from django.conf import settings |
40 |
from synnefo import quotas |
|
41 | 39 |
from synnefo.api import util |
42 | 40 |
from synnefo.logic import backend, ips, utils |
43 | 41 |
from synnefo.logic.backend_allocator import BackendAllocator |
44 | 42 |
from synnefo.db.models import (NetworkInterface, VirtualMachine, |
45 |
VirtualMachineMetadata, IPAddressLog, Network, |
|
46 |
Volume) |
|
43 |
VirtualMachineMetadata, IPAddressLog, Network) |
|
47 | 44 |
from vncauthproxy.client import request_forwarding as request_vnc_forwarding |
48 | 45 |
from synnefo.logic import rapi |
46 |
from synnefo.volume.volumes import _create_volume |
|
47 |
from synnefo.volume.util import get_volume |
|
48 |
from synnefo.logic import commands |
|
49 | 49 |
|
50 | 50 |
log = logging.getLogger(__name__) |
51 | 51 |
|
... | ... | |
53 | 53 |
server_created = dispatch.Signal(providing_args=["created_vm_params"]) |
54 | 54 |
|
55 | 55 |
|
56 |
def validate_server_action(vm, action): |
|
57 |
if vm.deleted: |
|
58 |
raise faults.BadRequest("Server '%s' has been deleted." % vm.id) |
|
59 |
|
|
60 |
# Destroyin a server should always be permitted |
|
61 |
if action == "DESTROY": |
|
62 |
return |
|
63 |
|
|
64 |
# Check that there is no pending action |
|
65 |
pending_action = vm.task |
|
66 |
if pending_action: |
|
67 |
if pending_action == "BUILD": |
|
68 |
raise faults.BuildInProgress("Server '%s' is being build." % vm.id) |
|
69 |
raise faults.BadRequest("Cannot perform '%s' action while there is a" |
|
70 |
" pending '%s'." % (action, pending_action)) |
|
71 |
|
|
72 |
# Check if action can be performed to VM's operstate |
|
73 |
operstate = vm.operstate |
|
74 |
if operstate == "ERROR": |
|
75 |
raise faults.BadRequest("Cannot perform '%s' action while server is" |
|
76 |
" in 'ERROR' state." % action) |
|
77 |
elif operstate == "BUILD" and action != "BUILD": |
|
78 |
raise faults.BuildInProgress("Server '%s' is being build." % vm.id) |
|
79 |
elif (action == "START" and operstate != "STOPPED") or\ |
|
80 |
(action == "STOP" and operstate != "STARTED") or\ |
|
81 |
(action == "RESIZE" and operstate != "STOPPED") or\ |
|
82 |
(action in ["CONNECT", "DISCONNECT"] |
|
83 |
and operstate != "STOPPED" |
|
84 |
and not settings.GANETI_USE_HOTPLUG) or \ |
|
85 |
(action in ["ATTACH_VOLUME", "DETACH_VOLUME"] |
|
86 |
and operstate != "STOPPED" |
|
87 |
and not settings.GANETI_USE_HOTPLUG): |
|
88 |
raise faults.BadRequest("Cannot perform '%s' action while server is" |
|
89 |
" in '%s' state." % (action, operstate)) |
|
90 |
return |
|
91 |
|
|
92 |
|
|
93 |
def server_command(action, action_fields=None): |
|
94 |
"""Handle execution of a server action. |
|
95 |
|
|
96 |
Helper function to validate and execute a server action, handle quota |
|
97 |
commission and update the 'task' of the VM in the DB. |
|
98 |
|
|
99 |
1) Check if action can be performed. If it can, then there must be no |
|
100 |
pending task (with the exception of DESTROY). |
|
101 |
2) Handle previous commission if unresolved: |
|
102 |
* If it is not pending and it to accept, then accept |
|
103 |
* If it is not pending and to reject or is pending then reject it. Since |
|
104 |
the action can be performed only if there is no pending task, then there |
|
105 |
can be no pending commission. The exception is DESTROY, but in this case |
|
106 |
the commission can safely be rejected, and the dispatcher will generate |
|
107 |
the correct ones! |
|
108 |
3) Issue new commission and associate it with the VM. Also clear the task. |
|
109 |
4) Send job to ganeti |
|
110 |
5) Update task and commit |
|
111 |
""" |
|
112 |
def decorator(func): |
|
113 |
@wraps(func) |
|
114 |
@transaction.commit_on_success |
|
115 |
def wrapper(vm, *args, **kwargs): |
|
116 |
user_id = vm.userid |
|
117 |
validate_server_action(vm, action) |
|
118 |
vm.action = action |
|
119 |
|
|
120 |
commission_name = "client: api, resource: %s" % vm |
|
121 |
quotas.handle_resource_commission(vm, action=action, |
|
122 |
action_fields=action_fields, |
|
123 |
commission_name=commission_name) |
|
124 |
vm.save() |
|
125 |
|
|
126 |
# XXX: Special case for server creation! |
|
127 |
if action == "BUILD": |
|
128 |
# Perform a commit, because the VirtualMachine must be saved to |
|
129 |
# DB before the OP_INSTANCE_CREATE job in enqueued in Ganeti. |
|
130 |
# Otherwise, messages will arrive from snf-dispatcher about |
|
131 |
# this instance, before the VM is stored in DB. |
|
132 |
transaction.commit() |
|
133 |
# After committing the locks are released. Refetch the instance |
|
134 |
# to guarantee x-lock. |
|
135 |
vm = VirtualMachine.objects.select_for_update().get(id=vm.id) |
|
136 |
|
|
137 |
# Send the job to Ganeti and get the associated jobID |
|
138 |
try: |
|
139 |
job_id = func(vm, *args, **kwargs) |
|
140 |
except Exception as e: |
|
141 |
if vm.serial is not None: |
|
142 |
# Since the job never reached Ganeti, reject the commission |
|
143 |
log.debug("Rejecting commission: '%s', could not perform" |
|
144 |
" action '%s': %s" % (vm.serial, action, e)) |
|
145 |
transaction.rollback() |
|
146 |
quotas.reject_resource_serial(vm) |
|
147 |
transaction.commit() |
|
148 |
raise |
|
149 |
|
|
150 |
if action == "BUILD" and vm.serial is not None: |
|
151 |
# XXX: Special case for server creation: we must accept the |
|
152 |
# commission because the VM has been stored in DB. Also, if |
|
153 |
# communication with Ganeti fails, the job will never reach |
|
154 |
# Ganeti, and the commission will never be resolved. |
|
155 |
quotas.accept_resource_serial(vm) |
|
156 |
|
|
157 |
log.info("user: %s, vm: %s, action: %s, job_id: %s, serial: %s", |
|
158 |
user_id, vm.id, action, job_id, vm.serial) |
|
159 |
|
|
160 |
# store the new task in the VM |
|
161 |
if job_id is not None: |
|
162 |
vm.task = action |
|
163 |
vm.task_job_id = job_id |
|
164 |
vm.save() |
|
165 |
|
|
166 |
return vm |
|
167 |
return wrapper |
|
168 |
return decorator |
|
169 |
|
|
170 |
|
|
171 | 56 |
@transaction.commit_on_success |
172 |
def create(userid, name, password, flavor, image, metadata={}, |
|
173 |
personality=[], networks=None, use_backend=None): |
|
57 |
def create(userid, name, password, flavor, image_id, metadata={}, |
|
58 |
personality=[], networks=None, volumes=None, |
|
59 |
use_backend=None): |
|
60 |
|
|
61 |
# Get image information |
|
62 |
# TODO: Image is not mandatory if disks are specified |
|
63 |
image = util.get_image_dict(image_id, userid) |
|
174 | 64 |
|
175 | 65 |
# Check that image fits into the disk |
176 |
if image["size"] > (flavor.disk << 30): |
|
177 |
msg = "Flavor's disk size '%s' is smaller than the image's size '%s'" |
|
178 |
raise faults.BadRequest(msg % (flavor.disk << 30, image["size"])) |
|
66 |
if int(image["size"]) > (flavor.disk << 30): |
|
67 |
msg = ("Flavor's disk size '%s' is smaller than the image's" |
|
68 |
"size '%s'" % (flavor.disk << 30, image["size"])) |
|
69 |
raise faults.BadRequest(msg) |
|
179 | 70 |
|
180 | 71 |
if use_backend is None: |
181 | 72 |
# Allocate server to a Ganeti backend |
... | ... | |
187 | 78 |
# Create the ports for the server |
188 | 79 |
ports = create_instance_ports(userid, networks) |
189 | 80 |
|
190 |
# Fix flavor for archipelago |
|
191 |
disk_template, provider = util.get_flavor_provider(flavor) |
|
192 |
if provider: |
|
193 |
flavor.disk_template = disk_template |
|
194 |
flavor.disk_provider = provider |
|
195 |
flavor.disk_origin = None |
|
196 |
if provider in settings.GANETI_CLONE_PROVIDERS: |
|
197 |
flavor.disk_origin = image['checksum'] |
|
198 |
image['backend_id'] = 'null' |
|
199 |
else: |
|
200 |
flavor.disk_provider = None |
|
201 |
|
|
202 | 81 |
# We must save the VM instance now, so that it gets a valid |
203 | 82 |
# vm.backend_vm_id. |
204 | 83 |
vm = VirtualMachine.objects.create(name=name, |
... | ... | |
215 | 94 |
port.index = index |
216 | 95 |
port.save() |
217 | 96 |
|
218 |
volumes = create_instance_volumes(vm, flavor, image) |
|
97 |
# If no volumes are specified, we automatically create a volume with the |
|
98 |
# size of the flavor and filled with the specified image. |
|
99 |
if not volumes: |
|
100 |
volumes = [{"source_type": "image", |
|
101 |
"source_uuid": image["id"], |
|
102 |
"size": flavor.disk, |
|
103 |
"delete_on_termination": True}] |
|
104 |
|
|
105 |
assert(len(volumes) > 0), "Cannot create server without volumes" |
|
106 |
|
|
107 |
if volumes[0]["source_type"] == "blank": |
|
108 |
raise faults.BadRequest("Root volume cannot be blank") |
|
109 |
|
|
110 |
server_volumes = [] |
|
111 |
for index, vol_info in enumerate(volumes): |
|
112 |
if vol_info["source_type"] == "volume": |
|
113 |
uuid = vol_info["source_uuid"] |
|
114 |
v = get_volume(userid, uuid, for_update=True, |
|
115 |
exception=faults.BadRequest) |
|
116 |
if v.status != "AVAILABLE": |
|
117 |
raise faults.BadRequest("Cannot use volume while it is in %s" |
|
118 |
" status" % v.status) |
|
119 |
v.delete_on_termination = vol_info["delete_on_termination"] |
|
120 |
v.index = index |
|
121 |
v.save() |
|
122 |
else: |
|
123 |
v = _create_volume(server=vm, user_id=userid, |
|
124 |
index=index, **vol_info) |
|
125 |
server_volumes.append(v) |
|
219 | 126 |
|
220 | 127 |
for key, val in metadata.items(): |
221 | 128 |
VirtualMachineMetadata.objects.create( |
... | ... | |
224 | 131 |
vm=vm) |
225 | 132 |
|
226 | 133 |
# Create the server in Ganeti. |
227 |
vm = create_server(vm, ports, volumes, flavor, image, personality, |
|
134 |
vm = create_server(vm, ports, server_volumes, flavor, image, personality,
|
|
228 | 135 |
password) |
229 | 136 |
|
230 | 137 |
return vm |
... | ... | |
250 | 157 |
return use_backend |
251 | 158 |
|
252 | 159 |
|
253 |
@server_command("BUILD") |
|
160 |
@commands.server_command("BUILD")
|
|
254 | 161 |
def create_server(vm, nics, volumes, flavor, image, personality, password): |
255 | 162 |
# dispatch server created signal needed to trigger the 'vmapi', which |
256 | 163 |
# enriches the vm object with the 'config_url' attribute which must be |
257 | 164 |
# passed to the Ganeti job. |
165 |
|
|
166 |
# If the root volume has a provider, then inform snf-image to not fill |
|
167 |
# the volume with data |
|
168 |
image_id = image["backend_id"] |
|
169 |
root_volume = volumes[0] |
|
170 |
if root_volume.origin is not None: |
|
171 |
image_id = "null" |
|
172 |
|
|
258 | 173 |
server_created.send(sender=vm, created_vm_params={ |
259 |
'img_id': image['backend_id'],
|
|
174 |
'img_id': image_id,
|
|
260 | 175 |
'img_passwd': password, |
261 | 176 |
'img_format': str(image['format']), |
262 | 177 |
'img_personality': json.dumps(personality), |
... | ... | |
283 | 198 |
return jobID |
284 | 199 |
|
285 | 200 |
|
286 |
def create_instance_volumes(vm, flavor, image): |
|
287 |
name = "Root volume of server: %s" % vm.id |
|
288 |
volume = Volume.objects.create(userid=vm.userid, |
|
289 |
machine=vm, |
|
290 |
name=name, |
|
291 |
size=flavor.disk, |
|
292 |
source=Volume.SOURCE_IMAGE_PREFIX+image["id"], |
|
293 |
origin=image["checksum"], |
|
294 |
status="CREATING") |
|
295 |
|
|
296 |
volume.save() |
|
297 |
|
|
298 |
return [volume] |
|
299 |
|
|
300 |
|
|
301 |
@server_command("DESTROY") |
|
201 |
@commands.server_command("DESTROY") |
|
302 | 202 |
def destroy(vm, shutdown_timeout=None): |
303 | 203 |
# XXX: Workaround for race where OP_INSTANCE_REMOVE starts executing on |
304 | 204 |
# Ganeti before OP_INSTANCE_CREATE. This will be fixed when |
... | ... | |
312 | 212 |
return backend.delete_instance(vm, shutdown_timeout=shutdown_timeout) |
313 | 213 |
|
314 | 214 |
|
315 |
@server_command("START") |
|
215 |
@commands.server_command("START")
|
|
316 | 216 |
def start(vm): |
317 | 217 |
log.info("Starting VM %s", vm) |
318 | 218 |
return backend.startup_instance(vm) |
319 | 219 |
|
320 | 220 |
|
321 |
@server_command("STOP") |
|
221 |
@commands.server_command("STOP")
|
|
322 | 222 |
def stop(vm, shutdown_timeout=None): |
323 | 223 |
log.info("Stopping VM %s", vm) |
324 | 224 |
return backend.shutdown_instance(vm, shutdown_timeout=shutdown_timeout) |
325 | 225 |
|
326 | 226 |
|
327 |
@server_command("REBOOT") |
|
227 |
@commands.server_command("REBOOT")
|
|
328 | 228 |
def reboot(vm, reboot_type, shutdown_timeout=None): |
329 | 229 |
if reboot_type not in ("SOFT", "HARD"): |
330 | 230 |
raise faults.BadRequest("Malformed request. Invalid reboot" |
... | ... | |
338 | 238 |
def resize(vm, flavor): |
339 | 239 |
action_fields = {"beparams": {"vcpus": flavor.cpu, |
340 | 240 |
"maxmem": flavor.ram}} |
341 |
comm = server_command("RESIZE", action_fields=action_fields) |
|
241 |
comm = commands.server_command("RESIZE", action_fields=action_fields)
|
|
342 | 242 |
return comm(_resize)(vm, flavor) |
343 | 243 |
|
344 | 244 |
|
... | ... | |
358 | 258 |
return backend.resize_instance(vm, vcpus=flavor.cpu, memory=flavor.ram) |
359 | 259 |
|
360 | 260 |
|
361 |
@server_command("SET_FIREWALL_PROFILE") |
|
261 |
@commands.server_command("SET_FIREWALL_PROFILE")
|
|
362 | 262 |
def set_firewall_profile(vm, profile, nic): |
363 | 263 |
log.info("Setting VM %s, NIC %s, firewall %s", vm, nic, profile) |
364 | 264 |
|
... | ... | |
368 | 268 |
return None |
369 | 269 |
|
370 | 270 |
|
371 |
@server_command("CONNECT") |
|
271 |
@commands.server_command("CONNECT")
|
|
372 | 272 |
def connect(vm, network, port=None): |
373 | 273 |
if port is None: |
374 | 274 |
port = _create_port(vm.userid, network) |
... | ... | |
379 | 279 |
return backend.connect_to_network(vm, port) |
380 | 280 |
|
381 | 281 |
|
382 |
@server_command("DISCONNECT") |
|
282 |
@commands.server_command("DISCONNECT")
|
|
383 | 283 |
def disconnect(vm, nic): |
384 | 284 |
log.info("Removing NIC %s from VM %s", nic, vm) |
385 | 285 |
return backend.disconnect_from_network(vm, nic) |
... | ... | |
760 | 660 |
else: |
761 | 661 |
raise faults.BadRequest("Network 'uuid' or 'port' attribute" |
762 | 662 |
" is required.") |
763 |
|
|
764 |
|
|
765 |
@server_command("ATTACH_VOLUME") |
|
766 |
def attach_volume(vm, volume): |
|
767 |
"""Attach a volume to a server. |
|
768 |
|
|
769 |
The volume must be in 'AVAILABLE' status in order to be attached. Also, |
|
770 |
the number of volumes attached to the server must remain less |
|
771 |
than 'GANETI_MAX_DISKS_PER_INSTANCE' setting. This function will send |
|
772 |
the corresponding job to Ganeti backend and update the status of the |
|
773 |
volume to 'ATTACHING'. |
|
774 |
|
|
775 |
""" |
|
776 |
# Check volume state |
|
777 |
if volume.status not in ["AVAILABLE", "CREATING"]: |
|
778 |
raise faults.BadRequest("Cannot attach volume while volume is in" |
|
779 |
" '%s' status." % volume.status) |
|
780 |
|
|
781 |
# Check that disk templates are the same |
|
782 |
if volume.disk_template != vm.flavor.disk_template: |
|
783 |
msg = ("Volume and server must have the same disk template. Volume has" |
|
784 |
" disk template '%s' while server has '%s'" |
|
785 |
% (volume.disk_template, vm.flavor.disk_template)) |
|
786 |
raise faults.BadRequest(msg) |
|
787 |
|
|
788 |
# Check maximum disk per instance hard limit |
|
789 |
if vm.volumes.count() == settings.GANETI_MAX_DISKS_PER_INSTANCE: |
|
790 |
raise faults.BadRequest("Maximum volumes per server limit reached") |
|
791 |
|
|
792 |
jobid = backend.attach_volume(vm, volume) |
|
793 |
|
|
794 |
log.info("Attached volume '%s' to server '%s'. JobID: '%s'", volume.id, |
|
795 |
volume.machine_id, jobid) |
|
796 |
|
|
797 |
volume.backendjobid = jobid |
|
798 |
volume.machine = vm |
|
799 |
volume.status = "ATTACHING" |
|
800 |
volume.save() |
|
801 |
return jobid |
|
802 |
|
|
803 |
|
|
804 |
@server_command("DETACH_VOLUME") |
|
805 |
def detach_volume(vm, volume): |
|
806 |
"""Detach a volume to a server. |
|
807 |
|
|
808 |
The volume must be in 'IN_USE' status in order to be detached. Also, |
|
809 |
the root volume of the instance (index=0) can not be detached. This |
|
810 |
function will send the corresponding job to Ganeti backend and update the |
|
811 |
status of the volume to 'DETACHING'. |
|
812 |
|
|
813 |
""" |
|
814 |
|
|
815 |
_check_attachment(vm, volume) |
|
816 |
if volume.status != "IN_USE": |
|
817 |
#TODO: Maybe allow other statuses as well ? |
|
818 |
raise faults.BadRequest("Cannot detach volume while volume is in" |
|
819 |
" '%s' status." % volume.status) |
|
820 |
if volume.index == 0: |
|
821 |
raise faults.BadRequest("Cannot detach the root volume of a server") |
|
822 |
jobid = backend.detach_volume(vm, volume) |
|
823 |
log.info("Detached volume '%s' from server '%s'. JobID: '%s'", volume.id, |
|
824 |
volume.machine_id, jobid) |
|
825 |
volume.backendjobid = jobid |
|
826 |
volume.status = "DETACHING" |
|
827 |
volume.save() |
|
828 |
return jobid |
|
829 |
|
|
830 |
|
|
831 |
def _check_attachment(vm, volume): |
|
832 |
"""Check that volume is attached to vm.""" |
|
833 |
if volume.machine_id != vm.id: |
|
834 |
raise faults.BadRequest("Volume '%s' is not attached to server '%s'" |
|
835 |
% volume.id, vm.id) |
b/snf-cyclades-app/synnefo/volume/volumes.py | ||
---|---|---|
1 | 1 |
import logging |
2 | 2 |
|
3 | 3 |
from django.db import transaction |
4 |
from synnefo.db.models import Volume |
|
5 | 4 |
from snf_django.lib.api import faults |
5 |
from synnefo.db.models import Volume |
|
6 | 6 |
from synnefo.volume import util |
7 |
from synnefo.logic import backend, servers
|
|
7 |
from synnefo.logic import server_attachments
|
|
8 | 8 |
|
9 | 9 |
log = logging.getLogger(__name__) |
10 | 10 |
|
... | ... | |
14 | 14 |
source_volume_id=None, source_snapshot_id=None, |
15 | 15 |
source_image_id=None, metadata=None): |
16 | 16 |
|
17 |
# Currently we cannot create volumes without being attached to a server |
|
17 | 18 |
if server_id is None: |
18 | 19 |
raise faults.BadRequest("Volume must be attached to server") |
19 | 20 |
server = util.get_server(user_id, server_id, for_update=True, |
... | ... | |
25 | 26 |
if len(sources) > 1: |
26 | 27 |
raise faults.BadRequest("Volume can not have more than one source!") |
27 | 28 |
|
28 |
# Only ext_ disk template supports cloning from another source |
|
29 |
if source_volume_id is not None: |
|
30 |
source_type = "volume" |
|
31 |
source_uuid = source_volume_id |
|
32 |
elif source_snapshot_id is not None: |
|
33 |
source_type = "snapshot" |
|
34 |
source_uuid = source_snapshot_id |
|
35 |
elif source_image_id is not None: |
|
36 |
source_type = "image" |
|
37 |
source_uuid = source_image_id |
|
38 |
else: |
|
39 |
source_type = source_uuid = None |
|
40 |
|
|
41 |
volume = _create_volume(server, user_id, size, source_type, source_uuid, |
|
42 |
name, description, index=None) |
|
43 |
|
|
44 |
if metadata is not None: |
|
45 |
for meta_key, meta_val in metadata.items(): |
|
46 |
volume.metadata.create(key=meta_key, value=meta_val) |
|
47 |
|
|
48 |
server_attachments.attach_volume(server, volume) |
|
49 |
|
|
50 |
return volume |
|
51 |
|
|
52 |
|
|
53 |
def _create_volume(server, user_id, size, source_type, source_uuid, |
|
54 |
name=None, description=None, index=None, |
|
55 |
delete_on_termination=True): |
|
56 |
|
|
57 |
# Only ext_ disk template supports cloning from another source. Otherwise |
|
58 |
# it must be the root volume so that 'snf-image' fills the volume |
|
29 | 59 |
disk_template = server.flavor.disk_template |
30 |
if not disk_template.startswith("ext_") and sources: |
|
60 |
can_have_source = (index == 0 or disk_template.startswith("ext_")) |
|
61 |
if not can_have_source and source_type != "blank": |
|
31 | 62 |
msg = ("Volumes of '%s' disk template cannot have a source" % |
32 | 63 |
disk_template) |
33 | 64 |
raise faults.BadRequest(msg) |
34 | 65 |
|
35 |
origin = None |
|
36 |
source = None |
|
37 |
if source_volume_id is not None: |
|
38 |
source_volume = util.get_volume(user_id, source_volume_id, |
|
66 |
# TODO: Check Volume/Snapshot Status |
|
67 |
if source_type == "volume": |
|
68 |
source_volume = util.get_volume(user_id, source_uuid, |
|
39 | 69 |
for_update=True, |
40 | 70 |
exception=faults.BadRequest) |
41 |
# Check that volume is ready to be snapshotted |
|
42 |
if source_volume.status != "AVAILABLE": |
|
43 |
msg = ("Cannot take a snapshot while snapshot is in '%s' state" |
|
44 |
% source_volume.status) |
|
45 |
raise faults.BadRequest(msg) |
|
46 |
source = Volume.prefix_source(source_volume_id, source_type="volume") |
|
71 |
if source_volume.status != "IN_USE": |
|
72 |
raise faults.BadRequest("Cannot clone volume while it is in '%s'" |
|
73 |
" status" % source_volume.status) |
|
74 |
# If no size is specified, use the size of the volume |
|
75 |
if size is None: |
|
76 |
size = source_volume.size |
|
77 |
elif size < source_volume.size: |
|
78 |
raise faults.BadRequest("Volume size cannot be smaller than the" |
|
79 |
" source volume") |
|
80 |
source = Volume.prefix_source(source_uuid, source_type="volume") |
|
47 | 81 |
origin = source_volume.backend_volume_uuid |
48 |
elif source_snapshot_id is not None:
|
|
49 |
source_snapshot = util.get_snapshot(user_id, source_snapshot_id,
|
|
82 |
elif source_type == "snapshot":
|
|
83 |
source_snapshot = util.get_snapshot(user_id, source_uuid,
|
|
50 | 84 |
exception=faults.BadRequest) |
51 |
# TODO: Check the state of the snapshot!! |
|
52 |
source = Volume.prefix_source(source_snapshot_id, |
|
85 |
source = Volume.prefix_source(source_uuid, |
|
53 | 86 |
source_type="snapshot") |
87 |
if size is None: |
|
88 |
raise faults.BadRequest("Volume size is required") |
|
89 |
elif (size << 30) < int(source_snapshot["size"]): |
|
90 |
raise faults.BadRequest("Volume size '%s' is smaller than" |
|
91 |
" snapshot's size '%s'" |
|
92 |
% (size << 30, source_snapshot["size"])) |
|
54 | 93 |
origin = source_snapshot["checksum"] |
55 |
elif source_image_id is not None:
|
|
56 |
source_image = util.get_image(user_id, source_image_id,
|
|
94 |
elif source_type == "image":
|
|
95 |
source_image = util.get_image(user_id, source_uuid,
|
|
57 | 96 |
exception=faults.BadRequest) |
58 |
source = Volume.prefix_source(source_image_id, source_type="image") |
|
97 |
if size is None: |
|
98 |
raise faults.BadRequest("Volume size is required") |
|
99 |
elif (size << 30) < int(source_image["size"]): |
|
100 |
raise faults.BadRequest("Volume size '%s' is smaller than" |
|
101 |
" image's size '%s'" |
|
102 |
% (size << 30, source_image["size"])) |
|
103 |
source = Volume.prefix_source(source_uuid, source_type="image") |
|
59 | 104 |
origin = source_image["checksum"] |
105 |
elif source_type == "blank": |
|
106 |
if size is None: |
|
107 |
raise faults.BadRequest("Volume size is required") |
|
108 |
source = origin = None |
|
109 |
else: |
|
110 |
raise faults.BadRequest("Unknwon source type") |
|
60 | 111 |
|
61 | 112 |
volume = Volume.objects.create(userid=user_id, |
62 | 113 |
size=size, |
63 | 114 |
name=name, |
64 | 115 |
machine=server, |
65 | 116 |
description=description, |
117 |
delete_on_termination=delete_on_termination, |
|
66 | 118 |
source=source, |
67 | 119 |
origin=origin, |
68 | 120 |
#volume_type=volume_type, |
69 | 121 |
status="CREATING") |
70 |
|
|
71 |
if metadata is not None: |
|
72 |
for meta_key, meta_val in metadata.items(): |
|
73 |
volume.metadata.create(key=meta_key, value=meta_val) |
|
74 |
|
|
75 |
servers.attach_volume(server, volume) |
|
76 |
|
|
77 | 122 |
return volume |
78 | 123 |
|
79 | 124 |
|
... | ... | |
83 | 128 |
# A volume is deleted by detaching it from the server that is attached. |
84 | 129 |
# Deleting a detached volume is not implemented. |
85 | 130 |
if volume.machine_id is not None: |
86 |
servers.detach_volume(volume.machine, volume) |
|
131 |
server_attachments.detach_volume(volume.machine, volume)
|
|
87 | 132 |
log.info("Detach volume '%s' from server '%s', job: %s", |
88 | 133 |
volume.id, volume.machine_id, volume.backendjobid) |
89 | 134 |
else: |
Also available in: Unified diff