Revision 5d805533 snf-cyclades-app/synnefo/logic/servers.py
b/snf-cyclades-app/synnefo/logic/servers.py

 old |  new |
-----|------|-------------------------------------------------------------------------------
  30 |   30 | import logging
  31 |   31 | 
  32 |   32 | from socket import getfqdn
  33 |      | from functools import wraps
  34 |   33 | from django import dispatch
  35 |   34 | from django.db import transaction
  36 |   35 | from django.utils import simplejson as json
  37 |   36 | 
  38 |   37 | from snf_django.lib.api import faults
  39 |   38 | from django.conf import settings
  40 |      | from synnefo import quotas
  41 |   39 | from synnefo.api import util
  42 |   40 | from synnefo.logic import backend, ips, utils
  43 |   41 | from synnefo.logic.backend_allocator import BackendAllocator
  44 |   42 | from synnefo.db.models import (NetworkInterface, VirtualMachine,
  45 |      |                                VirtualMachineMetadata, IPAddressLog, Network,
  46 |      |                                Volume)
     |   43 |                                VirtualMachineMetadata, IPAddressLog, Network)
  47 |   44 | from vncauthproxy.client import request_forwarding as request_vnc_forwarding
  48 |   45 | from synnefo.logic import rapi
     |   46 | from synnefo.volume.volumes import _create_volume
     |   47 | from synnefo.volume.util import get_volume
     |   48 | from synnefo.logic import commands
  49 |   49 | 
  50 |   50 | log = logging.getLogger(__name__)
  51 |   51 | 
 ... |  ... | 
  53 |   53 | server_created = dispatch.Signal(providing_args=["created_vm_params"])
  54 |   54 | 
  55 |   55 | 
  56 |      | def validate_server_action(vm, action):
  57 |      |     if vm.deleted:
  58 |      |         raise faults.BadRequest("Server '%s' has been deleted." % vm.id)
  59 |      | 
  60 |      |     # Destroyin a server should always be permitted
  61 |      |     if action == "DESTROY":
  62 |      |         return
  63 |      | 
  64 |      |     # Check that there is no pending action
  65 |      |     pending_action = vm.task
  66 |      |     if pending_action:
  67 |      |         if pending_action == "BUILD":
  68 |      |             raise faults.BuildInProgress("Server '%s' is being build." % vm.id)
  69 |      |         raise faults.BadRequest("Cannot perform '%s' action while there is a"
  70 |      |                                 " pending '%s'." % (action, pending_action))
  71 |      | 
  72 |      |     # Check if action can be performed to VM's operstate
  73 |      |     operstate = vm.operstate
  74 |      |     if operstate == "ERROR":
  75 |      |         raise faults.BadRequest("Cannot perform '%s' action while server is"
  76 |      |                                 " in 'ERROR' state." % action)
  77 |      |     elif operstate == "BUILD" and action != "BUILD":
  78 |      |         raise faults.BuildInProgress("Server '%s' is being build." % vm.id)
  79 |      |     elif (action == "START" and operstate != "STOPPED") or\
  80 |      |          (action == "STOP" and operstate != "STARTED") or\
  81 |      |          (action == "RESIZE" and operstate != "STOPPED") or\
  82 |      |          (action in ["CONNECT", "DISCONNECT"]
  83 |      |           and operstate != "STOPPED"
  84 |      |           and not settings.GANETI_USE_HOTPLUG) or \
  85 |      |          (action in ["ATTACH_VOLUME", "DETACH_VOLUME"]
  86 |      |           and operstate != "STOPPED"
  87 |      |           and not settings.GANETI_USE_HOTPLUG):
  88 |      |         raise faults.BadRequest("Cannot perform '%s' action while server is"
  89 |      |                                 " in '%s' state." % (action, operstate))
  90 |      |     return
  91 |      | 
  92 |      | 
  93 |      | def server_command(action, action_fields=None):
  94 |      |     """Handle execution of a server action.
  95 |      | 
  96 |      |     Helper function to validate and execute a server action, handle quota
  97 |      |     commission and update the 'task' of the VM in the DB.
  98 |      | 
  99 |      |     1) Check if action can be performed. If it can, then there must be no
 100 |      |        pending task (with the exception of DESTROY).
 101 |      |     2) Handle previous commission if unresolved:
 102 |      |        * If it is not pending and it to accept, then accept
 103 |      |        * If it is not pending and to reject or is pending then reject it. Since
 104 |      |        the action can be performed only if there is no pending task, then there
 105 |      |        can be no pending commission. The exception is DESTROY, but in this case
 106 |      |        the commission can safely be rejected, and the dispatcher will generate
 107 |      |        the correct ones!
 108 |      |     3) Issue new commission and associate it with the VM. Also clear the task.
 109 |      |     4) Send job to ganeti
 110 |      |     5) Update task and commit
 111 |      |     """
 112 |      |     def decorator(func):
 113 |      |         @wraps(func)
 114 |      |         @transaction.commit_on_success
 115 |      |         def wrapper(vm, *args, **kwargs):
 116 |      |             user_id = vm.userid
 117 |      |             validate_server_action(vm, action)
 118 |      |             vm.action = action
 119 |      | 
 120 |      |             commission_name = "client: api, resource: %s" % vm
 121 |      |             quotas.handle_resource_commission(vm, action=action,
 122 |      |                                               action_fields=action_fields,
 123 |      |                                               commission_name=commission_name)
 124 |      |             vm.save()
 125 |      | 
 126 |      |             # XXX: Special case for server creation!
 127 |      |             if action == "BUILD":
 128 |      |                 # Perform a commit, because the VirtualMachine must be saved to
 129 |      |                 # DB before the OP_INSTANCE_CREATE job in enqueued in Ganeti.
 130 |      |                 # Otherwise, messages will arrive from snf-dispatcher about
 131 |      |                 # this instance, before the VM is stored in DB.
 132 |      |                 transaction.commit()
 133 |      |                 # After committing the locks are released. Refetch the instance
 134 |      |                 # to guarantee x-lock.
 135 |      |                 vm = VirtualMachine.objects.select_for_update().get(id=vm.id)
 136 |      | 
 137 |      |             # Send the job to Ganeti and get the associated jobID
 138 |      |             try:
 139 |      |                 job_id = func(vm, *args, **kwargs)
 140 |      |             except Exception as e:
 141 |      |                 if vm.serial is not None:
 142 |      |                     # Since the job never reached Ganeti, reject the commission
 143 |      |                     log.debug("Rejecting commission: '%s', could not perform"
 144 |      |                               " action '%s': %s" % (vm.serial, action, e))
 145 |      |                     transaction.rollback()
 146 |      |                     quotas.reject_resource_serial(vm)
 147 |      |                     transaction.commit()
 148 |      |                 raise
 149 |      | 
 150 |      |             if action == "BUILD" and vm.serial is not None:
 151 |      |                 # XXX: Special case for server creation: we must accept the
 152 |      |                 # commission because the VM has been stored in DB. Also, if
 153 |      |                 # communication with Ganeti fails, the job will never reach
 154 |      |                 # Ganeti, and the commission will never be resolved.
 155 |      |                 quotas.accept_resource_serial(vm)
 156 |      | 
 157 |      |             log.info("user: %s, vm: %s, action: %s, job_id: %s, serial: %s",
 158 |      |                      user_id, vm.id, action, job_id, vm.serial)
 159 |      | 
 160 |      |             # store the new task in the VM
 161 |      |             if job_id is not None:
 162 |      |                 vm.task = action
 163 |      |                 vm.task_job_id = job_id
 164 |      |                 vm.save()
 165 |      | 
 166 |      |             return vm
 167 |      |         return wrapper
 168 |      |     return decorator
 169 |      | 
 170 |      | 
 171 |   56 | @transaction.commit_on_success
 172 |      | def create(userid, name, password, flavor, image, metadata={},
 173 |      |            personality=[], networks=None, use_backend=None):
     |   57 | def create(userid, name, password, flavor, image_id, metadata={},
     |   58 |            personality=[], networks=None, volumes=None,
     |   59 |            use_backend=None):
     |   60 | 
     |   61 |     # Get image information
     |   62 |     # TODO: Image is not mandatory if disks are specified
     |   63 |     image = util.get_image_dict(image_id, userid)
 174 |   64 | 
 175 |   65 |     # Check that image fits into the disk
 176 |      |     if image["size"] > (flavor.disk << 30):
 177 |      |         msg = "Flavor's disk size '%s' is smaller than the image's size '%s'"
 178 |      |         raise faults.BadRequest(msg % (flavor.disk << 30, image["size"]))
     |   66 |     if int(image["size"]) > (flavor.disk << 30):
     |   67 |         msg = ("Flavor's disk size '%s' is smaller than the image's"
     |   68 |                "size '%s'" % (flavor.disk << 30, image["size"]))
     |   69 |         raise faults.BadRequest(msg)
 179 |   70 | 
 180 |   71 |     if use_backend is None:
 181 |   72 |         # Allocate server to a Ganeti backend
 ... |  ... | 
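Note on the reworked size check (new lines 66-69): the comparison is carried out in bytes. Assuming, as elsewhere in Synnefo, that `flavor.disk` is given in GiB and the image `size` field in bytes, the left shift by 30 is simply the GiB-to-bytes conversion. A quick illustration, not part of the revision:

```python
# Illustrative only: shifting a GiB count left by 30 bits multiplies it by 2**30,
# i.e. converts GiB to bytes, the unit in which it is compared against image["size"].
flavor_disk_gib = 5
assert (flavor_disk_gib << 30) == flavor_disk_gib * 1024 ** 3 == 5368709120
```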
 187 |   78 |     # Create the ports for the server
 188 |   79 |     ports = create_instance_ports(userid, networks)
 189 |   80 | 
 190 |      |     # Fix flavor for archipelago
 191 |      |     disk_template, provider = util.get_flavor_provider(flavor)
 192 |      |     if provider:
 193 |      |         flavor.disk_template = disk_template
 194 |      |         flavor.disk_provider = provider
 195 |      |         flavor.disk_origin = None
 196 |      |         if provider in settings.GANETI_CLONE_PROVIDERS:
 197 |      |             flavor.disk_origin = image['checksum']
 198 |      |             image['backend_id'] = 'null'
 199 |      |     else:
 200 |      |         flavor.disk_provider = None
 201 |      | 
 202 |   81 |     # We must save the VM instance now, so that it gets a valid
 203 |   82 |     # vm.backend_vm_id.
 204 |   83 |     vm = VirtualMachine.objects.create(name=name,
 ... |  ... | 
 215 |   94 |         port.index = index
 216 |   95 |         port.save()
 217 |   96 | 
 218 |      |     volumes = create_instance_volumes(vm, flavor, image)
     |   97 |     # If no volumes are specified, we automatically create a volume with the
     |   98 |     # size of the flavor and filled with the specified image.
     |   99 |     if not volumes:
     |  100 |         volumes = [{"source_type": "image",
     |  101 |                     "source_uuid": image["id"],
     |  102 |                     "size": flavor.disk,
     |  103 |                     "delete_on_termination": True}]
     |  104 | 
     |  105 |     assert(len(volumes) > 0), "Cannot create server without volumes"
     |  106 | 
     |  107 |     if volumes[0]["source_type"] == "blank":
     |  108 |         raise faults.BadRequest("Root volume cannot be blank")
     |  109 | 
     |  110 |     server_volumes = []
     |  111 |     for index, vol_info in enumerate(volumes):
     |  112 |         if vol_info["source_type"] == "volume":
     |  113 |             uuid = vol_info["source_uuid"]
     |  114 |             v = get_volume(userid, uuid, for_update=True,
     |  115 |                            exception=faults.BadRequest)
     |  116 |             if v.status != "AVAILABLE":
     |  117 |                 raise faults.BadRequest("Cannot use volume while it is in %s"
     |  118 |                                         " status" % v.status)
     |  119 |             v.delete_on_termination = vol_info["delete_on_termination"]
     |  120 |             v.index = index
     |  121 |             v.save()
     |  122 |         else:
     |  123 |             v = _create_volume(server=vm, user_id=userid,
     |  124 |                                index=index, **vol_info)
     |  125 |         server_volumes.append(v)
 219 |  126 | 
 220 |  127 |     for key, val in metadata.items():
 221 |  128 |         VirtualMachineMetadata.objects.create(
 ... |  ... | 
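For context on the new `volumes` argument processed above (new lines 97-125): each entry is a dict that either references an existing volume to attach or describes a new volume to create, with the root volume listed first. Below is a rough, hypothetical sketch of a call to the reworked `create()`; the field names follow the specs handled in the loop above, while the concrete values and the existing-volume UUID are made up for illustration.

```python
# Hypothetical volume specifications for create(); values are illustrative only
# and assume userid, name, password, flavor, image_id and networks are in scope.
volume_specs = [
    {"source_type": "image",                    # root volume, filled from the image
     "source_uuid": image_id,
     "size": flavor.disk,                       # GiB, as in the default spec above
     "delete_on_termination": True},
    {"source_type": "volume",                   # attach an existing AVAILABLE volume
     "source_uuid": "<existing-volume-uuid>",
     "delete_on_termination": False},
]
vm = create(userid, name, password, flavor, image_id,
            networks=networks, volumes=volume_specs)
```

When `volumes` is omitted, the code above falls back to a single image-backed root volume sized to the flavor's disk.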
 224 |  131 |             vm=vm)
 225 |  132 | 
 226 |  133 |     # Create the server in Ganeti.
 227 |      |     vm = create_server(vm, ports, volumes, flavor, image, personality,
     |  134 |     vm = create_server(vm, ports, server_volumes, flavor, image, personality,
 228 |  135 |                        password)
 229 |  136 | 
 230 |  137 |     return vm
 ... |  ... | 
 250 |  157 |     return use_backend
 251 |  158 | 
 252 |  159 | 
 253 |      | @server_command("BUILD")
     |  160 | @commands.server_command("BUILD")
 254 |  161 | def create_server(vm, nics, volumes, flavor, image, personality, password):
 255 |  162 |     # dispatch server created signal needed to trigger the 'vmapi', which
 256 |  163 |     # enriches the vm object with the 'config_url' attribute which must be
 257 |  164 |     # passed to the Ganeti job.
     |  165 | 
     |  166 |     # If the root volume has a provider, then inform snf-image to not fill
     |  167 |     # the volume with data
     |  168 |     image_id = image["backend_id"]
     |  169 |     root_volume = volumes[0]
     |  170 |     if root_volume.origin is not None:
     |  171 |         image_id = "null"
     |  172 | 
 258 |  173 |     server_created.send(sender=vm, created_vm_params={
 259 |      |         'img_id': image['backend_id'],
     |  174 |         'img_id': image_id,
 260 |  175 |         'img_passwd': password,
 261 |  176 |         'img_format': str(image['format']),
 262 |  177 |         'img_personality': json.dumps(personality),
 ... |  ... | 
 283 |  198 |     return jobID
 284 |  199 | 
 285 |  200 | 
 286 |      | def create_instance_volumes(vm, flavor, image):
 287 |      |     name = "Root volume of server: %s" % vm.id
 288 |      |     volume = Volume.objects.create(userid=vm.userid,
 289 |      |                                    machine=vm,
 290 |      |                                    name=name,
 291 |      |                                    size=flavor.disk,
 292 |      |                                    source=Volume.SOURCE_IMAGE_PREFIX+image["id"],
 293 |      |                                    origin=image["checksum"],
 294 |      |                                    status="CREATING")
 295 |      | 
 296 |      |     volume.save()
 297 |      | 
 298 |      |     return [volume]
 299 |      | 
 300 |      | 
 301 |      | @server_command("DESTROY")
     |  201 | @commands.server_command("DESTROY")
 302 |  202 | def destroy(vm, shutdown_timeout=None):
 303 |  203 |     # XXX: Workaround for race where OP_INSTANCE_REMOVE starts executing on
 304 |  204 |     # Ganeti before OP_INSTANCE_CREATE. This will be fixed when
 ... |  ... | 
 312 |  212 |     return backend.delete_instance(vm, shutdown_timeout=shutdown_timeout)
 313 |  213 | 
 314 |  214 | 
 315 |      | @server_command("START")
     |  215 | @commands.server_command("START")
 316 |  216 | def start(vm):
 317 |  217 |     log.info("Starting VM %s", vm)
 318 |  218 |     return backend.startup_instance(vm)
 319 |  219 | 
 320 |  220 | 
 321 |      | @server_command("STOP")
     |  221 | @commands.server_command("STOP")
 322 |  222 | def stop(vm, shutdown_timeout=None):
 323 |  223 |     log.info("Stopping VM %s", vm)
 324 |  224 |     return backend.shutdown_instance(vm, shutdown_timeout=shutdown_timeout)
 325 |  225 | 
 326 |  226 | 
 327 |      | @server_command("REBOOT")
     |  227 | @commands.server_command("REBOOT")
 328 |  228 | def reboot(vm, reboot_type, shutdown_timeout=None):
 329 |  229 |     if reboot_type not in ("SOFT", "HARD"):
 330 |  230 |         raise faults.BadRequest("Malformed request. Invalid reboot"
 ... |  ... | 
 338 |  238 | def resize(vm, flavor):
 339 |  239 |     action_fields = {"beparams": {"vcpus": flavor.cpu,
 340 |  240 |                                   "maxmem": flavor.ram}}
 341 |      |     comm = server_command("RESIZE", action_fields=action_fields)
     |  241 |     comm = commands.server_command("RESIZE", action_fields=action_fields)
 342 |  242 |     return comm(_resize)(vm, flavor)
 343 |  243 | 
 344 |  244 | 
 ... |  ... | 
 358 |  258 |     return backend.resize_instance(vm, vcpus=flavor.cpu, memory=flavor.ram)
 359 |  259 | 
 360 |  260 | 
 361 |      | @server_command("SET_FIREWALL_PROFILE")
     |  261 | @commands.server_command("SET_FIREWALL_PROFILE")
 362 |  262 | def set_firewall_profile(vm, profile, nic):
 363 |  263 |     log.info("Setting VM %s, NIC %s, firewall %s", vm, nic, profile)
 364 |  264 | 
 ... |  ... | 
 368 |  268 |     return None
 369 |  269 | 
 370 |  270 | 
 371 |      | @server_command("CONNECT")
     |  271 | @commands.server_command("CONNECT")
 372 |  272 | def connect(vm, network, port=None):
 373 |  273 |     if port is None:
 374 |  274 |         port = _create_port(vm.userid, network)
 ... |  ... | 
 379 |  279 |     return backend.connect_to_network(vm, port)
 380 |  280 | 
 381 |  281 | 
 382 |      | @server_command("DISCONNECT")
     |  282 | @commands.server_command("DISCONNECT")
 383 |  283 | def disconnect(vm, nic):
 384 |  284 |     log.info("Removing NIC %s from VM %s", nic, vm)
 385 |  285 |     return backend.disconnect_from_network(vm, nic)
 ... |  ... | 
 760 |  660 |     else:
 761 |  661 |         raise faults.BadRequest("Network 'uuid' or 'port' attribute"
 762 |  662 |                                 " is required.")
 763 |      | 
 764 |      | 
 765 |      | @server_command("ATTACH_VOLUME")
 766 |      | def attach_volume(vm, volume):
 767 |      |     """Attach a volume to a server.
 768 |      | 
 769 |      |     The volume must be in 'AVAILABLE' status in order to be attached. Also,
 770 |      |     number of the volumes that are attached to the server must remain less
 771 |      |     than 'GANETI_MAX_DISKS_PER_INSTANCE' setting. This function will send
 772 |      |     the corresponding job to Ganeti backend and update the status of the
 773 |      |     volume to 'ATTACHING'.
 774 |      | 
 775 |      |     """
 776 |      |     # Check volume state
 777 |      |     if volume.status not in ["AVAILABLE", "CREATING"]:
 778 |      |         raise faults.BadRequest("Cannot attach volume while volume is in"
 779 |      |                                 " '%s' status." % volume.status)
 780 |      | 
 781 |      |     # Check that disk templates are the same
 782 |      |     if volume.disk_template != vm.flavor.disk_template:
 783 |      |         msg = ("Volume and server must have the same disk template. Volume has"
 784 |      |                " disk template '%s' while server has '%s'"
 785 |      |                % (volume.disk_template, vm.flavor.disk_template))
 786 |      |         raise faults.BadRequest(msg)
 787 |      | 
 788 |      |     # Check maximum disk per instance hard limit
 789 |      |     if vm.volumes.count() == settings.GANETI_MAX_DISKS_PER_INSTANCE:
 790 |      |         raise faults.BadRequest("Maximum volumes per server limit reached")
 791 |      | 
 792 |      |     jobid = backend.attach_volume(vm, volume)
 793 |      | 
 794 |      |     log.info("Attached volume '%s' to server '%s'. JobID: '%s'", volume.id,
 795 |      |              volume.machine_id, jobid)
 796 |      | 
 797 |      |     volume.backendjobid = jobid
 798 |      |     volume.machine = vm
 799 |      |     volume.status = "ATTACHING"
 800 |      |     volume.save()
 801 |      |     return jobid
 802 |      | 
 803 |      | 
 804 |      | @server_command("DETACH_VOLUME")
 805 |      | def detach_volume(vm, volume):
 806 |      |     """Detach a volume to a server.
 807 |      | 
 808 |      |     The volume must be in 'IN_USE' status in order to be detached. Also,
 809 |      |     the root volume of the instance (index=0) can not be detached. This
 810 |      |     function will send the corresponding job to Ganeti backend and update the
 811 |      |     status of the volume to 'DETACHING'.
 812 |      | 
 813 |      |     """
 814 |      | 
 815 |      |     _check_attachment(vm, volume)
 816 |      |     if volume.status != "IN_USE":
 817 |      |         #TODO: Maybe allow other statuses as well ?
 818 |      |         raise faults.BadRequest("Cannot detach volume while volume is in"
 819 |      |                                 " '%s' status." % volume.status)
 820 |      |     if volume.index == 0:
 821 |      |         raise faults.BadRequest("Cannot detach the root volume of a server")
 822 |      |     jobid = backend.detach_volume(vm, volume)
 823 |      |     log.info("Detached volume '%s' from server '%s'. JobID: '%s'", volume.id,
 824 |      |              volume.machine_id, jobid)
 825 |      |     volume.backendjobid = jobid
 826 |      |     volume.status = "DETACHING"
 827 |      |     volume.save()
 828 |      |     return jobid
 829 |      | 
 830 |      | 
 831 |      | def _check_attachment(vm, volume):
 832 |      |     """Check that volume is attached to vm."""
 833 |      |     if volume.machine_id != vm.id:
 834 |      |         raise faults.BadRequest("Volume '%s' is not attached to server '%s'"
 835 |      |                                 % volume.id, vm.id)