root / docs / developers / showcase.rst @ cf862450
History | View | Annotate | Download (32 kB)
1 |
|
---|---|
2 |
Showcase: create a virtual cluster from scratch |
3 |
=============================================== |
4 |
|
5 |
In this section we will create a virtual cluster, from scratch. |
6 |
|
7 |
Requirements: |
8 |
|
9 |
* A `synnefo <http://www.synnefo.org>`_ deployment with functional *Astakos*, |
10 |
*Pithos+*, *Plankton* and *Cyclades* services. |
11 |
|
12 |
* A kamaki setup, configured with a default cloud (see how to do this with |
13 |
kamaki as a |
14 |
`shell command <../examplesdir/configuration.html#multiple-clouds-in-a-single-configuration>`_ , |
15 |
or a |
16 |
`python library <config.html#set-a-new-cloud-name-it-new-cloud-and-set-it-as-default>`_. |
17 |
|
18 |
* An image stored at file *./my_image.diskdump* that can run on a predefined |
19 |
hardware flavor, identifiable by the flavor id *42* (see how to create an |
20 |
image with the |
21 |
`synnefo image creator <http://www.synnefo.org/docs/snf-image-creator/latest/index.html>`_ |
22 |
). |
23 |
|
24 |
This is the pseudocode: |
25 |
|
26 |
#. Get credentials and service endpoints, with kamaki config and the |
27 |
**Astakos** *identity* and *account* services |
28 |
#. Upload the image file to the **Pithos+** *object-store* service |
29 |
#. Register the image file to the **Plankton** *image* service |
30 |
#. Create a number of virtual servers to the **Cyclades** *compute* service |
31 |
|
32 |
|
33 |
Credentials and endpoints |
34 |
------------------------- |
35 |
|
36 |
We assume that the kamaki configuration file contains at least one cloud |
37 |
configuration, and this configuration is also set as the default cloud for |
38 |
kamaki. A cloud configuration is basically a name for the cloud, an |
39 |
authentication URL and an authentication TOKEN: the credentials we are looking |
40 |
for! |
41 |
|
42 |
This is the plan: |
43 |
|
44 |
#. Get the credentials from the kamaki configuration |
45 |
#. Initialize an AstakosClient and test the credentials |
46 |
#. Get the endpoints for all services |
47 |
|
48 |
.. code-block:: python |
49 |
|
50 |
from sys import stderr |
51 |
from kamaki.cli.config import Config, CONFIG_PATH |
52 |
from kamaki.clients import ClientError |
53 |
from kamaki.clients.astakos import AstakosClient |
54 |
|
55 |
# Initialize Config with default values. |
56 |
cnf = Config() |
57 |
|
58 |
# 1. Get the credentials |
59 |
# Get default cloud name |
60 |
cloud_name = cnf.get('global', 'default_cloud') |
61 |
assert cloud_name, 'No default_cloud in file %s\n' % CONFIG_PATH |
62 |
|
63 |
# Get cloud authentication URL and TOKEN |
64 |
try: |
65 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
66 |
except KeyError: |
67 |
stderr.write('No authentication URL in cloud %s\n' % cloud_name) |
68 |
raise |
69 |
try: |
70 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
71 |
except KeyError: |
72 |
stderr.write('No token in cloud %s\n' % cloud_name) |
73 |
raise |
74 |
|
75 |
# 2. Test the credentials |
76 |
# Test authentication credentials |
77 |
try: |
78 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
79 |
auth.authenticate() |
80 |
except ClientError: |
81 |
stderr.write('Authentication failed with url %s and token %s\n' % ( |
82 |
AUTH_URL, AUTH_TOKEN)) |
83 |
raise |
84 |
|
85 |
# 3. Get the endpoints |
86 |
# Identity, Account --> astakos |
87 |
# Compute, Network --> cyclades |
88 |
# Object-store --> pithos |
89 |
# Image --> plankton |
90 |
try: |
91 |
endpoints = dict( |
92 |
astakos=AUTH_URL, |
93 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
94 |
network=auth.get_service_endpoints('network')['publicURL'], |
95 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
96 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
97 |
) |
98 |
user_id = auth.user_info['id'] |
99 |
except ClientError: |
100 |
stderr.write( |
101 |
'Failed to get user id and endpoints from the identity server\n') |
102 |
raise |
103 |
|
104 |
# 4. Pretty print the results |
105 |
stderr.write('Endpoints for user with id %s\n' % user_id) |
106 |
for k, v in endpoints.items(): |
107 |
stderr.write('\t%s:\t%s\n' % (k, v)) |
108 |
|
109 |
The output of this script should look similar to this:: |
110 |
|
111 |
Endpoints for user with id my-us3r-1d-asdf-1234-fd324rt |
112 |
pithos: https://pithos.example.com/object-store/v1 |
113 |
plankton: https://cyclades.example.com/image/v1.0 |
114 |
network: https://cyclades.example.com/network/v2.0 |
115 |
cyclades: https://cyclades.example.com/compute/v2.0 |
116 |
astakos: https://accounts.example.com/identity/v2.0 |
117 |
|
118 |
|
119 |
|
120 |
Upload the image |
121 |
---------------- |
122 |
|
123 |
We assume there is an image file at the current local directory, at |
124 |
*./my_image.diskdump* and we need to upload it to a Pithos+ container. We also |
125 |
assume the container does not currently exist. We will name it *images*. |
126 |
|
127 |
This is the plan: |
128 |
|
129 |
#. Initialize a Pithos+ client |
130 |
#. Create the container *images* |
131 |
#. Upload the local file to the container |
132 |
|
133 |
.. code-block:: python |
134 |
|
135 |
from os.path import abspath |
136 |
from kamaki.clients.pithos import PithosClient |
137 |
|
138 |
CONTAINER = 'images' |
139 |
IMAGE_FILE = 'my_image.diskdump' |
140 |
|
141 |
|
142 |
# 1. Initialize Pithos+ client and set account to current user |
143 |
try: |
144 |
pithos = PithosClient(endpoints['pithos'], AUTH_TOKEN) |
145 |
except ClientError: |
146 |
stderr.write('Failed to initialize a Pithos+ client\n') |
147 |
raise |
148 |
pithos.account = user_id |
149 |
|
150 |
# 2. Create the container "images" and let pithos client work with that |
151 |
try: |
152 |
pithos.create_container(CONTAINER) |
153 |
except ClientError: |
154 |
stderr.write('Failed to create container %s\n' % CONTAINER) |
155 |
raise |
156 |
pithos.container = CONTAINER |
157 |
|
158 |
# 3. Upload |
159 |
with open(abspath(IMAGE_FILE)) as f: |
160 |
try: |
161 |
stderr.write('This may take a while ...') |
162 |
pithos.upload_object(IMAGE_FILE, f) |
163 |
except ClientError: |
164 |
stderr.write('Failed to upload file %s to container %s\n' % ( |
165 |
IMAGE_FILE, CONTAINER)) |
166 |
raise |
167 |
|
168 |
Register the image |
169 |
------------------ |
170 |
|
171 |
Now the image is located at *pithos://<user_id>/images/my_image.diskdump* |
172 |
and we want to register it to the Plankton *image* service. |
173 |
|
174 |
.. code-block:: python |
175 |
|
176 |
from kamaki.clients.image import ImageClient |
177 |
|
178 |
IMAGE_NAME = 'My image' |
179 |
IMAGE_LOCATION = (user_id, CONTAINER, IMAGE_FILE) |
180 |
|
181 |
# 3.1 Initialize ImageClient |
182 |
try: |
183 |
plankton = ImageClient(endpoints['plankton'], AUTH_TOKEN) |
184 |
except ClientError: |
185 |
stderr.write('Failed to initialize the Image client client\n') |
186 |
raise |
187 |
|
188 |
# 3.2 Register the image |
189 |
properties = dict(osfamily='linux', root_partition='1') |
190 |
try: |
191 |
image = plankton.register(IMAGE_NAME, IMAGE_LOCATION, properties) |
192 |
except ClientError: |
193 |
stderr.write('Failed to register image %s\n' % IMAGE_NAME) |
194 |
raise |
195 |
|
196 |
Create the virtual cluster |
197 |
-------------------------- |
198 |
|
199 |
In order to build a virtual cluster, we need some information: |
200 |
|
201 |
* an image id. We can get them from *image['id']* (the id of the image we |
202 |
have just created) |
203 |
* a hardware flavor. Assume we have picked the flavor with id *42* |
204 |
* a set of names for our virtual servers. We will name them *cluster1*, |
205 |
*cluster2*, etc. |
206 |
|
207 |
Here is the plan: |
208 |
|
209 |
#. Initialize a Cyclades/Compute client |
210 |
#. Create a number of virtual servers. Their name should be prefixed as |
211 |
"cluster" |
212 |
|
213 |
.. code-block:: python |
214 |
|
215 |
# 4. Create virtual cluster |
216 |
from kamaki.clients.cyclades import CycladesClient |
217 |
|
218 |
FLAVOR_ID = 42 |
219 |
IMAGE_ID = image['id'] |
220 |
CLUSTER_SIZE = 2 |
221 |
CLUSTER_PREFIX = 'node' |
222 |
|
223 |
# 4.1 Initialize a cyclades client |
224 |
try: |
225 |
cyclades = CycladesClient(endpoints['cyclades'], AUTH_TOKEN) |
226 |
except ClientError: |
227 |
stderr.write('Failed to initialize cyclades client\n') |
228 |
raise |
229 |
|
230 |
# 4.2 Create 2 servers prefixed as "cluster" |
231 |
servers = [] |
232 |
for i in range(1, CLUSTER_SIZE + 1): |
233 |
server_name = '%s%s' % (CLUSTER_PREFIX, i) |
234 |
try: |
235 |
servers.append(cyclades.create_server( |
236 |
server_name, FLAVOR_ID, IMAGE_ID, networks=[])) |
237 |
except ClientError: |
238 |
stderr.write('Failed while creating server %s\n' % server_name) |
239 |
raise |
240 |
|
241 |
.. note:: the **networks=[]** parameter instructs the service to not connect |
242 |
the server on any networks. |
243 |
|
244 |
Networking |
245 |
---------- |
246 |
|
247 |
There are public and private networks. |
248 |
|
249 |
Public networks are managed by the service administrators. Public IPs, though, |
250 |
can be handled through the API: clients can create (reserve) and destroy |
251 |
(release) IPs from/to the network pool and attach them on their virtual |
252 |
devices. |
253 |
|
254 |
Private networks can be created by clients and they are considered a user |
255 |
resource, limited by user quotas. |
256 |
|
257 |
Ports are the connections between virtual servers and networks. This is the |
258 |
case for IP attachments as well as private network connections. |
259 |
|
260 |
.. code-block:: python |
261 |
|
262 |
# 5.1 Initialize a network client |
263 |
from kamaki.clients.cyclades import CycladesNetworkClient |
264 |
|
265 |
try: |
266 |
network = CycladesNetworkClient(endpoints['network'], AUTH_TOKEN) |
267 |
except ClientError: |
268 |
stderr.write('Failed to initialize network client\n') |
269 |
raise |
270 |
|
271 |
# 5.2 Pick a public network |
272 |
try: |
273 |
public_networks = [ |
274 |
net for net in network.list_networks() if net.get('public')] |
275 |
except ClientError: |
276 |
stderr.write('Failed while listing networks\n') |
277 |
raise |
278 |
try: |
279 |
public_net = public_networks[0] |
280 |
except IndexError: |
281 |
stderr.write('No public networks\n') |
282 |
raise |
283 |
|
284 |
# 5.3 Reserve IPs and attach them on the servers |
285 |
ips = list() |
286 |
for vm in servers: |
287 |
try: |
288 |
ips.append(network.create_floatingip(public_net['id'])) |
289 |
addr = ips[-1]['floating_ip_address'] |
290 |
stderr.write(' Reserved IP %s\n' % addr) |
291 |
|
292 |
network.create_port( |
293 |
public_net['id'], vm['id'], fixed_ips=dict(ip_address=addr)) |
294 |
except ClientError: |
295 |
stderr.write('Failed to attach an IP on virtual server %s\n' % ( |
296 |
vm['id'])) |
297 |
raise |
298 |
|
299 |
# 5.4 Create a private network |
300 |
try: |
301 |
private_net = network.create_network('MAC_FILTERED') |
302 |
except ClientError: |
303 |
stderr.write('Failed to create private network\n') |
304 |
raise |
305 |
|
306 |
# 5.5 Connect server on the private network |
307 |
for vm in servers: |
308 |
try: |
309 |
network.create_port(private_net['id'], vm['id']) |
310 |
except ClientError: |
311 |
stderr.write('Failed to connect server %s on network %s\n' % ( |
312 |
vm['id'], private_net['id'])) |
313 |
raise |
314 |
|
315 |
Some improvements |
316 |
----------------- |
317 |
|
318 |
Progress Bars |
319 |
''''''''''''' |
320 |
|
321 |
Uploading an image might take a while. You can wait patiently, or you can use a |
322 |
progress generator. Even better, combine a generator with the progress bar |
323 |
package that comes with kamaki. The upload_object method accepts two generators |
324 |
as parameters: one for calculating local file hashes and another for uploading |
325 |
|
326 |
.. code-block:: python |
327 |
|
328 |
from progress.bar import Bar |
329 |
|
330 |
def hash_gen(n): |
331 |
bar = Bar('Calculating hashes...') |
332 |
for i in bar.iter(range(int(n))): |
333 |
yield |
334 |
yield |
335 |
|
336 |
def upload_gen(n): |
337 |
bar = Bar('Uploading...') |
338 |
for i in bar.iter(range(int(n))): |
339 |
yield |
340 |
yield |
341 |
|
342 |
... |
343 |
pithos.upload_object( |
344 |
IMAGE_FILE, f, hash_cb=hash_gen, upload_cb=upload_gen) |
345 |
|
346 |
We can create a method to produce progress bar generators, and use it in other |
347 |
methods as well: |
348 |
|
349 |
.. code-block:: python |
350 |
|
351 |
try: |
352 |
from progress.bar import Bar |
353 |
|
354 |
def create_pb(msg): |
355 |
def generator(n): |
356 |
bar=Bar(msg) |
357 |
for i in bar.iter(range(int(n))): |
358 |
yield |
359 |
yield |
360 |
return generator |
361 |
except ImportError: |
362 |
stderr.write('Suggestion: install python-progress\n') |
363 |
def create_pb(msg): |
364 |
return None |
365 |
|
366 |
... |
367 |
pithos.upload_object( |
368 |
IMAGE_FILE, f, |
369 |
hash_cb=create_pb('Calculating hashes...'), |
370 |
upload_cb=create_pb('Uploading...')) |
371 |
|
372 |
Wait for servers to be built |
373 |
'''''''''''''''''''''''''''' |
374 |
|
375 |
When a create_server method is finished successfully, a server is being built. |
376 |
Usually, it takes a while for a server to be built. Fortunately, there is a wait |
377 |
method in the kamaki cyclades client. It can use a progress bar too! |
378 |
|
379 |
.. code-block:: python |
380 |
|
381 |
# 4.2 Create 2 servers prefixed as "cluster" |
382 |
... |
383 |
|
384 |
# 4.3 Wait for servers to be built |
385 |
for server in servers: |
386 |
st = cyclades.wait_server(server['id']) |
387 |
assert st == 'ACTIVE', 'Server build failed with status %s\n' % st |
388 |
|
389 |
Wait for ports to be built |
390 |
'''''''''''''''''''''''''' |
391 |
|
392 |
A connect (port) may take more than a moment to be created. A wait method can |
393 |
stall the execution of the program until the port build has finished |
394 |
(successfully or with an error). |
395 |
|
396 |
.. code-block:: python |
397 |
|
398 |
# 5.3 Reserve IPs and attach them on the servers |
399 |
... |
400 |
port = network.create_port( |
401 |
public_net['id'], vm['id'], fixed_ips=dict(ip_address=addr)) |
402 |
st = network.wait_port(port['id']) |
403 |
assert st == 'ACTIVE', 'Connection failed with status %s\n' % st |
404 |
|
405 |
Asynchronous server creation |
406 |
'''''''''''''''''''''''''''' |
407 |
|
408 |
In case of a large virtual cluster, it might be faster to spawn the servers |
409 |
with asynchronous requests. Kamaki clients offer an automated mechanism for |
410 |
asynchronous requests. |
411 |
|
412 |
.. code-block:: python |
413 |
|
414 |
# 4.2 Create 2 servers prefixed as "cluster" |
415 |
create_params = [dict( |
416 |
name='%s%s' % (CLUSTER_PREFIX, i), |
417 |
flavor_id=FLAVOR_ID, |
418 |
image_id=IMAGE_ID) for i in range(1, CLUSTER_SIZE + 1)] |
419 |
try: |
420 |
servers = cyclades.async_run(cyclades.create_server, create_params, networks=[]) |
421 |
except ClientError: |
422 |
stderr.write('Failed while creating servers\n') |
423 |
raise |
424 |
|
425 |
Clean up virtual cluster |
426 |
'''''''''''''''''''''''' |
427 |
|
428 |
We need to clean up Cyclades from servers left from previous cluster creations. |
429 |
This clean up will destroy all servers prefixed with "cluster". It will run |
430 |
before the cluster creation: |
431 |
|
432 |
.. code-block:: python |
433 |
|
434 |
# 4.2 Clean up virtual cluster |
435 |
to_delete = [server for server in cyclades.list_servers(detail=True) if ( |
436 |
server['name'].startswith(CLUSTER_PREFIX))] |
437 |
for server in to_delete: |
438 |
cyclades.delete_server(server['id']) |
439 |
for server in to_delete: |
440 |
cyclades.wait_server( |
441 |
server['id'], server['status'], |
442 |
wait_cb=create_pb('Deleting %s...' % server['name'])) |
443 |
|
444 |
# 4.3 Create 2 servers prefixed as "cluster" |
445 |
... |
446 |
|
447 |
Clean up unused networks and IPs |
448 |
'''''''''''''''''''''''''''''''' |
449 |
|
450 |
IPs and private networks are limited resources. This script identifies unused |
451 |
IPs and private networks and destroys them. We know if an IP or private network |
452 |
is being used by checking whether a port (connection) is associated with them. |
453 |
|
454 |
.. code-block:: python |
455 |
|
456 |
unused_ips = [ |
457 |
ip for ip in network.list_floatingips() if not ip['port_id']] |
458 |
|
459 |
for ip in unused_ips: |
460 |
network.delete_floatingip(ip['id']) |
461 |
|
462 |
used_net_ids = set([port['network_id'] for port in network.list_ports()]) |
463 |
unused_nets = [net for net in network.list_networks() if not ( |
464 |
net['public'] or net['id'] in used_net_ids)] |
465 |
|
466 |
for net in unused_nets: |
467 |
network.delete_network(net['id']) |
468 |
|
469 |
Inject ssh keys |
470 |
''''''''''''''' |
471 |
|
472 |
When a server is created, the returned value contains a field "adminPass". This |
473 |
field can be used to manually log into the server. |
474 |
|
475 |
An easier way is to |
476 |
`inject the ssh keys <../examplesdir/server.html#inject-ssh-keys-to-a-debian-server>`_ |
477 |
of the users who are going to use the virtual servers. |
478 |
|
479 |
Assuming that we have collected the keys in a file named *rsa.pub*, we can |
480 |
inject them into each server, with the personality argument |
481 |
|
482 |
.. code-block:: python |
483 |
|
484 |
SSH_KEYS = 'rsa.pub' |
485 |
|
486 |
... |
487 |
|
488 |
# 4.3 Create 2 servers prefixed as "cluster" |
489 |
personality = [] |
490 |
if SSH_KEYS: |
491 |
with open(abspath(SSH_KEYS)) as f: |
492 |
personality.append(dict( |
493 |
contents=b64encode(f.read()), |
494 |
path='/root/.ssh/authorized_keys', |
495 |
owner='root', group='root', mode=0600)) |
496 |
personality.append(dict( |
497 |
contents=b64encode('StrictHostKeyChecking no'), |
498 |
path='/root/.ssh/config', |
499 |
owner='root', group='root', mode=0600)) |
500 |
|
501 |
create_params = [dict( |
502 |
name='%s%s' % (CLUSTER_PREFIX, i), |
503 |
flavor_id=FLAVOR_ID, |
504 |
image_id=IMAGE_ID, |
505 |
personality=personality) for i in range(1, CLUSTER_SIZE + 1)] |
506 |
... |
507 |
|
508 |
Save server passwords in a file |
509 |
''''''''''''''''''''''''''''''' |
510 |
|
511 |
A last touch: define a local file to store the created server information, |
512 |
including the superuser password. |
513 |
|
514 |
.. code-block:: python |
515 |
|
516 |
# 4.4 Store passwords in file |
517 |
SERVER_INFO = 'servers.txt' |
518 |
with open(abspath(SERVER_INFO), 'w+') as f: |
519 |
from json import dump |
520 |
dump(servers, f, indent=2) |
521 |
|
522 |
# 4.5 Wait for 2 servers to be built |
523 |
... |
524 |
|
525 |
Errors and logs |
526 |
''''''''''''''' |
527 |
|
528 |
Developers may use the kamaki tools for |
529 |
`error handling <clients-api.html#error-handling>`_ and |
530 |
`logging <logging.html>`_, or implement their own methods. |
531 |
|
532 |
To demonstrate, we will modify the container creation code to warn users if the |
533 |
container already exists. We need a stream logger for the warning and a |
534 |
knowledge of the expected return values for the *create_container* method. |
535 |
|
536 |
First, let's get the logger. |
537 |
|
538 |
.. code-block:: python |
539 |
|
540 |
from kamaki.cli.logger import add_stream_logger, get_logger |
541 |
|
542 |
add_stream_logger(__name__) |
543 |
log = get_logger(__name__) |
544 |
|
545 |
The *create_container* method makes an HTTP request to the pithos server. It |
546 |
considers the request successful if the status code of the response is 201 |
547 |
(created) or 202 (accepted). These status codes mean that the container has |
548 |
been created or that it was already there anyway, respectively. |
549 |
|
550 |
We will force *create_container* to raise an error in case of a 202 response. |
551 |
This can be done by instructing *create_container* to accept only 201 as a |
552 |
successful status. |
553 |
|
554 |
.. code-block:: python |
555 |
|
556 |
try: |
557 |
pithos.create_container(CONTAINER, success=(201, )) |
558 |
except ClientError as ce: |
559 |
if ce.status in (202, ): |
560 |
log.warning('Container %s already exists' % CONTAINER) |
561 |
else: |
562 |
log.debug('Failed to create container %s' % CONTAINER) |
563 |
raise |
564 |
log.info('Container %s is ready' % CONTAINER) |
565 |
|
566 |
Create a cluster from scratch |
567 |
----------------------------- |
568 |
|
569 |
We are ready to create a module that uses kamaki to create a cluster from |
570 |
scratch. We revised the code by grouping functionality in methods and using |
571 |
logging more. We also added some command line interaction candy. |
572 |
|
573 |
.. code-block:: python |
574 |
|
575 |
from sys import argv |
576 |
from os.path import abspath |
577 |
from base64 import b64encode |
578 |
from kamaki.clients import ClientError |
579 |
from kamaki.cli.logger import get_logger, add_file_logger |
580 |
from progress.bar import Bar |
581 |
from logging import DEBUG |
582 |
|
583 |
# Define loggers |
584 |
log = get_logger(__name__) |
585 |
add_file_logger('kamaki.clients', DEBUG, '%s.log' % __name__) |
586 |
add_file_logger(__name__, DEBUG, '%s.log' % __name__) |
587 |
|
588 |
# Create progress bar generator |
589 |
|
590 |
|
591 |
def create_pb(msg): |
592 |
def generator(n): |
593 |
bar = Bar(msg) |
594 |
for i in bar.iter(range(int(n))): |
595 |
yield |
596 |
yield |
597 |
return generator |
598 |
|
599 |
|
600 |
# kamaki.config |
601 |
# Identity,Account / Astakos |
602 |
|
603 |
def init_astakos(): |
604 |
from kamaki.clients.astakos import AstakosClient |
605 |
from kamaki.cli.config import Config, CONFIG_PATH |
606 |
|
607 |
print(' Get the credentials') |
608 |
cnf = Config() |
609 |
|
610 |
# Get default cloud name |
611 |
try: |
612 |
cloud_name = cnf.get('global', 'default_cloud') |
613 |
except KeyError: |
614 |
log.debug('No default cloud set in file %s' % CONFIG_PATH) |
615 |
raise |
616 |
|
617 |
try: |
618 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
619 |
except KeyError: |
620 |
log.debug('No authentication URL in cloud %s' % cloud_name) |
621 |
raise |
622 |
try: |
623 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
624 |
except KeyError: |
625 |
log.debug('No token in cloud %s' % cloud_name) |
626 |
raise |
627 |
|
628 |
print(' Test the credentials') |
629 |
try: |
630 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
631 |
auth.authenticate() |
632 |
except ClientError: |
633 |
log.debug('Authentication failed with url %s and token %s' % ( |
634 |
AUTH_URL, AUTH_TOKEN)) |
635 |
raise |
636 |
|
637 |
return auth, AUTH_TOKEN |
638 |
|
639 |
|
640 |
def endpoints_and_user_id(auth): |
641 |
print(' Get the endpoints') |
642 |
try: |
643 |
endpoints = dict( |
644 |
astakos=auth.get_service_endpoints('identity')['publicURL'], |
645 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
646 |
network=auth.get_service_endpoints('network')['publicURL'], |
647 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
648 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
649 |
) |
650 |
user_id = auth.user_info['id'] |
651 |
except ClientError: |
652 |
print('Failed to get endpoints & user_id from identity server') |
653 |
raise |
654 |
return endpoints, user_id |
655 |
|
656 |
|
657 |
# Object-store / Pithos+ |
658 |
|
659 |
def init_pithos(endpoint, token, user_id): |
660 |
from kamaki.clients.pithos import PithosClient |
661 |
|
662 |
print(' Initialize Pithos+ client and set account to user uuid') |
663 |
try: |
664 |
return PithosClient(endpoint, token, user_id) |
665 |
except ClientError: |
666 |
log.debug('Failed to initialize a Pithos+ client') |
667 |
raise |
668 |
|
669 |
|
670 |
def upload_image(pithos, container, image_path): |
671 |
|
672 |
print(' Create the container "images" and use it') |
673 |
try: |
674 |
pithos.create_container(container, success=(201, )) |
675 |
except ClientError as ce: |
676 |
if ce.status in (202, ): |
677 |
log.warning('Container %s already exists' % container) |
678 |
else: |
679 |
log.debug('Failed to create container %s' % container) |
680 |
raise |
681 |
pithos.container = container |
682 |
|
683 |
print(' Upload to "images"') |
684 |
with open(abspath(image_path)) as f: |
685 |
try: |
686 |
pithos.upload_object( |
687 |
image_path, f, |
688 |
hash_cb=create_pb(' Calculating hashes...'), |
689 |
upload_cb=create_pb(' Uploading...')) |
690 |
except ClientError: |
691 |
log.debug('Failed to upload file %s to container %s' % ( |
692 |
image_path, container)) |
693 |
raise |
694 |
|
695 |
|
696 |
# Image / Plankton |
697 |
|
698 |
def init_plankton(endpoint, token): |
699 |
from kamaki.clients.image import ImageClient |
700 |
|
701 |
print(' Initialize ImageClient') |
702 |
try: |
703 |
return ImageClient(endpoint, token) |
704 |
except ClientError: |
705 |
log.debug('Failed to initialize the Image client') |
706 |
raise |
707 |
|
708 |
|
709 |
def register_image(plankton, name, user_id, container, path, properties): |
710 |
|
711 |
image_location = (user_id, container, path) |
712 |
print(' Register the image') |
713 |
try: |
714 |
return plankton.register(name, image_location, properties) |
715 |
except ClientError: |
716 |
log.debug('Failed to register image %s' % name) |
717 |
raise |
718 |
|
719 |
|
720 |
def init_network(endpoint, token): |
721 |
from kamaki.clients.cyclades import CycladesNetworkClient |
722 |
|
723 |
print(' Initialize a network client') |
724 |
try: |
725 |
return CycladesNetworkClient(endpoint, token) |
726 |
except ClientError: |
727 |
log.debug('Failed to initialize a network Client') |
728 |
raise |
729 |
|
730 |
|
731 |
def connect_servers(network, servers): |
732 |
print('Create a private network') |
733 |
try: |
734 |
net = network.create_network('MAC_FILTERED', 'A private network') |
735 |
except ClientError: |
736 |
log.debug('Failed to create a private network') |
737 |
raise |
738 |
|
739 |
for vm in servers: |
740 |
port = network.create_port(net['id'], vm['id']) |
741 |
msg = 'Connecting server %s to network %s' % (vm['id'], net['id']) |
742 |
network.wait_port(port['id'], wait_cb=create_pb(msg)) |
743 |
|
744 |
|
745 |
# Compute / Cyclades |
746 |
|
747 |
def init_cyclades(endpoint, token): |
748 |
from kamaki.clients.cyclades import CycladesClient |
749 |
|
750 |
print(' Initialize a cyclades client') |
751 |
try: |
752 |
return CycladesClient(endpoint, token) |
753 |
except ClientError: |
754 |
log.debug('Failed to initialize cyclades client') |
755 |
raise |
756 |
|
757 |
|
758 |
class Cluster(object): |
759 |
|
760 |
def __init__(self, cyclades, prefix, flavor_id, image_id, size): |
761 |
self.client = cyclades |
762 |
self.prefix, self.size = prefix, int(size) |
763 |
self.flavor_id, self.image_id = flavor_id, image_id |
764 |
|
765 |
def list(self): |
766 |
return [s for s in self.client.list_servers(detail=True) if ( |
767 |
s['name'].startswith(self.prefix))] |
768 |
|
769 |
def clean_up(self): |
770 |
to_delete = self.list() |
771 |
print(' There are %s servers to clean up' % len(to_delete)) |
772 |
for server in to_delete: |
773 |
self.client.delete_server(server['id']) |
774 |
for server in to_delete: |
775 |
self.client.wait_server( |
776 |
server['id'], server['status'], |
777 |
wait_cb=create_pb(' Deleting %s...' % server['name'])) |
778 |
|
779 |
def _personality(self, ssh_keys_path='', pub_keys_path=''): |
780 |
personality = [] |
781 |
if ssh_keys_path: |
782 |
with open(abspath(ssh_keys_path)) as f: |
783 |
personality.append(dict( |
784 |
contents=b64encode(f.read()), |
785 |
path='/root/.ssh/id_rsa', |
786 |
owner='root', group='root', mode=0600)) |
787 |
if pub_keys_path: |
788 |
with open(abspath(pub_keys_path)) as f: |
789 |
personality.append(dict( |
790 |
contents=b64encode(f.read()), |
791 |
path='/root/.ssh/authorized_keys', |
792 |
owner='root', group='root', mode=0600)) |
793 |
if ssh_keys_path or pub_keys_path: |
794 |
personality.append(dict( |
795 |
contents=b64encode('StrictHostKeyChecking no'), |
796 |
path='/root/.ssh/config', |
797 |
owner='root', group='root', mode=0600)) |
798 |
return personality |
799 |
|
800 |
def create(self, ssh_k_path='', pub_k_path='', server_log_path=''): |
801 |
print('\n Create %s servers prefixed as %s' % ( |
802 |
self.size, self.prefix)) |
803 |
servers = [] |
804 |
for i in range(1, self.size + 1): |
805 |
try: |
806 |
server_name = '%s%s' % (self.prefix, i) |
807 |
|
808 |
servers.append(self.client.create_server( |
809 |
server_name, self.flavor_id, self.image_id, |
810 |
networks=[], |
811 |
personality=self._personality(ssh_k_path, pub_k_path))) |
812 |
except ClientError: |
813 |
log.debug('Failed while creating server %s' % server_name) |
814 |
raise |
815 |
|
816 |
if server_log_path: |
817 |
print(' Store passwords in file %s' % server_log_path) |
818 |
with open(abspath(server_log_path), 'w+') as f: |
819 |
from json import dump |
820 |
dump(servers, f, indent=2) |
821 |
|
822 |
print(' Wait for %s servers to be built' % self.size) |
823 |
for server in servers: |
824 |
new_status = self.client.wait_server( |
825 |
server['id'], |
826 |
wait_cb=create_pb(' Creating %s...' % server['name'])) |
827 |
print(' Status for server %s is %s' % ( |
828 |
server['name'], new_status or 'not changed yet')) |
829 |
return servers |
830 |
|
831 |
|
832 |
def main(opts): |
833 |
|
834 |
print('1. Credentials and Endpoints') |
835 |
auth, token = init_astakos() |
836 |
endpoints, user_id = endpoints_and_user_id(auth) |
837 |
|
838 |
print('2. Upload the image file') |
839 |
pithos = init_pithos(endpoints['pithos'], token, user_id) |
840 |
|
841 |
upload_image(pithos, opts.container, opts.imagefile) |
842 |
|
843 |
print('3. Register the image') |
844 |
plankton = init_plankton(endpoints['plankton'], token) |
845 |
|
846 |
image = register_image( |
847 |
plankton, 'my image', user_id, opts.container, opts.imagefile, |
848 |
properties=dict( |
849 |
osfamily=opts.osfamily, root_partition=opts.rootpartition)) |
850 |
|
851 |
print('4. Create virtual cluster') |
852 |
cluster = Cluster( |
853 |
cyclades=init_cyclades(endpoints['cyclades'], token), |
854 |
prefix=opts.prefix, |
855 |
flavor_id=opts.flavorid, |
856 |
image_id=image['id'], |
857 |
size=opts.clustersize) |
858 |
if opts.delete_stale: |
859 |
cluster.clean_up() |
860 |
servers = cluster.create( |
861 |
opts.sshkeypath, opts.pubkeypath, opts.serverlogpath) |
862 |
|
863 |
# Group servers |
864 |
cluster_servers = cluster.list() |
865 |
|
866 |
active = [s for s in cluster_servers if s['status'] == 'ACTIVE'] |
867 |
print('%s cluster servers are ACTIVE' % len(active)) |
868 |
|
869 |
attached = [s for s in cluster_servers if s['attachments']] |
870 |
print('%s cluster servers are attached to networks' % len(attached)) |
871 |
|
872 |
build = [s for s in cluster_servers if s['status'] == 'BUILD'] |
873 |
print('%s cluster servers are being built' % len(build)) |
874 |
|
875 |
error = [s for s in cluster_servers if s['status'] in ('ERROR', )] |
876 |
print('%s cluster servers failed (ERROR status)' % len(error)) |
877 |
|
878 |
|
879 |
if __name__ == '__main__': |
880 |
|
881 |
# Add some interaction candy |
882 |
from optparse import OptionParser |
883 |
|
884 |
kw = {} |
885 |
kw['usage'] = '%prog [options]' |
886 |
kw['description'] = '%prog deploys a compute cluster on Synnefo w. kamaki' |
887 |
|
888 |
parser = OptionParser(**kw) |
889 |
parser.disable_interspersed_args() |
890 |
parser.add_option('--prefix', |
891 |
action='store', type='string', dest='prefix', |
892 |
help='The prefix to use for naming cluster nodes', |
893 |
default='node') |
894 |
parser.add_option('--clustersize', |
895 |
action='store', type='string', dest='clustersize', |
896 |
help='Number of virtual cluster nodes to create ', |
897 |
default=2) |
898 |
parser.add_option('--flavor-id', |
899 |
action='store', type='int', dest='flavorid', |
900 |
metavar='FLAVOR ID', |
901 |
help='Choose flavor id for the virtual hardware ' |
902 |
'of cluster nodes', |
903 |
default=42) |
904 |
parser.add_option('--image-file', |
905 |
action='store', type='string', dest='imagefile', |
906 |
metavar='IMAGE FILE PATH', |
907 |
help='The image file to upload and register ', |
908 |
default='my_image.diskdump') |
909 |
parser.add_option('--delete-stale', |
910 |
action='store_true', dest='delete_stale', |
911 |
help='Delete stale servers from previous runs, whose ' |
912 |
'name starts with the specified prefix, see ' |
913 |
'--prefix', |
914 |
default=False) |
915 |
parser.add_option('--container', |
916 |
action='store', type='string', dest='container', |
917 |
metavar='PITHOS+ CONTAINER', |
918 |
help='The Pithos+ container to store image file', |
919 |
default='images') |
920 |
parser.add_option('--ssh-key-path', |
921 |
action='store', type='string', dest='sshkeypath', |
922 |
metavar='PATH OF SSH KEYS', |
923 |
help='The ssh keys to inject to server (e.g., id_rsa) ', |
924 |
default='') |
925 |
parser.add_option('--pub-key-path', |
926 |
action='store', type='string', dest='pubkeypath', |
927 |
metavar='PATH OF PUBLIC KEYS', |
928 |
help='The public keys to inject to server', |
929 |
default='') |
930 |
parser.add_option('--server-log-path', |
931 |
action='store', type='string', dest='serverlogpath', |
932 |
metavar='FILE TO LOG THE VIRTUAL SERVERS', |
933 |
help='Where to store information on created servers ' |
934 |
'including superuser passwords', |
935 |
default='') |
936 |
parser.add_option('--image-osfamily', |
937 |
action='store', type='string', dest='osfamily', |
938 |
metavar='OS FAMILY', |
939 |
help='linux, windows, etc.', |
940 |
default='linux') |
941 |
parser.add_option('--image-root-partition', |
942 |
action='store', type='string', dest='rootpartition', |
943 |
metavar='IMAGE ROOT PARTITION', |
944 |
help='The partition where the root home is ', |
945 |
default='1') |
946 |
|
947 |
opts, args = parser.parse_args(argv[1:]) |
948 |
|
949 |
main(opts) |