Revision bda47e03
b/snf-cyclades-app/conf/20-snf-cyclades-app-api.conf | ||
---|---|---|
13 | 13 |
## parameter refers to a point in time more than POLL_LIMIT seconds ago. |
14 | 14 |
#POLL_LIMIT = 3600 |
15 | 15 |
# |
16 |
## Astakos groups that have access to '/admin' views. |
|
17 |
#ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"] |
|
18 |
# |
|
16 | 19 |
## |
17 | 20 |
## Network Configuration |
18 | 21 |
## |
b/snf-cyclades-app/synnefo/admin/stats.py | ||
---|---|---|
1 |
# Copyright 2013 GRNET S.A. All rights reserved. |
|
2 |
# |
|
3 |
# Redistribution and use in source and binary forms, with or |
|
4 |
# without modification, are permitted provided that the following |
|
5 |
# conditions are met: |
|
6 |
# |
|
7 |
# 1. Redistributions of source code must retain the above |
|
8 |
# copyright notice, this list of conditions and the following |
|
9 |
# disclaimer. |
|
10 |
# |
|
11 |
# 2. Redistributions in binary form must reproduce the above |
|
12 |
# copyright notice, this list of conditions and the following |
|
13 |
# disclaimer in the documentation and/or other materials |
|
14 |
# provided with the distribution. |
|
15 |
# |
|
16 |
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS |
|
17 |
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
18 |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
19 |
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR |
|
20 |
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
21 |
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
22 |
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
|
23 |
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
|
24 |
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
|
25 |
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
|
26 |
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
27 |
# POSSIBILITY OF SUCH DAMAGE. |
|
28 |
# |
|
29 |
# The views and conclusions contained in the software and |
|
30 |
# documentation are those of the authors and should not be |
|
31 |
# interpreted as representing official policies, either expressed |
|
32 |
# or implied, of GRNET S.A. |
|
33 |
|
|
34 |
|
|
35 |
import itertools |
|
36 |
import operator |
|
37 |
import datetime |
|
38 |
|
|
39 |
from collections import defaultdict # , OrderedDict |
|
40 |
from copy import copy |
|
41 |
from django.conf import settings |
|
42 |
from django.db.models import Count, Sum |
|
43 |
|
|
44 |
from snf_django.lib.astakos import UserCache |
|
45 |
from synnefo.db.models import VirtualMachine, Network, Backend |
|
46 |
from synnefo.api.util import get_image |
|
47 |
from synnefo.logic import backend as backend_mod |
|
48 |
|
|
49 |
|
|
50 |
def get_cyclades_stats(backend=None, clusters=True, servers=True,
                       resources=True, networks=True, images=True):
    """Collect the requested Cyclades statistics into a single dict.

    Each boolean flag toggles one section of the report. `backend`
    restricts server/resource/image statistics to a single Ganeti
    backend; None means all backends.
    """
    stats = {"datetime": datetime.datetime.now().strftime("%c")}
    if clusters:
        stats["clusters"] = get_cluster_stats(backend=backend)
    if servers:
        stats["servers"] = get_servers_stats(backend=backend)
    if resources:
        stats["resources"] = get_resources_stats(backend=backend)
    if networks:
        stats["networks"] = get_networks_stats()
    if images:
        # Bug fix: the original passed backend=None here, which silently
        # ignored the caller's backend filter for image statistics.
        stats["images"] = get_images_stats(backend=backend)
    return stats
|
64 |
|
|
65 |
|
|
66 |
def get_cluster_stats(backend):
    """Return counts of all, drained and offline Ganeti backends.

    NOTE(review): `backend` is accepted for signature parity with the
    other stats helpers but is not used; counts are always global.
    """
    backends = Backend.objects.all()
    return {
        "total": backends.count(),
        "drained": backends.filter(drained=True).count(),
        "offline": backends.filter(offline=True).count(),
    }
|
72 |
|
|
73 |
|
|
74 |
def _get_total_servers(backend=None):
    """Return a queryset of all VMs, optionally limited to one backend."""
    servers = VirtualMachine.objects.all()
    if backend is None:
        return servers
    return servers.filter(backend=backend)
|
79 |
|
|
80 |
|
|
81 |
def get_servers_stats(backend=None):
    """Return VM counts per operational state plus a grand total.

    States with no servers are still present in the result with a count
    of zero.
    """
    total_servers = _get_total_servers(backend=backend)
    per_state = total_servers.values("operstate")\
                             .annotate(count=Count("operstate"))
    stats = {"total": 0}
    # Pre-seed every known state with 0. (Plain for loop: the original
    # abused a list comprehension for its side effect.)
    for s in VirtualMachine.OPER_STATES:
        stats.setdefault(s[0], 0)
    for entry in per_state:
        stats[entry["operstate"]] = entry["count"]
        stats["total"] += entry["count"]
    return stats
|
91 |
|
|
92 |
|
|
93 |
def get_resources_stats(backend=None):
    """Aggregate flavor-based resource usage over non-deleted servers.

    Returns the backend capacity figures (from get_backend_stats) with
    per-resource "servers" (server count per distinct flavor value) and
    "allocated" entries merged in.
    """
    total_servers = _get_total_servers(backend=backend)
    active_servers = total_servers.filter(deleted=False)

    allocated = {}
    server_count = {}
    for res in ["cpu", "ram", "disk", "disk_template"]:
        server_count[res] = {}
        allocated[res] = 0
        val = "flavor__%s" % res
        results = active_servers.values(val).annotate(count=Count(val))
        for result in results:
            # e.g. server_count["cpu"][2] = number of servers with 2 CPUs
            server_count[res][result[val]] = result["count"]
            if res != "disk_template":
                # NOTE(review): this sums server *counts*, not
                # count * resource size; if "allocated" is meant to be
                # the total allocated amount it should likely be
                # result["count"] * result[val] — confirm upstream.
                allocated[res] += result["count"]

    # Start from backend capacity and graft usage figures onto it.
    resources_stats = get_backend_stats(backend=backend)
    for res in ["cpu", "ram", "disk", "disk_template"]:
        if res not in resources_stats:
            resources_stats[res] = {}
        resources_stats[res]["servers"] = server_count[res]
        resources_stats[res]["allocated"] = allocated[res]

    return resources_stats
|
117 |
|
|
118 |
|
|
119 |
def get_images_stats(backend=None):
    """Count active (non-deleted) servers per image.

    Keys of the returned dict are human-readable "owner:name" labels
    resolved through ImageCache.
    """
    active = _get_total_servers(backend=backend).filter(deleted=False)

    grouped = active.values("imageid", "userid")\
                    .annotate(number=Count("imageid"))
    cache = ImageCache()
    image_stats = defaultdict(int)
    for row in grouped:
        label = cache.get_image(row["imageid"], row["userid"])
        image_stats[label] += row["number"]
    return dict(image_stats)
|
131 |
|
|
132 |
|
|
133 |
def get_networks_stats():
    """Return network counts per state, a grand total and public-IP usage."""
    total_networks = Network.objects.all()
    stats = {"public_ips": get_ip_stats(),
             "total": 0}
    per_state = total_networks.values("state")\
                              .annotate(count=Count("state"))
    # Pre-seed every known state with 0. (Plain for loop: the original
    # abused a list comprehension for its side effect.)
    for s in Network.OPER_STATES:
        stats.setdefault(s[0], 0)
    for entry in per_state:
        stats[entry["state"]] = entry["count"]
        stats["total"] += entry["count"]
    return stats
|
144 |
|
|
145 |
|
|
146 |
def group_by_resource(objects, resource):
    """Count objects per distinct value of ``flavor.<resource>``.

    Returns a dict mapping each observed flavor attribute value to the
    number of objects carrying it.
    """
    extract = operator.attrgetter("flavor." + resource)
    counts = {}
    ordered = sorted(objects, key=extract)
    for value, members in itertools.groupby(ordered, extract):
        counts[value] = sum(1 for _ in members)
    return counts
|
153 |
|
|
154 |
|
|
155 |
def get_ip_stats():
    """Count total and free public IP addresses.

    Drained networks are still inspected (their pool may be looked up)
    but excluded from the returned totals.
    """
    total = 0
    free = 0
    public_nets = Network.objects.filter(public=True, deleted=False)
    for network in public_nets:
        try:
            net_total, net_free = network.ip_count()
        except AttributeError:
            # Fallback for networks without ip_count(): derive the
            # figures from the network's IP pool.
            # TODO: Check that this works..
            pool = network.get_pool(locked=False)
            net_total = pool.pool_size
            net_free = pool.count_available()
        if not network.drained:
            total += net_total
            free += net_free
    return {"total": total, "free": free}
|
170 |
|
|
171 |
|
|
172 |
def get_backend_stats(backend=None):
    """Aggregate free/total disk, RAM and CPU over online backends.

    With a specific `backend`, figures cover only that backend (an
    empty dict is returned if it is offline); otherwise all online
    backends are summed.
    """
    if backend is None:
        backends = Backend.objects.filter(offline=False)
    else:
        if backend.offline:
            return {}
        backends = [backend]
    # Refresh resource figures from Ganeti before summing. (Plain for
    # loop: the original abused a list comprehension for its side
    # effect.)
    for b in backends:
        backend_mod.update_backend_resources(b)
    resources = {}
    for attr in ("dfree", "dtotal", "mfree", "mtotal", "ctotal"):
        resources[attr] = sum(getattr(b, attr) for b in backends)

    return {"disk": {"free": resources["dfree"], "total": resources["dtotal"]},
            "ram": {"free": resources["mfree"], "total": resources["mtotal"]},
            # NOTE(review): "free" CPU mirrors ctotal — no cfree
            # attribute is summed here; confirm this is intentional.
            "cpu": {"free": resources["ctotal"], "total": resources["ctotal"]},
            "disk_template": {"free": 0, "total": 0}}
|
190 |
|
|
191 |
|
|
192 |
class ImageCache(object):
    """Cache mapping image ids to human-readable "owner:name" labels.

    The image service is queried at most once per image id; failed
    lookups are cached as "unknown:unknown" so they are not retried.
    """
    def __init__(self):
        # imageid -> "system:<name>" | "user:<name>" | "unknown:unknown"
        self.images = {}
        usercache = UserCache(settings.ASTAKOS_BASE_URL,
                              settings.CYCLADES_SERVICE_TOKEN)
        # UUID of the special account owning the "system" images.
        self.system_user_uuid = \
            usercache.get_uuid(settings.SYSTEM_IMAGES_OWNER)

    def get_image(self, imageid, userid):
        """Return (and memoize) the "owner:name" label for an image."""
        # Idiom fix: `imageid not in` instead of `not imageid in`; also
        # dropped a dead `owner = image["owner"]` assignment that was
        # immediately overwritten.
        if imageid not in self.images:
            try:
                image = get_image(imageid, userid)
                owner = "system" if image["owner"] == self.system_user_uuid\
                    else "user"
                self.images[imageid] = owner + ":" + image["name"]
            except Exception:
                # Best effort: never let one broken image break stats.
                self.images[imageid] = "unknown:unknown"

        return self.images[imageid]
|
212 |
|
|
213 |
|
|
214 |
def _bucket_by_rsapi_state(rows, state_key, state_map, zero_stats):
    """Fold annotated values() rows into per-RSAPI-state stat dicts.

    `rows` carry "deleted", `state_key` and the numeric fields named in
    `zero_stats`. Deleted objects go to the "DELETED" bucket; rows with
    an unknown state are ignored.
    """
    per_state = {}
    for state in state_map.values():
        per_state[state] = copy(zero_stats)
    for row in rows:
        deleted = row.pop("deleted")
        state = state_map.get(row.pop(state_key))
        if deleted:
            bucket = per_state["DELETED"]
        elif state:
            bucket = per_state[state]
        else:
            continue
        for key in zero_stats.keys():
            bucket[key] += row.get(key, 0)
    return per_state


def get_public_stats():
    """Return anonymized totals for servers and networks.

    Servers: per-RSAPI-state counts plus summed flavor cpu/ram/disk.
    Networks: per-RSAPI-state counts. (The original duplicated the
    aggregation loop for both; it now lives in _bucket_by_rsapi_state.)
    """
    # VirtualMachines
    servers = VirtualMachine.objects\
        .values("deleted", "operstate")\
        .annotate(count=Count("id"),
                  cpu=Sum("flavor__cpu"),
                  ram=Sum("flavor__ram"),
                  disk=Sum("flavor__disk"))
    server_stats = _bucket_by_rsapi_state(
        servers, "operstate", VirtualMachine.RSAPI_STATE_FROM_OPER_STATE,
        {"count": 0, "cpu": 0, "ram": 0, "disk": 0})

    # Networks
    networks = Network.objects\
        .values("deleted", "state")\
        .annotate(count=Count("id"))
    network_stats = _bucket_by_rsapi_state(
        networks, "state", Network.RSAPI_STATE_FROM_OPER_STATE,
        {"count": 0})

    statistics = {"servers": server_stats,
                  "networks": network_stats}
    return statistics
b/snf-cyclades-app/synnefo/admin/urls.py | ||
---|---|---|
40 | 40 |
def index(request): |
41 | 41 |
raise Http404 |
42 | 42 |
|
43 |
|
|
43 | 44 |
urlpatterns = patterns( |
44 | 45 |
'', |
45 | 46 |
url(r'^$', index), |
46 |
url(r'^stats$', views.get_stats), |
|
47 |
url(r'^stats$', views.get_public_stats), |
|
48 |
url(r'^stats/detail$', views.get_cyclades_stats), |
|
47 | 49 |
) |
b/snf-cyclades-app/synnefo/admin/views.py | ||
---|---|---|
34 | 34 |
import logging |
35 | 35 |
from django import http |
36 | 36 |
from django.utils import simplejson as json |
37 |
from synnefo.db.models import VirtualMachine, Network |
|
38 |
from django.db.models import Count, Sum |
|
37 |
from django.conf import settings |
|
39 | 38 |
from snf_django.lib import api |
40 |
from copy import copy |
|
41 | 39 |
|
40 |
from synnefo.admin import stats |
|
42 | 41 |
|
43 |
log = logging.getLogger(__name__) |
|
42 |
logger = logging.getLogger(__name__)
|
|
44 | 43 |
|
45 | 44 |
|
46 | 45 |
@api.api_method(http_method='GET', user_required=False, token_required=False, |
47 |
logger=log, serializations=['json']) |
|
46 |
logger=logger, serializations=['json'])
|
|
48 | 47 |
@api.allow_jsonp() |
49 |
def get_stats(request): |
|
50 |
stats = get_statistics()
|
|
51 |
data = json.dumps(stats) |
|
48 |
def get_public_stats(request):
|
|
49 |
_stats = stats.get_public_stats()
|
|
50 |
data = json.dumps(_stats)
|
|
52 | 51 |
return http.HttpResponse(data, status=200, content_type='application/json') |
53 | 52 |
|
54 | 53 |
|
55 |
def get_statistics(): |
|
56 |
# VirtualMachines |
|
57 |
vm_objects = VirtualMachine.objects |
|
58 |
servers = vm_objects.values("deleted", "operstate")\ |
|
59 |
.annotate(count=Count("id"), |
|
60 |
cpu=Sum("flavor__cpu"), |
|
61 |
ram=Sum("flavor__ram"), |
|
62 |
disk=Sum("flavor__disk")) |
|
63 |
zero_stats = {"count": 0, "cpu": 0, "ram": 0, "disk": 0} |
|
64 |
server_stats = {} |
|
65 |
for state in VirtualMachine.RSAPI_STATE_FROM_OPER_STATE.values(): |
|
66 |
server_stats[state] = copy(zero_stats) |
|
67 |
|
|
68 |
for stats in servers: |
|
69 |
deleted = stats.pop("deleted") |
|
70 |
operstate = stats.pop("operstate") |
|
71 |
state = VirtualMachine.RSAPI_STATE_FROM_OPER_STATE.get(operstate) |
|
72 |
if deleted: |
|
73 |
for key in zero_stats.keys(): |
|
74 |
server_stats["DELETED"][key] += stats.get(key, 0) |
|
75 |
elif state: |
|
76 |
for key in zero_stats.keys(): |
|
77 |
server_stats[state][key] += stats.get(key, 0) |
|
78 |
|
|
79 |
#Networks |
|
80 |
net_objects = Network.objects |
|
81 |
networks = net_objects.values("deleted", "state")\ |
|
82 |
.annotate(count=Count("id")) |
|
83 |
zero_stats = {"count": 0} |
|
84 |
network_stats = {} |
|
85 |
for state in Network.RSAPI_STATE_FROM_OPER_STATE.values(): |
|
86 |
network_stats[state] = copy(zero_stats) |
|
87 |
|
|
88 |
for stats in networks: |
|
89 |
deleted = stats.pop("deleted") |
|
90 |
state = stats.pop("state") |
|
91 |
state = Network.RSAPI_STATE_FROM_OPER_STATE.get(state) |
|
92 |
if deleted: |
|
93 |
for key in zero_stats.keys(): |
|
94 |
network_stats["DELETED"][key] += stats.get(key, 0) |
|
95 |
elif state: |
|
96 |
for key in zero_stats.keys(): |
|
97 |
network_stats[state][key] += stats.get(key, 0) |
|
98 |
|
|
99 |
statistics = {"servers": server_stats, |
|
100 |
"networks": network_stats} |
|
101 |
return statistics |
|
54 |
@api.api_method(http_method='GET', user_required=True, token_required=True, |
|
55 |
logger=logger, serializations=['json']) |
|
56 |
@api.user_in_groups(permitted_groups=settings.ADMIN_STATS_PERMITTED_GROUPS, |
|
57 |
logger=logger) |
|
58 |
def get_cyclades_stats(request): |
|
59 |
_stats = stats.get_cyclades_stats(backend=None, clusters=True, |
|
60 |
servers=True, resources=True, |
|
61 |
networks=True, images=True) |
|
62 |
data = json.dumps(_stats) |
|
63 |
return http.HttpResponse(data, status=200, content_type='application/json') |
b/snf-cyclades-app/synnefo/app_settings/default/api.py | ||
---|---|---|
13 | 13 |
# parameter refers to a point in time more than POLL_LIMIT seconds ago. |
14 | 14 |
POLL_LIMIT = 3600 |
15 | 15 |
|
16 |
# Astakos groups that have access to '/admin' views. |
|
17 |
ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"] |
|
18 |
|
|
16 | 19 |
# |
17 | 20 |
# Network Configuration |
18 | 21 |
# |
b/snf-cyclades-app/synnefo/logic/management/commands/stats-cyclades.py | ||
---|---|---|
32 | 32 |
# or implied, of GRNET S.A. |
33 | 33 |
|
34 | 34 |
|
35 |
import itertools |
|
36 |
import operator |
|
37 |
import datetime |
|
38 | 35 |
import json |
39 | 36 |
import string |
40 | 37 |
|
41 | 38 |
from optparse import make_option |
42 | 39 |
|
43 |
from collections import defaultdict # , OrderedDict |
|
44 |
from django.conf import settings |
|
45 |
from django.db.models import Count |
|
46 | 40 |
from snf_django.management.utils import pprint_table, parse_bool |
47 | 41 |
|
48 |
from synnefo.db.models import Backend |
|
49 | 42 |
from snf_django.management.commands import SynnefoCommand, CommandError |
50 |
from snf_django.lib.astakos import UserCache |
|
51 |
from synnefo.api.util import get_image |
|
52 |
from synnefo.db.models import VirtualMachine, Network |
|
53 |
from synnefo.logic import backend as backend_mod |
|
54 | 43 |
from synnefo.management.common import get_backend |
44 |
from synnefo.admin import stats as statistics |
|
55 | 45 |
|
56 | 46 |
|
57 | 47 |
class Command(SynnefoCommand): |
... | ... | |
107 | 97 |
networks = parse_bool(options["networks"]) |
108 | 98 |
images = parse_bool(options["images"]) |
109 | 99 |
|
110 |
stats = get_cyclades_stats(backend, clusters, servers, resources,
|
|
111 |
networks, images) |
|
100 |
stats = statistics.get_cyclades_stats(backend, clusters, servers,
|
|
101 |
resources, networks, images)
|
|
112 | 102 |
|
113 | 103 |
output_format = options["output_format"] |
114 | 104 |
if output_format == "json": |
... | ... | |
120 | 110 |
output_format) |
121 | 111 |
|
122 | 112 |
|
123 |
def get_cyclades_stats(backend=None, clusters=True, servers=True, |
|
124 |
resources=True, networks=True, images=True): |
|
125 |
stats = {"datetime": datetime.datetime.now().strftime("%c")} |
|
126 |
if clusters: |
|
127 |
stats["clusters"] = get_cluster_stats(backend=backend) |
|
128 |
if servers: |
|
129 |
stats["servers"] = get_servers_stats(backend=backend) |
|
130 |
if resources: |
|
131 |
stats["resources"] = get_resources_stats(backend=backend) |
|
132 |
if networks: |
|
133 |
stats["networks"] = get_networks_stats() |
|
134 |
if images: |
|
135 |
stats["images"] = get_images_stats(backend=None) |
|
136 |
return stats |
|
137 |
|
|
138 |
|
|
139 | 113 |
def columns_from_fields(fields, values): |
140 | 114 |
return zip(map(string.lower, fields), [values.get(f, 0) for f in fields]) |
141 | 115 |
|
... | ... | |
143 | 117 |
def pretty_print_stats(stats, stdout): |
144 | 118 |
newline = lambda: stdout.write("\n") |
145 | 119 |
|
146 |
datetime = stats.get("datetime") |
|
147 |
stdout.write("datetime: %s\n" % datetime) |
|
120 |
_datetime = stats.get("datetime")
|
|
121 |
stdout.write("datetime: %s\n" % _datetime)
|
|
148 | 122 |
newline() |
149 | 123 |
|
150 | 124 |
clusters = stats.get("clusters") |
... | ... | |
192 | 166 |
pprint_table(stdout, sorted(images.items()), None, |
193 | 167 |
title="Statistics for Images") |
194 | 168 |
newline() |
195 |
|
|
196 |
|
|
197 |
def get_cluster_stats(backend): |
|
198 |
total = Backend.objects.all() |
|
199 |
stats = {"total": total.count(), |
|
200 |
"drained": total.filter(drained=True).count(), |
|
201 |
"offline": total.filter(offline=True).count()} |
|
202 |
return stats |
|
203 |
|
|
204 |
|
|
205 |
def _get_total_servers(backend=None): |
|
206 |
total_servers = VirtualMachine.objects.all() |
|
207 |
if backend is not None: |
|
208 |
total_servers = total_servers.filter(backend=backend) |
|
209 |
return total_servers |
|
210 |
|
|
211 |
|
|
212 |
def get_servers_stats(backend=None): |
|
213 |
total_servers = _get_total_servers(backend=backend) |
|
214 |
per_state = total_servers.values("operstate")\ |
|
215 |
.annotate(count=Count("operstate")) |
|
216 |
stats = {"total": 0} |
|
217 |
for x in per_state: |
|
218 |
stats[x["operstate"]] = x["count"] |
|
219 |
stats["total"] += x["count"] |
|
220 |
return stats |
|
221 |
|
|
222 |
|
|
223 |
def get_resources_stats(backend=None): |
|
224 |
total_servers = _get_total_servers(backend=backend) |
|
225 |
active_servers = total_servers.filter(deleted=False) |
|
226 |
|
|
227 |
allocated = {} |
|
228 |
server_count = {} |
|
229 |
for res in ["cpu", "ram", "disk", "disk_template"]: |
|
230 |
server_count[res] = {} |
|
231 |
allocated[res] = 0 |
|
232 |
val = "flavor__%s" % res |
|
233 |
results = active_servers.values(val).annotate(count=Count(val)) |
|
234 |
for result in results: |
|
235 |
server_count[res][result[val]] = result["count"] |
|
236 |
if res != "disk_template": |
|
237 |
allocated[res] += result["count"] |
|
238 |
|
|
239 |
resources_stats = get_backend_stats(backend=backend) |
|
240 |
for res in ["cpu", "ram", "disk", "disk_template"]: |
|
241 |
if res not in resources_stats: |
|
242 |
resources_stats[res] = {} |
|
243 |
resources_stats[res]["servers"] = server_count[res] |
|
244 |
resources_stats[res]["allocated"] = allocated[res] |
|
245 |
|
|
246 |
return resources_stats |
|
247 |
|
|
248 |
|
|
249 |
def get_images_stats(backend=None): |
|
250 |
total_servers = _get_total_servers(backend=backend) |
|
251 |
active_servers = total_servers.filter(deleted=False) |
|
252 |
|
|
253 |
active_servers_images = active_servers.values("imageid", "userid")\ |
|
254 |
.annotate(number=Count("imageid")) |
|
255 |
image_cache = ImageCache() |
|
256 |
image_stats = defaultdict(int) |
|
257 |
for result in active_servers_images: |
|
258 |
imageid = image_cache.get_image(result["imageid"], result["userid"]) |
|
259 |
image_stats[imageid] += result["number"] |
|
260 |
return dict(image_stats) |
|
261 |
|
|
262 |
|
|
263 |
def get_networks_stats(): |
|
264 |
total_networks = Network.objects.all() |
|
265 |
stats = {"public_ips": get_ip_stats(), |
|
266 |
"total": 0} |
|
267 |
per_state = total_networks.values("state")\ |
|
268 |
.annotate(count=Count("state")) |
|
269 |
for x in per_state: |
|
270 |
stats[x["state"]] = x["count"] |
|
271 |
stats["total"] += x["count"] |
|
272 |
return stats |
|
273 |
|
|
274 |
|
|
275 |
def group_by_resource(objects, resource): |
|
276 |
stats = {} |
|
277 |
key = operator.attrgetter("flavor."+resource) |
|
278 |
grouped = itertools.groupby(sorted(objects, key=key), key) |
|
279 |
for val, group in grouped: |
|
280 |
stats[val] = len(list(group)) |
|
281 |
return stats |
|
282 |
|
|
283 |
|
|
284 |
def get_ip_stats(): |
|
285 |
total, free = 0, 0, |
|
286 |
for network in Network.objects.filter(public=True, deleted=False): |
|
287 |
try: |
|
288 |
net_total, net_free = network.ip_count() |
|
289 |
except AttributeError: |
|
290 |
# TODO: Check that this works.. |
|
291 |
pool = network.get_pool(locked=False) |
|
292 |
net_total = pool.pool_size |
|
293 |
net_free = pool.count_available() |
|
294 |
if not network.drained: |
|
295 |
total += net_total |
|
296 |
free += net_free |
|
297 |
return {"total": total, |
|
298 |
"free": free} |
|
299 |
|
|
300 |
|
|
301 |
def get_backend_stats(backend=None): |
|
302 |
if backend is None: |
|
303 |
backends = Backend.objects.filter(offline=False) |
|
304 |
else: |
|
305 |
if backend.offline: |
|
306 |
return {} |
|
307 |
backends = [backend] |
|
308 |
[backend_mod.update_backend_resources(b) for b in backends] |
|
309 |
resources = {} |
|
310 |
for attr in ("dfree", "dtotal", "mfree", "mtotal", "ctotal"): |
|
311 |
resources[attr] = 0 |
|
312 |
for b in backends: |
|
313 |
resources[attr] += getattr(b, attr) |
|
314 |
|
|
315 |
return {"disk": {"free": resources["dfree"], "total": resources["dtotal"]}, |
|
316 |
"ram": {"free": resources["mfree"], "total": resources["mtotal"]}, |
|
317 |
"cpu": {"free": resources["ctotal"], "total": resources["ctotal"]}, |
|
318 |
"disk_template": {"free": 0, "total": 0}} |
|
319 |
|
|
320 |
|
|
321 |
class ImageCache(object): |
|
322 |
def __init__(self): |
|
323 |
self.images = {} |
|
324 |
usercache = UserCache(settings.ASTAKOS_BASE_URL, |
|
325 |
settings.CYCLADES_SERVICE_TOKEN) |
|
326 |
self.system_user_uuid = \ |
|
327 |
usercache.get_uuid(settings.SYSTEM_IMAGES_OWNER) |
|
328 |
|
|
329 |
def get_image(self, imageid, userid): |
|
330 |
if not imageid in self.images: |
|
331 |
try: |
|
332 |
image = get_image(imageid, userid) |
|
333 |
owner = image["owner"] |
|
334 |
owner = "system" if image["owner"] == self.system_user_uuid\ |
|
335 |
else "user" |
|
336 |
self.images[imageid] = owner + ":" + image["name"] |
|
337 |
except Exception: |
|
338 |
self.images[imageid] = "unknown:unknown" |
|
339 |
|
|
340 |
return self.images[imageid] |
b/snf-django-lib/snf_django/lib/api/__init__.py | ||
---|---|---|
250 | 250 |
Wrapper to enable jsonp responses. |
251 | 251 |
""" |
252 | 252 |
def wrapper(func): |
253 |
@wraps(func) |
|
253 | 254 |
def view_wrapper(request, *args, **kwargs): |
254 | 255 |
response = func(request, *args, **kwargs) |
255 | 256 |
if 'content-type' in response._headers and \ |
... | ... | |
263 | 264 |
return response |
264 | 265 |
return view_wrapper |
265 | 266 |
return wrapper |
267 |
|
|
268 |
|
|
269 |
def user_in_groups(permitted_groups, logger=None):
    """Check that the request user belongs to one of permitted groups.

    Django view wrapper to check that the already identified request
    user belongs to one of the allowed groups.

    """
    if not logger:
        logger = log

    def decorator(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            # Guard clause: an unidentified request is rejected outright.
            user = getattr(request, "user", None)
            if user is None:
                raise faults.Forbidden
            groups = [g["name"] for g in user["access"]["user"]["roles"]]

            if not set(groups) & set(permitted_groups):
                logger.error("Not allowing access to '%s' by user '%s'."
                             " User does not belong to a valid group."
                             " User groups: %s, Required groups %s"
                             % (request.path, request.user, groups,
                                permitted_groups))
                raise faults.Forbidden

            logger.info("User '%s' in groups '%s' accessed view '%s'",
                        request.user_uniq, groups, request.path)

            return func(request, *args, **kwargs)
        return wrapper
    return decorator
Also available in: Unified diff