#!/usr/bin/env python
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from pithos.backends.modular import ModularBackend
+
class Migration(object):
    """Base class for a single data-migration step.

    Opens a SQLAlchemy engine/connection on the given database URL and a
    ModularBackend built from the Django ``BACKEND`` setting.  Concrete
    migrations subclass this and override ``execute()``.
    """

    def __init__(self, db):
        self.engine = create_engine(db)
        self.metadata = MetaData(self.engine)
        self.conn = self.engine.connect()

        # Fail with a clear message instead of the bare TypeError that
        # ``getattr(settings, 'BACKEND', None)[1]`` raised when the
        # setting was missing.
        backend_setting = getattr(settings, 'BACKEND', None)
        if backend_setting is None:
            raise ValueError('The BACKEND setting is not configured')
        options = backend_setting[1]
        self.backend = ModularBackend(*options)

    def execute(self):
        # Overridden by concrete migrations.
        pass
+
class Cache(object):
    # Made a new-style class (``object`` base) for consistency with
    # ``Migration`` above; ``class Cache():`` was old-style in Python 2.
    """Path -> hash lookup table backed by SQLAlchemy.

    Creates (if missing) a ``files`` table with a ``path`` primary key
    and a ``hash`` column, and keeps an open connection for updates.
    """

    def __init__(self, db):
        self.engine = create_engine(db)
        metadata = MetaData(self.engine)

        columns = []
        columns.append(Column('path', String(2048), primary_key=True))
        columns.append(Column('hash', String(255)))
        self.files = Table('files', metadata, *columns)
        self.conn = self.engine.connect()
        self.engine.echo = True
        metadata.create_all(self.engine)

    def put(self, path, hash):
        """Insert or replace the hash stored for ``path``."""
        # Emulate an upsert: delete any existing row, then insert anew.
        s = self.files.delete().where(self.files.c.path == path)
        r = self.conn.execute(s)
        r.close()
        s = self.files.insert()
        r = self.conn.execute(s, {'path': path, 'hash': hash})
        r.close()
-
+
def get(self, path):
s = select([self.files.c.hash], self.files.c.path == path)
r = self.conn.execute(s)
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("distribute>="+version)
+ pkg_resources.require("distribute>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
+
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
dst.close()
return os.path.realpath(saveto)
+
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
return __no_sandbox
+
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
_patch_file = _no_sandbox(_patch_file)
+
def _same_content(path, content):
return open(path).read() == content
+
def _rename_path(path):
    """Move ``path`` aside to a timestamped ``.OLD`` name and return it."""
    backup = '%s.OLD.%s' % (path, time.time())
    log.warn('Renaming %s into %s', path, backup)
    os.rename(path, backup)
    return backup
+
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
log.warn('Unkown installation at %s', placeholder)
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
def _after_install(dist):
    """Post-install hook: create the fake setuptools egg-info.

    ``dist`` is the distutils Distribution; the target directory is the
    ``install_purelib`` computed by its 'install' command.
    """
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
+
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
+ (SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
finally:
f.close()
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+ _create_fake_setuptools_pkg_info)
+
def _patch_egg_dir(path):
# let's check if it's already patched
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
def _before_install():
    """Pre-install bootstrap hook: delegates to ``_fake_setuptools()``."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
- args = sys.argv[sys.argv.index('install')+1:]
+ args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
- top_dir = args[index+1]
+ top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
replacement=False))
except TypeError:
# old distribute API
- setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+ setuptools_dist = ws.find(
+ pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
+ tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
logger = logging.getLogger(__name__)
+
def delegate_to_login_service(request):
url = AUTHENTICATION_URL
users = AUTHENTICATION_USERS
if users or not url:
return HttpResponseNotFound()
-
+
p = urlparse(url)
if request.is_secure():
proto = 'https://'
uri = proto + p.netloc + '/login?' + urlencode(params)
return HttpResponseRedirect(uri)
+
@csrf_exempt
def delegate_to_feedback_service(request):
url = AUTHENTICATION_URL
users = AUTHENTICATION_USERS
if users or not url:
return HttpResponseNotFound()
-
+
p = urlparse(url)
if request.is_secure():
proto = 'https://'
else:
proto = 'http://'
-
+
uri = proto + p.netloc + '/im/service/api/v2.0/feedback'
- headers = { 'X-Auth-Token' : SERVICE_TOKEN }
+ headers = {'X-Auth-Token': SERVICE_TOKEN}
values = dict([(k, v) for k, v in request.POST.items()])
data = urllib.urlencode(values)
req = urllib2.Request(uri, data, headers)
except urllib2.URLError, e:
logger.exception(e)
return HttpResponse(status=e.reason)
- return HttpResponse()
\ No newline at end of file
+ return HttpResponse()
from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
- BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
- BACKEND_BLOCK_UMASK,
- BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
- BACKEND_QUOTA, BACKEND_VERSIONING)
+ BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
+ BACKEND_BLOCK_UMASK,
+ BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
+ BACKEND_QUOTA, BACKEND_VERSIONING)
from pithos.backends import connect_backend
from pithos.api.util import hashmap_md5
import socket
from smtplib import SMTPException
+
def update_md5(m):
if m['resource'] != 'object' or m['details']['action'] != 'object update':
return
-
+
backend = connect_backend(db_module=BACKEND_DB_MODULE,
db_connection=BACKEND_DB_CONNECTION,
block_module=BACKEND_BLOCK_MODULE,
queue_connection=BACKEND_QUEUE_CONNECTION)
backend.default_policy['quota'] = BACKEND_QUOTA
backend.default_policy['versioning'] = BACKEND_VERSIONING
-
+
path = m['value']
account, container, name = path.split('/', 2)
version = m['details']['version']
meta = None
try:
- meta = backend.get_object_meta(account, account, container, name, 'pithos', version)
+ meta = backend.get_object_meta(
+ account, account, container, name, 'pithos', version)
if meta['checksum'] == '':
- size, hashmap = backend.get_object_hashmap(account, account, container, name, version)
+ size, hashmap = backend.get_object_hashmap(
+ account, account, container, name, version)
checksum = hashmap_md5(backend, hashmap, size)
- backend.update_object_checksum(account, account, container, name, version, checksum)
+ backend.update_object_checksum(
+ account, account, container, name, version, checksum)
print 'INFO: Updated checksum for path "%s"' % (path,)
except Exception, e:
print 'WARNING: Can not update checksum for path "%s" (%s)' % (path, e)
-
+
backend.close()
+
def send_sharing_notification(m):
if m['resource'] != 'sharing':
return
-
+
members = m['details']['members']
user = m['details']['user']
path = m['value']
account, container, name = path.split('/', 2)
-
+
subject = 'Invitation to a Pithos+ shared object'
from_email = DEFAULT_FROM_EMAIL
recipient_list = members
- message = 'User %s has invited you to a Pithos+ shared object. You can view it under "Shared to me" at "%s".' %(user, path)
+ message = 'User %s has invited you to a Pithos+ shared object. You can view it under "Shared to me" at "%s".' % (user, path)
try:
send_mail(subject, message, from_email, recipient_list)
- print 'INFO: Sharing notification sent for path "%s" to %s' % (path, ','.join(recipient_list))
+ print 'INFO: Sharing notification sent for path "%s" to %s' % (
+ path, ','.join(recipient_list))
except (SMTPException, socket.error) as e:
- print 'WARNING: Can not update send email for sharing "%s" (%s)' % (path, e)
+ print 'WARNING: Can not update send email for sharing "%s" (%s)' % (
+ path, e)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
+
def camelCase(s):
    """Return ``s`` with its first character lower-cased.

    Generalized to tolerate the empty string (returns '') instead of
    raising IndexError as ``s[0]`` did.
    """
    return s[:1].lower() + s[1:]
self.details = details
self.name = name or camelCase(self.__class__.__name__)
+
class NotModified(Fault):
    """Fault mapped to HTTP 304 Not Modified."""
    code = 304
+
class BadRequest(Fault):
    """Fault mapped to HTTP 400 Bad Request."""
    code = 400
+
class Unauthorized(Fault):
    """Fault mapped to HTTP 401 Unauthorized."""
    code = 401
+
class Forbidden(Fault):
    """Fault mapped to HTTP 403 Forbidden."""
    code = 403
+
class ItemNotFound(Fault):
    """Fault mapped to HTTP 404 Not Found."""
    code = 404
+
class Conflict(Fault):
    """Fault mapped to HTTP 409 Conflict."""
    code = 409
+
class LengthRequired(Fault):
    """Fault mapped to HTTP 411 Length Required."""
    code = 411
+
class PreconditionFailed(Fault):
    """Fault mapped to HTTP 412 Precondition Failed."""
    code = 412
+
class RequestEntityTooLarge(Fault):
    """Fault mapped to HTTP 413 Request Entity Too Large."""
    code = 413
+
class RangeNotSatisfiable(Fault):
    """Fault mapped to HTTP 416 Requested Range Not Satisfiable."""
    code = 416
+
class UnprocessableEntity(Fault):
    """Fault mapped to HTTP 422 Unprocessable Entity."""
    code = 422
+
class InternalServerError(Fault):
    """Fault mapped to HTTP 500 Internal Server Error."""
    code = 500
+
class NotImplemented(Fault):
    """Fault mapped to HTTP 501 Not Implemented.

    NOTE: the class name shadows the ``NotImplemented`` builtin when
    imported unqualified.
    """
    code = 501
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from synnefo.lib.astakos import get_user
-from pithos.api.faults import (Fault, NotModified, BadRequest, Unauthorized, Forbidden, ItemNotFound, Conflict,
+from pithos.api.faults import (
+ Fault, NotModified, BadRequest, Unauthorized, Forbidden, ItemNotFound, Conflict,
LengthRequired, PreconditionFailed, RequestEntityTooLarge, RangeNotSatisfiable, UnprocessableEntity)
-from pithos.api.util import (json_encode_decimal, rename_meta_key, format_header_key, printable_header_dict,
+from pithos.api.util import (
+ json_encode_decimal, rename_meta_key, format_header_key, printable_header_dict,
get_account_headers, put_account_headers, get_container_headers, put_container_headers, get_object_headers,
put_object_headers, update_manifest_meta, update_sharing_meta, update_public_meta,
validate_modification_preconditions, validate_matching_preconditions, split_container_object_string,
@csrf_exempt
def top_demux(request):
    """Top-level dispatcher for GET: list accounts if an auth token is
    present (query parameter or header), otherwise authenticate."""
    if request.method == 'GET':
        has_token = ('X-Auth-Token' in request.GET
                     or 'HTTP_X_AUTH_TOKEN' in request.META)
        if has_token:
            return account_list(request)
        return authenticate(request)
    else:
        return method_not_allowed(request)
+
@csrf_exempt
def account_demux(request, v_account):
if request.method == 'HEAD':
else:
return method_not_allowed(request)
+
@csrf_exempt
def container_demux(request, v_account, v_container):
if request.method == 'HEAD':
else:
return method_not_allowed(request)
+
@csrf_exempt
def object_demux(request, v_account, v_container, v_object):
# Helper to avoid placing the token in the URL when loading objects from a browser.
else:
return method_not_allowed(request)
+
@api_method('GET', user_required=False)
def authenticate(request):
    """Issue a 204 with X-Auth-Token / X-Storage-Url headers.

    The token echoes the supplied X-Auth-Key; the storage URL is the
    request URI (query string stripped) with the user appended.
    """
    # Normal Response Codes: 204
    # Error Response Codes: internalServerError (500),
    #                       forbidden (403),
    #                       badRequest (400)

    auth_user = request.META.get('HTTP_X_AUTH_USER')
    auth_key = request.META.get('HTTP_X_AUTH_KEY')
    if not auth_user or not auth_key:
        raise BadRequest('Missing X-Auth-User or X-Auth-Key header')

    response = HttpResponse(status=204)

    # Drop any query string from the absolute URI.
    uri = request.build_absolute_uri().split('?', 1)[0]
    if not uri.endswith('/'):
        uri += '/'

    response['X-Auth-Token'] = auth_key
    response['X-Storage-Url'] = uri + auth_user
    return response
+
@api_method('GET', format_allowed=True)
def account_list(request):
# Normal Response Codes: 200, 204
# Error Response Codes: internalServerError (500),
# badRequest (400)
response = HttpResponse()
-
+
marker = request.GET.get('marker')
limit = get_int_parameter(request.GET.get('limit'))
if not limit:
limit = 10000
-
+
accounts = request.backend.list_accounts(request.user_uniq, marker, limit)
-
+
if request.serialization == 'text':
if len(accounts) == 0:
# The cloudfiles python bindings expect 200 if json/xml.
response.status_code = 200
response.content = '\n'.join(accounts) + '\n'
return response
-
+
account_meta = []
for x in accounts:
if x == request.user_uniq:
continue
try:
- meta = request.backend.get_account_meta(request.user_uniq, x, 'pithos', include_user_defined=False)
+ meta = request.backend.get_account_meta(
+ request.user_uniq, x, 'pithos', include_user_defined=False)
groups = request.backend.get_account_groups(request.user_uniq, x)
except NotAllowedError:
raise Forbidden('Not allowed')
else:
rename_meta_key(meta, 'modified', 'last_modified')
- rename_meta_key(meta, 'until_timestamp', 'x_account_until_timestamp')
+ rename_meta_key(
+ meta, 'until_timestamp', 'x_account_until_timestamp')
if groups:
- meta['X-Account-Group'] = printable_header_dict(dict([(k, ','.join(v)) for k, v in groups.iteritems()]))
+ meta['X-Account-Group'] = printable_header_dict(
+ dict([(k, ','.join(v)) for k, v in groups.iteritems()]))
account_meta.append(printable_header_dict(meta))
if request.serialization == 'xml':
data = render_to_string('accounts.xml', {'accounts': account_meta})
- elif request.serialization == 'json':
+ elif request.serialization == 'json':
data = json.dumps(account_meta)
response.status_code = 200
response.content = data
return response
+
@api_method('HEAD')
def account_meta(request, v_account):
# Normal Response Codes: 204
# Error Response Codes: internalServerError (500),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
try:
- meta = request.backend.get_account_meta(request.user_uniq, v_account, 'pithos', until)
- groups = request.backend.get_account_groups(request.user_uniq, v_account)
- policy = request.backend.get_account_policy(request.user_uniq, v_account)
+ meta = request.backend.get_account_meta(
+ request.user_uniq, v_account, 'pithos', until)
+ groups = request.backend.get_account_groups(
+ request.user_uniq, v_account)
+ policy = request.backend.get_account_policy(
+ request.user_uniq, v_account)
except NotAllowedError:
raise Forbidden('Not allowed')
-
+
validate_modification_preconditions(request, meta)
-
+
response = HttpResponse(status=204)
put_account_headers(response, meta, groups, policy)
return response
+
@api_method('POST')
def account_update(request, v_account):
# Normal Response Codes: 202
# Error Response Codes: internalServerError (500),
# forbidden (403),
# badRequest (400)
-
+
meta, groups = get_account_headers(request)
replace = True
if 'update' in request.GET:
if groups:
try:
request.backend.update_account_groups(request.user_uniq, v_account,
- groups, replace)
+ groups, replace)
except NotAllowedError:
raise Forbidden('Not allowed')
except ValueError:
raise Forbidden('Not allowed')
return HttpResponse(status=202)
+
@api_method('GET', format_allowed=True)
def container_list(request, v_account):
# Normal Response Codes: 200, 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
try:
- meta = request.backend.get_account_meta(request.user_uniq, v_account, 'pithos', until)
- groups = request.backend.get_account_groups(request.user_uniq, v_account)
- policy = request.backend.get_account_policy(request.user_uniq, v_account)
+ meta = request.backend.get_account_meta(
+ request.user_uniq, v_account, 'pithos', until)
+ groups = request.backend.get_account_groups(
+ request.user_uniq, v_account)
+ policy = request.backend.get_account_policy(
+ request.user_uniq, v_account)
except NotAllowedError:
raise Forbidden('Not allowed')
-
+
validate_modification_preconditions(request, meta)
-
+
response = HttpResponse()
put_account_headers(response, meta, groups, policy)
-
+
marker = request.GET.get('marker')
limit = get_int_parameter(request.GET.get('limit'))
if not limit:
limit = 10000
-
+
shared = False
if 'shared' in request.GET:
shared = True
public = False
if 'public' in request.GET:
public = True
-
+
try:
- containers = request.backend.list_containers(request.user_uniq, v_account,
- marker, limit, shared, until, public)
+ containers = request.backend.list_containers(
+ request.user_uniq, v_account,
+ marker, limit, shared, until, public)
except NotAllowedError:
raise Forbidden('Not allowed')
except NameError:
containers = []
-
+
if request.serialization == 'text':
if len(containers) == 0:
# The cloudfiles python bindings expect 200 if json/xml.
response.status_code = 200
response.content = '\n'.join(containers) + '\n'
return response
-
+
container_meta = []
for x in containers:
try:
- meta = request.backend.get_container_meta(request.user_uniq, v_account,
- x, 'pithos', until, include_user_defined=False)
+ meta = request.backend.get_container_meta(
+ request.user_uniq, v_account,
+ x, 'pithos', until, include_user_defined=False)
policy = request.backend.get_container_policy(request.user_uniq,
- v_account, x)
+ v_account, x)
except NotAllowedError:
raise Forbidden('Not allowed')
except NameError:
pass
else:
rename_meta_key(meta, 'modified', 'last_modified')
- rename_meta_key(meta, 'until_timestamp', 'x_container_until_timestamp')
+ rename_meta_key(
+ meta, 'until_timestamp', 'x_container_until_timestamp')
if policy:
- meta['X-Container-Policy'] = printable_header_dict(dict([(k, v) for k, v in policy.iteritems()]))
+ meta['X-Container-Policy'] = printable_header_dict(
+ dict([(k, v) for k, v in policy.iteritems()]))
container_meta.append(printable_header_dict(meta))
if request.serialization == 'xml':
- data = render_to_string('containers.xml', {'account': v_account, 'containers': container_meta})
- elif request.serialization == 'json':
+ data = render_to_string('containers.xml', {'account':
+ v_account, 'containers': container_meta})
+ elif request.serialization == 'json':
data = json.dumps(container_meta)
response.status_code = 200
response.content = data
return response
+
@api_method('HEAD')
def container_meta(request, v_account, v_container):
# Normal Response Codes: 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
try:
meta = request.backend.get_container_meta(request.user_uniq, v_account,
- v_container, 'pithos', until)
+ v_container, 'pithos', until)
meta['object_meta'] = request.backend.list_container_meta(request.user_uniq,
- v_account, v_container, 'pithos', until)
- policy = request.backend.get_container_policy(request.user_uniq, v_account,
- v_container)
+ v_account, v_container, 'pithos', until)
+ policy = request.backend.get_container_policy(
+ request.user_uniq, v_account,
+ v_container)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
validate_modification_preconditions(request, meta)
-
+
response = HttpResponse(status=204)
put_container_headers(request, response, meta, policy)
return response
+
@api_method('PUT')
def container_create(request, v_account, v_container):
# Normal Response Codes: 201, 202
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
meta, policy = get_container_headers(request)
-
+
try:
- request.backend.put_container(request.user_uniq, v_account, v_container, policy)
+ request.backend.put_container(
+ request.user_uniq, v_account, v_container, policy)
ret = 201
except NotAllowedError:
raise Forbidden('Not allowed')
raise BadRequest('Invalid policy header')
except NameError:
ret = 202
-
+
if ret == 202 and policy:
try:
- request.backend.update_container_policy(request.user_uniq, v_account,
- v_container, policy, replace=False)
+ request.backend.update_container_policy(
+ request.user_uniq, v_account,
+ v_container, policy, replace=False)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
if meta:
try:
request.backend.update_container_meta(request.user_uniq, v_account,
- v_container, 'pithos', meta, replace=False)
+ v_container, 'pithos', meta, replace=False)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
return HttpResponse(status=ret)
+
@api_method('POST', format_allowed=True)
def container_update(request, v_account, v_container):
# Normal Response Codes: 202
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
meta, policy = get_container_headers(request)
replace = True
if 'update' in request.GET:
replace = False
if policy:
try:
- request.backend.update_container_policy(request.user_uniq, v_account,
- v_container, policy, replace)
+ request.backend.update_container_policy(
+ request.user_uniq, v_account,
+ v_container, policy, replace)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
if meta or replace:
try:
request.backend.update_container_meta(request.user_uniq, v_account,
- v_container, 'pithos', meta, replace)
+ v_container, 'pithos', meta, replace)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
content_length = -1
if request.META.get('HTTP_TRANSFER_ENCODING') != 'chunked':
- content_length = get_int_parameter(request.META.get('CONTENT_LENGTH', 0))
+ content_length = get_int_parameter(
+ request.META.get('CONTENT_LENGTH', 0))
content_type = request.META.get('CONTENT_TYPE')
hashmap = []
if content_type and content_type == 'application/octet-stream' and content_length != 0:
for data in socket_read_iterator(request, content_length,
- request.backend.block_size):
+ request.backend.block_size):
# TODO: Raise 408 (Request Timeout) if this takes too long.
# TODO: Raise 499 (Client Disconnect) if a length is defined and we stop before getting this much data.
hashmap.append(request.backend.put_block(data))
-
+
response = HttpResponse(status=202)
if hashmap:
response.content = simple_list_response(request, hashmap)
return response
+
@api_method('DELETE')
def container_delete(request, v_account, v_container):
# Normal Response Codes: 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
-
+
delimiter = request.GET.get('delimiter')
-
+
try:
- request.backend.delete_container(request.user_uniq, v_account, v_container,
- until, delimiter=delimiter)
+ request.backend.delete_container(
+ request.user_uniq, v_account, v_container,
+ until, delimiter=delimiter)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise Conflict('Container is not empty')
return HttpResponse(status=204)
+
@api_method('GET', format_allowed=True)
def object_list(request, v_account, v_container):
# Normal Response Codes: 200, 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
try:
meta = request.backend.get_container_meta(request.user_uniq, v_account,
- v_container, 'pithos', until)
+ v_container, 'pithos', until)
meta['object_meta'] = request.backend.list_container_meta(request.user_uniq,
- v_account, v_container, 'pithos', until)
- policy = request.backend.get_container_policy(request.user_uniq, v_account,
- v_container)
+ v_account, v_container, 'pithos', until)
+ policy = request.backend.get_container_policy(
+ request.user_uniq, v_account,
+ v_container)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
validate_modification_preconditions(request, meta)
-
+
response = HttpResponse()
put_container_headers(request, response, meta, policy)
-
+
path = request.GET.get('path')
prefix = request.GET.get('prefix')
delimiter = request.GET.get('delimiter')
-
+
# Path overrides prefix and delimiter.
virtual = True
if path:
prefix = path
delimiter = '/'
virtual = False
-
+
# Naming policy.
if prefix and delimiter and not prefix.endswith(delimiter):
prefix = prefix + delimiter
if not prefix:
prefix = ''
prefix = prefix.lstrip('/')
-
+
marker = request.GET.get('marker')
limit = get_int_parameter(request.GET.get('limit'))
if not limit:
limit = 10000
-
+
keys = request.GET.get('meta')
if keys:
- keys = [smart_str(x.strip()) for x in keys.split(',') if x.strip() != '']
+ keys = [smart_str(x.strip()) for x in keys.split(',')
+ if x.strip() != '']
included, excluded, opers = parse_filters(keys)
keys = []
keys += [format_header_key('X-Object-Meta-' + x) for x in included]
- keys += ['!'+format_header_key('X-Object-Meta-' + x) for x in excluded]
- keys += ['%s%s%s' % (format_header_key('X-Object-Meta-' + k), o, v) for k, o, v in opers]
+ keys += ['!' + format_header_key('X-Object-Meta-' + x)
+ for x in excluded]
+ keys += ['%s%s%s' % (format_header_key(
+ 'X-Object-Meta-' + k), o, v) for k, o, v in opers]
else:
keys = []
-
+
shared = False
if 'shared' in request.GET:
shared = True
public = False
if 'public' in request.GET:
public = True
-
+
if request.serialization == 'text':
try:
- objects = request.backend.list_objects(request.user_uniq, v_account,
- v_container, prefix, delimiter, marker,
- limit, virtual, 'pithos', keys, shared,
- until, None, public)
+ objects = request.backend.list_objects(
+ request.user_uniq, v_account,
+ v_container, prefix, delimiter, marker,
+ limit, virtual, 'pithos', keys, shared,
+ until, None, public)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
if len(objects) == 0:
# The cloudfiles python bindings expect 200 if json/xml.
response.status_code = 204
return response
try:
- objects = request.backend.list_object_meta(request.user_uniq, v_account,
- v_container, prefix, delimiter, marker,
- limit, virtual, 'pithos', keys, shared, until, None, public)
+ objects = request.backend.list_object_meta(
+ request.user_uniq, v_account,
+ v_container, prefix, delimiter, marker,
+ limit, virtual, 'pithos', keys, shared, until, None, public)
object_permissions = {}
object_public = {}
if until is None:
name_idx = len('/'.join((v_account, v_container, '')))
for x in request.backend.list_object_permissions(request.user_uniq,
- v_account, v_container, prefix):
+ v_account, v_container, prefix):
object = x[name_idx:]
object_permissions[object] = request.backend.get_object_permissions(
- request.user_uniq, v_account, v_container, object)
+ request.user_uniq, v_account, v_container, object)
for k, v in request.backend.list_object_public(request.user_uniq,
- v_account, v_container, prefix).iteritems():
+ v_account, v_container, prefix).iteritems():
object_public[k[name_idx:]] = v
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
object_meta = []
for meta in objects:
if len(meta) == 1:
# Virtual objects/directories.
object_meta.append(meta)
else:
- rename_meta_key(meta, 'hash', 'x_object_hash') # Will be replaced by checksum.
+ rename_meta_key(
+ meta, 'hash', 'x_object_hash') # Will be replaced by checksum.
rename_meta_key(meta, 'checksum', 'hash')
rename_meta_key(meta, 'type', 'content_type')
rename_meta_key(meta, 'uuid', 'x_object_uuid')
rename_meta_key(meta, 'modified', 'last_modified')
rename_meta_key(meta, 'modified_by', 'x_object_modified_by')
rename_meta_key(meta, 'version', 'x_object_version')
- rename_meta_key(meta, 'version_timestamp', 'x_object_version_timestamp')
+ rename_meta_key(
+ meta, 'version_timestamp', 'x_object_version_timestamp')
permissions = object_permissions.get(meta['name'], None)
if permissions:
- update_sharing_meta(request, permissions, v_account, v_container, meta['name'], meta)
+ update_sharing_meta(request, permissions, v_account,
+ v_container, meta['name'], meta)
public = object_public.get(meta['name'], None)
if public:
update_public_meta(public, meta)
object_meta.append(printable_header_dict(meta))
if request.serialization == 'xml':
- data = render_to_string('objects.xml', {'container': v_container, 'objects': object_meta})
- elif request.serialization == 'json':
+ data = render_to_string(
+ 'objects.xml', {'container': v_container, 'objects': object_meta})
+ elif request.serialization == 'json':
data = json.dumps(object_meta, default=json_encode_decimal)
response.status_code = 200
response.content = data
return response
+
@api_method('HEAD')
def object_meta(request, v_account, v_container, v_object):
# Normal Response Codes: 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
version = request.GET.get('version')
try:
meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos', version)
+ v_container, v_object, 'pithos', version)
if version is None:
- permissions = request.backend.get_object_permissions(request.user_uniq,
- v_account, v_container, v_object)
- public = request.backend.get_object_public(request.user_uniq, v_account,
- v_container, v_object)
+ permissions = request.backend.get_object_permissions(
+ request.user_uniq,
+ v_account, v_container, v_object)
+ public = request.backend.get_object_public(
+ request.user_uniq, v_account,
+ v_container, v_object)
else:
permissions = None
public = None
raise ItemNotFound('Object does not exist')
except VersionNotExists:
raise ItemNotFound('Version does not exist')
-
+
update_manifest_meta(request, v_account, meta)
- update_sharing_meta(request, permissions, v_account, v_container, v_object, meta)
+ update_sharing_meta(
+ request, permissions, v_account, v_container, v_object, meta)
update_public_meta(public, meta)
-
+
# Evaluate conditions.
validate_modification_preconditions(request, meta)
try:
response = HttpResponse(status=304)
response['ETag'] = meta['checksum']
return response
-
+
response = HttpResponse(status=200)
put_object_headers(response, meta)
return response
+
@api_method('GET', format_allowed=True)
def object_read(request, v_account, v_container, v_object):
# Normal Response Codes: 200, 206
# forbidden (403),
# badRequest (400),
# notModified (304)
-
+
version = request.GET.get('version')
-
+
# Reply with the version list. Do this first, as the object may be deleted.
if version == 'list':
if request.serialization == 'text':
raise BadRequest('No format specified for version list.')
-
+
try:
v = request.backend.list_versions(request.user_uniq, v_account,
- v_container, v_object)
+ v_container, v_object)
except NotAllowedError:
raise Forbidden('Not allowed')
d = {'versions': v}
if request.serialization == 'xml':
d['object'] = v_object
data = render_to_string('versions.xml', d)
- elif request.serialization == 'json':
+ elif request.serialization == 'json':
data = json.dumps(d, default=json_encode_decimal)
-
+
response = HttpResponse(data, status=200)
response['Content-Length'] = len(data)
return response
-
+
try:
meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos', version)
+ v_container, v_object, 'pithos', version)
if version is None:
- permissions = request.backend.get_object_permissions(request.user_uniq,
- v_account, v_container, v_object)
- public = request.backend.get_object_public(request.user_uniq, v_account,
- v_container, v_object)
+ permissions = request.backend.get_object_permissions(
+ request.user_uniq,
+ v_account, v_container, v_object)
+ public = request.backend.get_object_public(
+ request.user_uniq, v_account,
+ v_container, v_object)
else:
permissions = None
public = None
raise ItemNotFound('Object does not exist')
except VersionNotExists:
raise ItemNotFound('Version does not exist')
-
+
update_manifest_meta(request, v_account, meta)
- update_sharing_meta(request, permissions, v_account, v_container, v_object, meta)
+ update_sharing_meta(
+ request, permissions, v_account, v_container, v_object, meta)
update_public_meta(public, meta)
-
+
# Evaluate conditions.
validate_modification_preconditions(request, meta)
try:
response = HttpResponse(status=304)
response['ETag'] = meta['checksum']
return response
-
+
hashmap_reply = False
if 'hashmap' in request.GET and request.serialization != 'text':
hashmap_reply = True
-
+
sizes = []
hashmaps = []
if 'X-Object-Manifest' in meta and not hashmap_reply:
try:
- src_container, src_name = split_container_object_string('/' + meta['X-Object-Manifest'])
- objects = request.backend.list_objects(request.user_uniq, v_account,
- src_container, prefix=src_name, virtual=False)
+ src_container, src_name = split_container_object_string(
+ '/' + meta['X-Object-Manifest'])
+ objects = request.backend.list_objects(
+ request.user_uniq, v_account,
+ src_container, prefix=src_name, virtual=False)
except NotAllowedError:
raise Forbidden('Not allowed')
except ValueError:
raise BadRequest('Invalid X-Object-Manifest header')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
-
+
try:
for x in objects:
s, h = request.backend.get_object_hashmap(request.user_uniq,
- v_account, src_container, x[0], x[1])
+ v_account, src_container, x[0], x[1])
sizes.append(s)
hashmaps.append(h)
except NotAllowedError:
raise ItemNotFound('Version does not exist')
else:
try:
- s, h = request.backend.get_object_hashmap(request.user_uniq, v_account,
- v_container, v_object, version)
+ s, h = request.backend.get_object_hashmap(
+ request.user_uniq, v_account,
+ v_container, v_object, version)
sizes.append(s)
hashmaps.append(h)
except NotAllowedError:
raise ItemNotFound('Object does not exist')
except VersionNotExists:
raise ItemNotFound('Version does not exist')
-
+
# Reply with the hashmap.
if hashmap_reply:
size = sum(sizes)
if request.serialization == 'xml':
d['object'] = v_object
data = render_to_string('hashes.xml', d)
- elif request.serialization == 'json':
+ elif request.serialization == 'json':
data = json.dumps(d)
-
+
response = HttpResponse(data, status=200)
put_object_headers(response, meta)
response['Content-Length'] = len(data)
return response
-
- request.serialization = 'text' # Unset.
+
+ request.serialization = 'text' # Unset.
return object_data_response(request, sizes, hashmaps, meta)
+
@api_method('PUT', format_allowed=True)
def object_write(request, v_account, v_container, v_object):
# Normal Response Codes: 201
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
# Evaluate conditions.
if request.META.get('HTTP_IF_MATCH') or request.META.get('HTTP_IF_NONE_MATCH'):
try:
- meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos')
+ meta = request.backend.get_object_meta(
+ request.user_uniq, v_account,
+ v_container, v_object, 'pithos')
except NotAllowedError:
raise Forbidden('Not allowed')
except NameError:
meta = {}
validate_matching_preconditions(request, meta)
-
+
copy_from = request.META.get('HTTP_X_COPY_FROM')
move_from = request.META.get('HTTP_X_MOVE_FROM')
if copy_from or move_from:
delimiter = request.GET.get('delimiter')
- content_length = get_content_length(request) # Required by the API.
-
+ content_length = get_content_length(request) # Required by the API.
+
src_account = request.META.get('HTTP_X_SOURCE_ACCOUNT')
if not src_account:
src_account = request.user_uniq
if move_from:
try:
- src_container, src_name = split_container_object_string(move_from)
+ src_container, src_name = split_container_object_string(
+ move_from)
except ValueError:
raise BadRequest('Invalid X-Move-From header')
- version_id = copy_or_move_object(request, src_account, src_container, src_name,
- v_account, v_container, v_object, move=True, delimiter=delimiter)
+ version_id = copy_or_move_object(
+ request, src_account, src_container, src_name,
+ v_account, v_container, v_object, move=True, delimiter=delimiter)
else:
try:
- src_container, src_name = split_container_object_string(copy_from)
+ src_container, src_name = split_container_object_string(
+ copy_from)
except ValueError:
raise BadRequest('Invalid X-Copy-From header')
- version_id = copy_or_move_object(request, src_account, src_container, src_name,
- v_account, v_container, v_object, move=False, delimiter=delimiter)
+ version_id = copy_or_move_object(
+ request, src_account, src_container, src_name,
+ v_account, v_container, v_object, move=False, delimiter=delimiter)
response = HttpResponse(status=201)
response['X-Object-Version'] = version_id
return response
-
+
content_type, meta, permissions, public = get_object_headers(request)
content_length = -1
if request.META.get('HTTP_TRANSFER_ENCODING') != 'chunked':
# Should be BadRequest, but API says otherwise.
if content_type is None:
raise LengthRequired('Missing Content-Type header')
-
+
if 'hashmap' in request.GET:
if request.serialization not in ('json', 'xml'):
raise BadRequest('Invalid hashmap format')
-
+
data = ''
for block in socket_read_iterator(request, content_length,
- request.backend.block_size):
+ request.backend.block_size):
data = '%s%s' % (data, block)
-
+
if request.serialization == 'json':
d = json.loads(data)
if not hasattr(d, '__getitem__'):
xml = minidom.parseString(data)
obj = xml.getElementsByTagName('object')[0]
size = int(obj.attributes['bytes'].value)
-
+
hashes = xml.getElementsByTagName('hash')
hashmap = []
for hash in hashes:
hashmap.append(hash.firstChild.data)
except:
raise BadRequest('Invalid data formatting')
-
- checksum = '' # Do not set to None (will copy previous value).
+
+ checksum = '' # Do not set to None (will copy previous value).
else:
md5 = hashlib.md5()
size = 0
hashmap = []
for data in socket_read_iterator(request, content_length,
- request.backend.block_size):
+ request.backend.block_size):
# TODO: Raise 408 (Request Timeout) if this takes too long.
# TODO: Raise 499 (Client Disconnect) if a length is defined and we stop before getting this much data.
size += len(data)
hashmap.append(request.backend.put_block(data))
md5.update(data)
-
+
checksum = md5.hexdigest().lower()
etag = request.META.get('HTTP_ETAG')
if etag and parse_etags(etag)[0].lower() != checksum:
raise UnprocessableEntity('Object ETag does not match')
-
+
try:
version_id = request.backend.update_object_hashmap(request.user_uniq,
- v_account, v_container, v_object, size, content_type,
- hashmap, checksum, 'pithos', meta, True, permissions)
+ v_account, v_container, v_object, size, content_type,
+ hashmap, checksum, 'pithos', meta, True, permissions)
except NotAllowedError:
raise Forbidden('Not allowed')
except IndexError, e:
checksum = hashmap_md5(request.backend, hashmap, size)
try:
request.backend.update_object_checksum(request.user_uniq,
- v_account, v_container, v_object, version_id, checksum)
+ v_account, v_container, v_object, version_id, checksum)
except NotAllowedError:
raise Forbidden('Not allowed')
if public is not None:
try:
request.backend.update_object_public(request.user_uniq, v_account,
- v_container, v_object, public)
+ v_container, v_object, public)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
-
+
response = HttpResponse(status=201)
if checksum:
response['ETag'] = checksum
response['X-Object-Version'] = version_id
return response
+
@api_method('POST')
def object_write_form(request, v_account, v_container, v_object):
# Normal Response Codes: 201
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
request.upload_handlers = [SaveToBackendHandler(request)]
- if not request.FILES.has_key('X-Object-Data'):
+ if 'X-Object-Data' not in request.FILES:
raise BadRequest('Missing X-Object-Data field')
file = request.FILES['X-Object-Data']
-
+
checksum = file.etag
try:
version_id = request.backend.update_object_hashmap(request.user_uniq,
- v_account, v_container, v_object, file.size, file.content_type,
- file.hashmap, checksum, 'pithos', {}, True)
+ v_account, v_container, v_object, file.size, file.content_type,
+ file.hashmap, checksum, 'pithos', {}, True)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container does not exist')
except QuotaError:
raise RequestEntityTooLarge('Quota exceeded')
-
+
response = HttpResponse(status=201)
response['ETag'] = checksum
response['X-Object-Version'] = version_id
response.content = checksum
return response
+
@api_method('COPY', format_allowed=True)
def object_copy(request, v_account, v_container, v_object):
# Normal Response Codes: 201
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
dest_account = request.META.get('HTTP_DESTINATION_ACCOUNT')
if not dest_account:
dest_account = request.user_uniq
dest_container, dest_name = split_container_object_string(dest_path)
except ValueError:
raise BadRequest('Invalid Destination header')
-
+
# Evaluate conditions.
if request.META.get('HTTP_IF_MATCH') or request.META.get('HTTP_IF_NONE_MATCH'):
src_version = request.META.get('HTTP_X_SOURCE_VERSION')
try:
- meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos', src_version)
+ meta = request.backend.get_object_meta(
+ request.user_uniq, v_account,
+ v_container, v_object, 'pithos', src_version)
except NotAllowedError:
raise Forbidden('Not allowed')
except (ItemNotExists, VersionNotExists):
raise ItemNotFound('Container or object does not exist')
validate_matching_preconditions(request, meta)
-
+
delimiter = request.GET.get('delimiter')
-
+
version_id = copy_or_move_object(request, v_account, v_container, v_object,
- dest_account, dest_container, dest_name, move=False, delimiter=delimiter)
+ dest_account, dest_container, dest_name, move=False, delimiter=delimiter)
response = HttpResponse(status=201)
response['X-Object-Version'] = version_id
return response
+
@api_method('MOVE', format_allowed=True)
def object_move(request, v_account, v_container, v_object):
# Normal Response Codes: 201
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
dest_account = request.META.get('HTTP_DESTINATION_ACCOUNT')
if not dest_account:
dest_account = request.user_uniq
dest_container, dest_name = split_container_object_string(dest_path)
except ValueError:
raise BadRequest('Invalid Destination header')
-
+
# Evaluate conditions.
if request.META.get('HTTP_IF_MATCH') or request.META.get('HTTP_IF_NONE_MATCH'):
try:
- meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos')
+ meta = request.backend.get_object_meta(
+ request.user_uniq, v_account,
+ v_container, v_object, 'pithos')
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Container or object does not exist')
validate_matching_preconditions(request, meta)
-
+
delimiter = request.GET.get('delimiter')
-
+
version_id = copy_or_move_object(request, v_account, v_container, v_object,
- dest_account, dest_container, dest_name, move=True, delimiter=delimiter)
+ dest_account, dest_container, dest_name, move=True, delimiter=delimiter)
response = HttpResponse(status=201)
response['X-Object-Version'] = version_id
return response
+
@api_method('POST', format_allowed=True)
def object_update(request, v_account, v_container, v_object):
# Normal Response Codes: 202, 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
content_type, meta, permissions, public = get_object_headers(request)
-
+
try:
- prev_meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos')
+ prev_meta = request.backend.get_object_meta(
+ request.user_uniq, v_account,
+ v_container, v_object, 'pithos')
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
-
+
# Evaluate conditions.
if request.META.get('HTTP_IF_MATCH') or request.META.get('HTTP_IF_NONE_MATCH'):
validate_matching_preconditions(request, prev_meta)
-
+
replace = True
if 'update' in request.GET:
replace = False
-
+
# A Content-Type or X-Source-Object header indicates data updates.
src_object = request.META.get('HTTP_X_SOURCE_OBJECT')
if (not content_type or content_type != 'application/octet-stream') and not src_object:
response = HttpResponse(status=202)
-
+
# Do permissions first, as it may fail easier.
if permissions is not None:
try:
request.backend.update_object_permissions(request.user_uniq,
- v_account, v_container, v_object, permissions)
+ v_account, v_container, v_object, permissions)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise BadRequest('Invalid sharing header')
if public is not None:
try:
- request.backend.update_object_public(request.user_uniq, v_account,
- v_container, v_object, public)
+ request.backend.update_object_public(
+ request.user_uniq, v_account,
+ v_container, v_object, public)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
if meta or replace:
try:
- version_id = request.backend.update_object_meta(request.user_uniq,
- v_account, v_container, v_object, 'pithos', meta, replace)
+ version_id = request.backend.update_object_meta(
+ request.user_uniq,
+ v_account, v_container, v_object, 'pithos', meta, replace)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
- raise ItemNotFound('Object does not exist')
+ raise ItemNotFound('Object does not exist')
response['X-Object-Version'] = version_id
-
+
return response
-
+
# Single range update. Range must be in Content-Range.
# Based on: http://code.google.com/p/gears/wiki/ContentRangePostProposal
# (with the addition that '*' is allowed for the range - will append).
ranges = get_content_range(request)
if not ranges:
raise RangeNotSatisfiable('Invalid Content-Range header')
-
+
try:
size, hashmap = request.backend.get_object_hashmap(request.user_uniq,
- v_account, v_container, v_object)
+ v_account, v_container, v_object)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
-
+
offset, length, total = ranges
if offset is None:
offset = size
src_container, src_name = split_container_object_string(src_object)
src_version = request.META.get('HTTP_X_SOURCE_VERSION')
try:
- src_size, src_hashmap = request.backend.get_object_hashmap(request.user_uniq,
- src_account, src_container, src_name, src_version)
+ src_size, src_hashmap = request.backend.get_object_hashmap(
+ request.user_uniq,
+ src_account, src_container, src_name, src_version)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Source object does not exist')
-
+
if length is None:
length = src_size
elif length > src_size:
content_length = -1
if request.META.get('HTTP_TRANSFER_ENCODING') != 'chunked':
content_length = get_content_length(request)
-
+
if length is None:
length = content_length
else:
elif length != content_length:
raise BadRequest('Content length does not match range length')
if total is not None and (total != size or offset >= size or (length > 0 and offset + length >= size)):
- raise RangeNotSatisfiable('Supplied range will change provided object limits')
-
+ raise RangeNotSatisfiable(
+ 'Supplied range will change provided object limits')
+
dest_bytes = request.META.get('HTTP_X_OBJECT_BYTES')
if dest_bytes is not None:
dest_bytes = get_int_parameter(dest_bytes)
if dest_bytes is None:
raise BadRequest('Invalid X-Object-Bytes header')
-
+
if src_object:
if offset % request.backend.block_size == 0:
# Update the hashes only.
else:
data = request.backend.get_block(src_hashmap[sbi])
hashmap[bi] = request.backend.update_block(hashmap[bi],
- data[:bl], 0)
+ data[:bl], 0)
else:
hashmap.append(src_hashmap[sbi])
offset += bl
else:
data = ''
for d in socket_read_iterator(request, length,
- request.backend.block_size):
+ request.backend.block_size):
# TODO: Raise 408 (Request Timeout) if this takes too long.
# TODO: Raise 499 (Client Disconnect) if a length is defined and we stop before getting this much data.
data += d
data = data[bytes:]
if len(data) > 0:
put_object_block(request, hashmap, data, offset)
-
+
if offset > size:
size = offset
if dest_bytes is not None and dest_bytes < size:
size = dest_bytes
hashmap = hashmap[:(int((size - 1) / request.backend.block_size) + 1)]
- checksum = hashmap_md5(request.backend, hashmap, size) if UPDATE_MD5 else ''
+ checksum = hashmap_md5(
+ request.backend, hashmap, size) if UPDATE_MD5 else ''
try:
version_id = request.backend.update_object_hashmap(request.user_uniq,
- v_account, v_container, v_object, size, prev_meta['type'],
- hashmap, checksum, 'pithos', meta, replace, permissions)
+ v_account, v_container, v_object, size, prev_meta[
+ 'type'],
+ hashmap, checksum, 'pithos', meta, replace, permissions)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
if public is not None:
try:
request.backend.update_object_public(request.user_uniq, v_account,
- v_container, v_object, public)
+ v_container, v_object, public)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
-
+
response = HttpResponse(status=204)
response['ETag'] = checksum
response['X-Object-Version'] = version_id
return response
+
@api_method('DELETE')
def object_delete(request, v_account, v_container, v_object):
# Normal Response Codes: 204
# itemNotFound (404),
# forbidden (403),
# badRequest (400)
-
+
until = get_int_parameter(request.GET.get('until'))
delimiter = request.GET.get('delimiter')
-
+
try:
- request.backend.delete_object(request.user_uniq, v_account, v_container,
- v_object, until, delimiter=delimiter)
+ request.backend.delete_object(
+ request.user_uniq, v_account, v_container,
+ v_object, until, delimiter=delimiter)
except NotAllowedError:
raise Forbidden('Not allowed')
except ItemNotExists:
raise ItemNotFound('Object does not exist')
return HttpResponse(status=204)
+
@api_method()
def method_not_allowed(request):
raise BadRequest('Method not allowed')
from django.core.management.base import BaseCommand, CommandError
from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
- BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
- BACKEND_BLOCK_UMASK,
- BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
- BACKEND_QUOTA, BACKEND_VERSIONING)
+ BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
+ BACKEND_BLOCK_UMASK,
+ BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
+ BACKEND_QUOTA, BACKEND_VERSIONING)
from pithos.backends import connect_backend
class Command(BaseCommand):
args = "<user>"
help = "Get/set a user's quota"
-
+
option_list = BaseCommand.option_list + (
make_option('--set-quota',
- dest='quota',
- metavar='BYTES',
- help="Set user's quota"),
- )
-
+ dest='quota',
+ metavar='BYTES',
+ help="Set user's quota"),
+ )
+
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Please provide a user")
-
+
user = args[0]
quota = options.get('quota')
if quota is not None:
quota = int(quota)
except ValueError:
raise CommandError("Invalid quota")
-
+
backend = connect_backend(db_module=BACKEND_DB_MODULE,
db_connection=BACKEND_DB_CONNECTION,
block_module=BACKEND_BLOCK_MODULE,
if quota is not None:
backend.update_account_policy(user, user, {'quota': quota})
else:
- self.stdout.write("Quota for %s: %s\n" % (user, backend.get_account_policy(user, user)['quota']))
+ self.stdout.write("Quota for %s: %s\n" % (
+ user, backend.get_account_policy(user, user)['quota']))
backend.close()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from pithos.api.faults import (Fault, BadRequest, ItemNotFound)
from pithos.api.util import (put_object_headers, update_manifest_meta,
- validate_modification_preconditions, validate_matching_preconditions,
- object_data_response, api_method)
+ validate_modification_preconditions, validate_matching_preconditions,
+ object_data_response, api_method)
from pithos.api.short_url import decode_url
from pithos.api.settings import AUTHENTICATION_URL, AUTHENTICATION_USERS
else:
return method_not_allowed(request)
+
@api_method('HEAD', user_required=False)
def public_meta(request, v_public):
# Normal Response Codes: 204
# Error Response Codes: internalServerError (500),
# itemNotFound (404),
# badRequest (400)
-
+
try:
- v_account, v_container, v_object = request.backend.get_public(request.user_uniq,
- decode_url(v_public))
+ v_account, v_container, v_object = request.backend.get_public(
+ request.user_uniq,
+ decode_url(v_public))
meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos')
- public = request.backend.get_object_public(request.user_uniq, v_account,
- v_container, v_object)
+ v_container, v_object, 'pithos')
+ public = request.backend.get_object_public(
+ request.user_uniq, v_account,
+ v_container, v_object)
except:
raise ItemNotFound('Object does not exist')
-
+
if not public:
raise ItemNotFound('Object does not exist')
update_manifest_meta(request, v_account, meta)
-
+
response = HttpResponse(status=200)
put_object_headers(response, meta, True)
return response
+
@api_method('GET', user_required=False)
def public_read(request, v_public):
# Normal Response Codes: 200, 206
# itemNotFound (404),
# badRequest (400),
# notModified (304)
-
try:
- v_account, v_container, v_object = request.backend.get_public(request.user_uniq,
- decode_url(v_public))
+ v_account, v_container, v_object = request.backend.get_public(
+ request.user_uniq,
+ decode_url(v_public))
meta = request.backend.get_object_meta(request.user_uniq, v_account,
- v_container, v_object, 'pithos')
- public = request.backend.get_object_public(request.user_uniq, v_account,
- v_container, v_object)
+ v_container, v_object, 'pithos')
+ public = request.backend.get_object_public(
+ request.user_uniq, v_account,
+ v_container, v_object)
except:
raise ItemNotFound('Object does not exist')
-
+
if not public:
raise ItemNotFound('Object does not exist')
update_manifest_meta(request, v_account, meta)
-
+
# Evaluate conditions.
validate_modification_preconditions(request, meta)
try:
response = HttpResponse(status=304)
response['ETag'] = meta['ETag']
return response
-
+
sizes = []
hashmaps = []
if 'X-Object-Manifest' in meta:
try:
- src_container, src_name = split_container_object_string('/' + meta['X-Object-Manifest'])
- objects = request.backend.list_objects(request.user_uniq, v_account,
- src_container, prefix=src_name, virtual=False)
+ src_container, src_name = split_container_object_string(
+ '/' + meta['X-Object-Manifest'])
+ objects = request.backend.list_objects(
+ request.user_uniq, v_account,
+ src_container, prefix=src_name, virtual=False)
except:
raise ItemNotFound('Object does not exist')
-
+
try:
for x in objects:
s, h = request.backend.get_object_hashmap(request.user_uniq,
- v_account, src_container, x[0], x[1])
+ v_account, src_container, x[0], x[1])
sizes.append(s)
hashmaps.append(h)
except:
raise ItemNotFound('Object does not exist')
else:
try:
- s, h = request.backend.get_object_hashmap(request.user_uniq, v_account,
- v_container, v_object)
+ s, h = request.backend.get_object_hashmap(
+ request.user_uniq, v_account,
+ v_container, v_object)
sizes.append(s)
hashmaps.append(h)
except:
raise ItemNotFound('Object does not exist')
-
+
if 'Content-Disposition' not in meta:
name = v_object.rstrip('/').split('/')[-1]
if not name:
name = v_public
meta['Content-Disposition'] = 'attachment; filename=%s' % (name,)
-
+
return object_data_response(request, sizes, hashmaps, meta, True)
+
@api_method(user_required=False)
def method_not_allowed(request, **v_args):
raise ItemNotFound('Object does not exist')
'0009': 'διογÎνης'
}
-AUTHENTICATION_URL = getattr(settings, 'PITHOS_AUTHENTICATION_URL', 'http://127.0.0.1:8000/im/authenticate')
+AUTHENTICATION_URL = getattr(settings, 'PITHOS_AUTHENTICATION_URL',
+ 'http://127.0.0.1:8000/im/authenticate')
AUTHENTICATION_USERS = getattr(settings, 'PITHOS_AUTHENTICATION_USERS', {})
COOKIE_NAME = getattr(settings, 'ASTAKOS_COOKIE_NAME', '_pithos2_a')
# SQLAlchemy (choose SQLite/MySQL/PostgreSQL).
-BACKEND_DB_MODULE = getattr(settings, 'PITHOS_BACKEND_DB_MODULE', 'pithos.backends.lib.sqlalchemy')
-BACKEND_DB_CONNECTION = getattr(settings, 'PITHOS_BACKEND_DB_CONNECTION', 'sqlite:////tmp/pithos-backend.db')
+BACKEND_DB_MODULE = getattr(
+ settings, 'PITHOS_BACKEND_DB_MODULE', 'pithos.backends.lib.sqlalchemy')
+BACKEND_DB_CONNECTION = getattr(settings, 'PITHOS_BACKEND_DB_CONNECTION',
+ 'sqlite:////tmp/pithos-backend.db')
# Block storage.
-BACKEND_BLOCK_MODULE = getattr(settings, 'PITHOS_BACKEND_BLOCK_MODULE', 'pithos.backends.lib.hashfiler')
-BACKEND_BLOCK_PATH = getattr(settings, 'PITHOS_BACKEND_BLOCK_PATH', '/tmp/pithos-data/')
+BACKEND_BLOCK_MODULE = getattr(
+ settings, 'PITHOS_BACKEND_BLOCK_MODULE', 'pithos.backends.lib.hashfiler')
+BACKEND_BLOCK_PATH = getattr(
+ settings, 'PITHOS_BACKEND_BLOCK_PATH', '/tmp/pithos-data/')
BACKEND_BLOCK_UMASK = getattr(settings, 'PITHOS_BACKEND_BLOCK_UMASK', 0o022)
# Queue for billing.
-BACKEND_QUEUE_MODULE = getattr(settings, 'PITHOS_BACKEND_QUEUE_MODULE', None) # Example: 'pithos.backends.lib.rabbitmq'
-BACKEND_QUEUE_CONNECTION = getattr(settings, 'PITHOS_BACKEND_QUEUE_CONNECTION', None) # Example: 'rabbitmq://guest:guest@localhost:5672/pithos'
+BACKEND_QUEUE_MODULE = getattr(settings, 'PITHOS_BACKEND_QUEUE_MODULE',
+ None) # Example: 'pithos.backends.lib.rabbitmq'
+BACKEND_QUEUE_CONNECTION = getattr(settings, 'PITHOS_BACKEND_QUEUE_CONNECTION', None) # Example: 'rabbitmq://guest:guest@localhost:5672/pithos'
# Default setting for new accounts.
-BACKEND_QUOTA = getattr(settings, 'PITHOS_BACKEND_QUOTA', 50 * 1024 * 1024 * 1024)
+BACKEND_QUOTA = getattr(
+ settings, 'PITHOS_BACKEND_QUOTA', 50 * 1024 * 1024 * 1024)
BACKEND_VERSIONING = getattr(settings, 'PITHOS_BACKEND_VERSIONING', 'auto')
# Update object checksums when using hashmaps.
# Copyright (C) 2009 by Michael Fogleman
-#
+#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
-#
+#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
Python implementation for generating Tiny URL- and bit.ly-like URLs.
-A bit-shuffling approach is used to avoid generating consecutive, predictable
-URLs. However, the algorithm is deterministic and will guarantee that no
+A bit-shuffling approach is used to avoid generating consecutive, predictable
+URLs. However, the algorithm is deterministic and will guarantee that no
collisions will occur.
-The URL alphabet is fully customizable and may contain any number of
-characters. By default, digits and lower-case letters are used, with
-some removed to avoid confusion between characters like o, O and 0. The
-default alphabet is shuffled and has a prime number of characters to further
+The URL alphabet is fully customizable and may contain any number of
+characters. By default, digits and lower-case letters are used, with
+some removed to avoid confusion between characters like o, O and 0. The
+default alphabet is shuffled and has a prime number of characters to further
improve the results of the algorithm.
-The block size specifies how many bits will be shuffled. The lower BLOCK_SIZE
+The block size specifies how many bits will be shuffled. The lower BLOCK_SIZE
bits are reversed. Any bits higher than BLOCK_SIZE will remain as is.
-BLOCK_SIZE of 0 will leave all bits unaffected and the algorithm will simply
+BLOCK_SIZE of 0 will leave all bits unaffected and the algorithm will simply
be converting your integer to a different base.
-The intended use is that incrementing, consecutive integers will be used as
-keys to generate the short URLs. For example, when creating a new URL, the
-unique integer ID assigned by a database could be used to generate the URL
-by using this module. Or a simple counter may be used. As long as the same
+The intended use is that incrementing, consecutive integers will be used as
+keys to generate the short URLs. For example, when creating a new URL, the
+unique integer ID assigned by a database could be used to generate the URL
+by using this module. Or a simple counter may be used. As long as the same
integer is not used twice, the same short URL will not be generated twice.
-The module supports both encoding and decoding of URLs. The min_length
+The module supports both encoding and decoding of URLs. The min_length
parameter allows you to pad the URL if you want it to be a specific length.
Sample Usage:
>>> print key
12
-Use the functions in the top-level of the module to use the default encoder.
-Otherwise, you may create your own UrlEncoder object and use its encode_url
+Use the functions in the top-level of the module to use the default encoder.
+Otherwise, you may create your own UrlEncoder object and use its encode_url
and decode_url methods.
Author: Michael Fogleman
DEFAULT_BLOCK_SIZE = 24
MIN_LENGTH = 5
+
class UrlEncoder(object):
def __init__(self, alphabet=DEFAULT_ALPHABET, block_size=DEFAULT_BLOCK_SIZE):
self.alphabet = alphabet
self.mask = (1 << block_size) - 1
self.mapping = range(block_size)
self.mapping.reverse()
+
    def encode_url(self, n, min_length=MIN_LENGTH):
        """Encode integer n into a short-URL string of at least min_length characters."""
        return self.enbase(self.encode(n), min_length)
+
    def decode_url(self, n):
        """Inverse of encode_url(): recover the integer key from a short-URL string."""
        return self.decode(self.debase(n))
+
    def encode(self, n):
        """Shuffle the low block-size bits of n; bits above the mask are unchanged."""
        return (n & ~self.mask) | self._encode(n & self.mask)
+
def _encode(self, n):
result = 0
for i, b in enumerate(self.mapping):
if n & (1 << i):
result |= (1 << b)
return result
+
    def decode(self, n):
        """Inverse of encode(): un-shuffle the low bits, keep the high bits."""
        return (n & ~self.mask) | self._decode(n & self.mask)
+
def _decode(self, n):
result = 0
for i, b in enumerate(self.mapping):
if n & (1 << b):
result |= (1 << i)
return result
+
def enbase(self, x, min_length=MIN_LENGTH):
result = self._enbase(x)
padding = self.alphabet[0] * (min_length - len(result))
return '%s%s' % (padding, result)
+
def _enbase(self, x):
n = len(self.alphabet)
if x < n:
return self.alphabet[x]
return self._enbase(x / n) + self.alphabet[x % n]
+
def debase(self, x):
n = len(self.alphabet)
result = 0
# Module-level convenience API backed by one shared default encoder.
DEFAULT_ENCODER = UrlEncoder()


def encode(n):
    """Bit-shuffle integer n with the default encoder."""
    return DEFAULT_ENCODER.encode(n)


def decode(n):
    """Inverse of encode() for the default encoder."""
    return DEFAULT_ENCODER.decode(n)


def enbase(n, min_length=MIN_LENGTH):
    """Render n in the default alphabet, left-padded to min_length."""
    return DEFAULT_ENCODER.enbase(n, min_length)


def debase(n):
    """Parse an alphabet-encoded string back into an integer."""
    return DEFAULT_ENCODER.debase(n)


def encode_url(n, min_length=MIN_LENGTH):
    """Produce the short-URL string for integer key n."""
    return DEFAULT_ENCODER.encode_url(n, min_length)


def decode_url(n):
    """Recover the integer key from a short-URL string."""
    return DEFAULT_ENCODER.decode_url(n)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
register = template.Library()
+
@register.filter
def get_type(value):
    """Template filter: return the name of value's class."""
    return value.__class__.__name__
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# TODO: This only works when in this order.
api_urlpatterns = patterns(
    'pithos.api.functions',
    (r'^$', 'top_demux'),
    (r'^(?P<v_account>.+?)/(?P<v_container>.+?)/(?P<v_object>.+?)$',
     'object_demux'),
    (r'^(?P<v_account>.+?)/(?P<v_container>.+?)/?$', 'container_demux'),
    (r'^(?P<v_account>.+?)/?$', 'account_demux')
)

urlpatterns = patterns(
    '',
    # Both the plain and the dotted API version prefixes share the demux set.
    (r'^v1(?:$|/)', include(api_urlpatterns)),
    (r'^v1\.0(?:$|/)', include(api_urlpatterns)),
    (r'^public/(?P<v_public>.+?)/?$', 'pithos.api.public.public_demux'),
    (r'^login/?$', 'pithos.api.delegate.delegate_to_login_service'),
    (r'^feedback/?$', 'pithos.api.delegate.delegate_to_feedback_service'),
)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from synnefo.lib.parsedate import parse_http_date_safe, parse_http_date
from synnefo.lib.astakos import get_user
-from pithos.api.faults import (Fault, NotModified, BadRequest, Unauthorized, Forbidden, ItemNotFound,
- Conflict, LengthRequired, PreconditionFailed, RequestEntityTooLarge,
- RangeNotSatisfiable, InternalServerError, NotImplemented)
+from pithos.api.faults import (
+ Fault, NotModified, BadRequest, Unauthorized, Forbidden, ItemNotFound,
+ Conflict, LengthRequired, PreconditionFailed, RequestEntityTooLarge,
+ RangeNotSatisfiable, InternalServerError, NotImplemented)
from pithos.api.short_url import encode_url
from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
- BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
- BACKEND_BLOCK_UMASK,
- BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
- BACKEND_QUOTA, BACKEND_VERSIONING,
- AUTHENTICATION_URL, AUTHENTICATION_USERS,
- SERVICE_TOKEN, COOKIE_NAME)
+ BACKEND_BLOCK_MODULE, BACKEND_BLOCK_PATH,
+ BACKEND_BLOCK_UMASK,
+ BACKEND_QUEUE_MODULE, BACKEND_QUEUE_CONNECTION,
+ BACKEND_QUOTA, BACKEND_VERSIONING,
+ AUTHENTICATION_URL, AUTHENTICATION_USERS,
+ SERVICE_TOKEN, COOKIE_NAME)
from pithos.backends import connect_backend
from pithos.backends.base import NotAllowedError, QuotaError, ItemNotExists, VersionNotExists
class UTC(tzinfo):
    """Concrete tzinfo for UTC: fixed zero offset and no daylight saving."""

    def utcoffset(self, dt):
        """UTC is, by definition, zero offset from UTC."""
        return timedelta(0)

    def tzname(self, dt):
        """Canonical zone name."""
        return 'UTC'

    def dst(self, dt):
        """UTC observes no daylight saving time."""
        return timedelta(0)
def json_encode_decimal(obj):
    """json.dumps 'default' hook that serializes Decimal values as strings."""
    if not isinstance(obj, decimal.Decimal):
        raise TypeError(repr(obj) + " is not JSON serializable")
    return str(obj)
+
def isoformat(d):
    """Return an ISO8601 date string that includes a timezone."""

    # Stamp the (naive) datetime as UTC so isoformat() emits an offset.
    return d.replace(tzinfo=UTC()).isoformat()
def rename_meta_key(d, old, new):
    """Move d[old] to d[new] in place; a no-op when old is absent.

    The early return is essential: without it the copy below would
    raise KeyError for a missing key.
    """
    if old not in d:
        return
    d[new] = d[old]
    del(d[old])
+
def printable_header_dict(d):
    """Format a meta dictionary for printing out json/xml.

    Convert all keys to lower case and replace dashes with underscores.
    Format 'last_modified' timestamp.
    """

    if 'last_modified' in d and d['last_modified']:
        d['last_modified'] = isoformat(
            datetime.fromtimestamp(d['last_modified']))
    # items() instead of iteritems(): identical result on Python 2 and
    # keeps the function working on Python 3.
    return dict([(k.lower().replace('-', '_'), v) for k, v in d.items()])
+
def format_header_key(k):
    """Convert underscores to dashes and capitalize intra-dash strings."""
    parts = k.replace('_', '-').split('-')
    return '-'.join(word.capitalize() for word in parts)
+
def get_header_prefix(request, prefix):
    """Get all prefix-* request headers in a dict. Reformat keys with format_header_key()."""

    # WSGI exposes headers as HTTP_UPPER_SNAKE keys in request.META.
    prefix = 'HTTP_' + prefix.upper().replace('-', '_')
    # TODO: Document or remove '~' replacing.
    # items() instead of iteritems() keeps this Python 3 compatible;
    # k[5:] strips the 'HTTP_' prefix, and the len() check drops headers
    # that consist of the prefix alone.
    return dict([(format_header_key(k[5:]), v.replace('~', ''))
                 for k, v in request.META.items()
                 if k.startswith(prefix) and len(k) > len(prefix)])
+
def check_meta_headers(meta):
if len(meta) > 90:
raise BadRequest('Too many headers.')
if len(v) > 256:
raise BadRequest('Header value too large.')
+
def get_account_headers(request):
meta = get_header_prefix(request, 'X-Account-Meta-')
check_meta_headers(meta)
groups[n].remove('')
return meta, groups
+
def put_account_headers(response, meta, groups, policy):
if 'count' in meta:
response['X-Account-Container-Count'] = meta['count']
response['X-Account-Bytes-Used'] = meta['bytes']
response['Last-Modified'] = http_date(int(meta['modified']))
for k in [x for x in meta.keys() if x.startswith('X-Account-Meta-')]:
- response[smart_str(k, strings_only=True)] = smart_str(meta[k], strings_only=True)
+ response[smart_str(
+ k, strings_only=True)] = smart_str(meta[k], strings_only=True)
if 'until_timestamp' in meta:
- response['X-Account-Until-Timestamp'] = http_date(int(meta['until_timestamp']))
+ response['X-Account-Until-Timestamp'] = http_date(
+ int(meta['until_timestamp']))
for k, v in groups.iteritems():
k = smart_str(k, strings_only=True)
k = format_header_key('X-Account-Group-' + k)
for k, v in policy.iteritems():
response[smart_str(format_header_key('X-Account-Policy-' + k), strings_only=True)] = smart_str(v, strings_only=True)
+
def get_container_headers(request):
    """Collect container meta and policy dicts from X-Container-* request headers."""
    meta = get_header_prefix(request, 'X-Container-Meta-')
    check_meta_headers(meta)
    # Policy keys drop the reformatted 'X-Container-Policy-' prefix
    # (19 characters) and are lower-cased; spaces are stripped from values.
    policy = dict([(k[19:].lower(), v.replace(' ', '')) for k, v in get_header_prefix(request, 'X-Container-Policy-').iteritems()])
    return meta, policy
+
def put_container_headers(request, response, meta, policy):
if 'count' in meta:
response['X-Container-Object-Count'] = meta['count']
response['X-Container-Bytes-Used'] = meta['bytes']
response['Last-Modified'] = http_date(int(meta['modified']))
for k in [x for x in meta.keys() if x.startswith('X-Container-Meta-')]:
- response[smart_str(k, strings_only=True)] = smart_str(meta[k], strings_only=True)
- l = [smart_str(x, strings_only=True) for x in meta['object_meta'] if x.startswith('X-Object-Meta-')]
+ response[smart_str(
+ k, strings_only=True)] = smart_str(meta[k], strings_only=True)
+ l = [smart_str(x, strings_only=True) for x in meta['object_meta']
+ if x.startswith('X-Object-Meta-')]
response['X-Container-Object-Meta'] = ','.join([x[14:] for x in l])
response['X-Container-Block-Size'] = request.backend.block_size
response['X-Container-Block-Hash'] = request.backend.hash_algorithm
if 'until_timestamp' in meta:
- response['X-Container-Until-Timestamp'] = http_date(int(meta['until_timestamp']))
+ response['X-Container-Until-Timestamp'] = http_date(
+ int(meta['until_timestamp']))
for k, v in policy.iteritems():
response[smart_str(format_header_key('X-Container-Policy-' + k), strings_only=True)] = smart_str(v, strings_only=True)
+
def get_object_headers(request):
content_type = request.META.get('CONTENT_TYPE', None)
meta = get_header_prefix(request, 'X-Object-Meta-')
meta['X-Object-Manifest'] = request.META['HTTP_X_OBJECT_MANIFEST']
return content_type, meta, get_sharing(request), get_public(request)
+
def put_object_headers(response, meta, restricted=False):
response['ETag'] = meta['checksum']
response['Content-Length'] = meta['bytes']
if not restricted:
response['X-Object-Hash'] = meta['hash']
response['X-Object-UUID'] = meta['uuid']
- response['X-Object-Modified-By'] = smart_str(meta['modified_by'], strings_only=True)
+ response['X-Object-Modified-By'] = smart_str(
+ meta['modified_by'], strings_only=True)
response['X-Object-Version'] = meta['version']
- response['X-Object-Version-Timestamp'] = http_date(int(meta['version_timestamp']))
+ response['X-Object-Version-Timestamp'] = http_date(
+ int(meta['version_timestamp']))
for k in [x for x in meta.keys() if x.startswith('X-Object-Meta-')]:
- response[smart_str(k, strings_only=True)] = smart_str(meta[k], strings_only=True)
- for k in ('Content-Encoding', 'Content-Disposition', 'X-Object-Manifest',
- 'X-Object-Sharing', 'X-Object-Shared-By', 'X-Object-Allowed-To',
- 'X-Object-Public'):
+ response[smart_str(
+ k, strings_only=True)] = smart_str(meta[k], strings_only=True)
+ for k in (
+ 'Content-Encoding', 'Content-Disposition', 'X-Object-Manifest',
+ 'X-Object-Sharing', 'X-Object-Shared-By', 'X-Object-Allowed-To',
+ 'X-Object-Public'):
if k in meta:
response[k] = smart_str(meta[k], strings_only=True)
else:
if k in meta:
response[k] = smart_str(meta[k], strings_only=True)
+
def update_manifest_meta(request, v_account, meta):
"""Update metadata if the object has an X-Object-Manifest."""
-
+
if 'X-Object-Manifest' in meta:
etag = ''
bytes = 0
try:
- src_container, src_name = split_container_object_string('/' + meta['X-Object-Manifest'])
- objects = request.backend.list_objects(request.user_uniq, v_account,
- src_container, prefix=src_name, virtual=False)
+ src_container, src_name = split_container_object_string(
+ '/' + meta['X-Object-Manifest'])
+ objects = request.backend.list_objects(
+ request.user_uniq, v_account,
+ src_container, prefix=src_name, virtual=False)
for x in objects:
src_meta = request.backend.get_object_meta(request.user_uniq,
- v_account, src_container, x[0], 'pithos', x[1])
+ v_account, src_container, x[0], 'pithos', x[1])
etag += src_meta['checksum']
bytes += src_meta['bytes']
except:
md5.update(etag)
meta['checksum'] = md5.hexdigest().lower()
+
def update_sharing_meta(request, permissions, v_account, v_container, v_object, meta):
if permissions is None:
return
if request.user_uniq != v_account:
meta['X-Object-Allowed-To'] = allowed
+
def update_public_meta(public, meta):
    """Add the object's public URL to meta when it carries a public token."""
    if public:
        meta['X-Object-Public'] = '/public/' + encode_url(public)
+
def validate_modification_preconditions(request, meta):
"""Check that the modified timestamp conforms with the preconditions set."""
-
+
if 'modified' not in meta:
- return # TODO: Always return?
-
+ return # TODO: Always return?
+
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since is not None:
if_modified_since = parse_http_date_safe(if_modified_since)
if if_modified_since is not None and int(meta['modified']) <= if_modified_since:
raise NotModified('Resource has not been modified')
-
+
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if if_unmodified_since is not None:
if_unmodified_since = parse_http_date_safe(if_unmodified_since)
if if_unmodified_since is not None and int(meta['modified']) > if_unmodified_since:
raise PreconditionFailed('Resource has been modified')
+
def validate_matching_preconditions(request, meta):
"""Check that the ETag conforms with the preconditions set."""
-
+
etag = meta['checksum']
if not etag:
etag = None
-
+
if_match = request.META.get('HTTP_IF_MATCH')
if if_match is not None:
if etag is None:
raise PreconditionFailed('Resource does not exist')
if if_match != '*' and etag not in [x.lower() for x in parse_etags(if_match)]:
raise PreconditionFailed('Resource ETag does not match')
-
+
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match is not None:
# TODO: If this passes, must ignore If-Modified-Since header.
raise NotModified('Resource ETag matches')
raise PreconditionFailed('Resource exists or ETag matches')
+
def split_container_object_string(s):
if not len(s) > 0 or s[0] != '/':
raise ValueError
raise ValueError
return s[:pos], s[(pos + 1):]
+
def copy_or_move_object(request, src_account, src_container, src_name, dest_account, dest_container, dest_name, move=False, delimiter=None):
"""Copy or move an object."""
-
+
if 'ignore_content_type' in request.GET and 'CONTENT_TYPE' in request.META:
del(request.META['CONTENT_TYPE'])
content_type, meta, permissions, public = get_object_headers(request)
src_version = request.META.get('HTTP_X_SOURCE_VERSION')
try:
if move:
- version_id = request.backend.move_object(request.user_uniq, src_account, src_container, src_name,
- dest_account, dest_container, dest_name,
- content_type, 'pithos', meta, False, permissions, delimiter)
+ version_id = request.backend.move_object(
+ request.user_uniq, src_account, src_container, src_name,
+ dest_account, dest_container, dest_name,
+ content_type, 'pithos', meta, False, permissions, delimiter)
else:
- version_id = request.backend.copy_object(request.user_uniq, src_account, src_container, src_name,
- dest_account, dest_container, dest_name,
- content_type, 'pithos', meta, False, permissions, src_version, delimiter)
+ version_id = request.backend.copy_object(
+ request.user_uniq, src_account, src_container, src_name,
+ dest_account, dest_container, dest_name,
+ content_type, 'pithos', meta, False, permissions, src_version, delimiter)
except NotAllowedError:
raise Forbidden('Not allowed')
except (ItemNotExists, VersionNotExists):
raise ItemNotFound('Object does not exist')
return version_id
+
def get_int_parameter(p):
if p is not None:
try:
return None
return p
+
def get_content_length(request):
    """Return the request's Content-Length header as an int.

    Raises LengthRequired when the header is missing or not accepted by
    get_int_parameter.
    """
    content_length = get_int_parameter(request.META.get('CONTENT_LENGTH'))
    if content_length is None:
        raise LengthRequired('Missing or invalid Content-Length header')
    return content_length
+
def get_range(request, size):
"""Parse a Range header from the request.
-
+
Either returns None, when the header is not existent or should be ignored,
or a list of (offset, length) tuples - should be further checked.
"""
-
+
ranges = request.META.get('HTTP_RANGE', '').replace(' ', '')
if not ranges.startswith('bytes='):
return None
-
+
ret = []
for r in (x.strip() for x in ranges[6:].split(',')):
p = re.compile('^(?P<offset>\d*)-(?P<upto>\d*)$')
upto = m.group('upto')
if offset == '' and upto == '':
return None
-
+
if offset != '':
offset = int(offset)
if upto != '':
else:
length = int(upto)
ret.append((size - length, length))
-
+
return ret
+
def get_content_range(request):
"""Parse a Content-Range header from the request.
-
+
Either returns None, when the header is not existent or should be ignored,
or an (offset, length, total) tuple - check as length, total may be None.
Returns (None, None, None) if the provided range is '*/*'.
"""
-
+
ranges = request.META.get('HTTP_CONTENT_RANGE', '')
if not ranges:
return None
-
+
p = re.compile('^bytes (?P<offset>\d+)-(?P<upto>\d*)/(?P<total>(\d+|\*))$')
m = p.match(ranges)
if not m:
total = None
if (upto is not None and offset > upto) or \
(total is not None and offset >= total) or \
- (total is not None and upto is not None and upto >= total):
+ (total is not None and upto is not None and upto >= total):
return None
-
+
if upto is None:
length = None
else:
length = upto - offset + 1
return (offset, length, total)
+
def get_sharing(request):
"""Parse an X-Object-Sharing header from the request.
-
+
Raises BadRequest on error.
"""
-
+
permissions = request.META.get('HTTP_X_OBJECT_SHARING')
if permissions is None:
return None
-
+
# TODO: Document or remove '~' replacing.
permissions = permissions.replace('~', '')
-
+
ret = {}
permissions = permissions.replace(' ', '')
if permissions == '':
return ret
for perm in (x for x in permissions.split(';')):
if perm.startswith('read='):
- ret['read'] = list(set([v.replace(' ','').lower() for v in perm[5:].split(',')]))
+ ret['read'] = list(set(
+ [v.replace(' ', '').lower() for v in perm[5:].split(',')]))
if '' in ret['read']:
ret['read'].remove('')
if '*' in ret['read']:
if len(ret['read']) == 0:
raise BadRequest('Bad X-Object-Sharing header value')
elif perm.startswith('write='):
- ret['write'] = list(set([v.replace(' ','').lower() for v in perm[6:].split(',')]))
+ ret['write'] = list(set(
+ [v.replace(' ', '').lower() for v in perm[6:].split(',')]))
if '' in ret['write']:
ret['write'].remove('')
if '*' in ret['write']:
raise BadRequest('Bad X-Object-Sharing header value')
else:
raise BadRequest('Bad X-Object-Sharing header value')
-
+
# Keep duplicates only in write list.
- dups = [x for x in ret.get('read', []) if x in ret.get('write', []) and x != '*']
+ dups = [x for x in ret.get(
+ 'read', []) if x in ret.get('write', []) and x != '*']
if dups:
for x in dups:
ret['read'].remove(x)
if len(ret['read']) == 0:
del(ret['read'])
-
+
return ret
+
def get_public(request):
"""Parse an X-Object-Public header from the request.
-
+
Raises BadRequest on error.
"""
-
+
public = request.META.get('HTTP_X_OBJECT_PUBLIC')
if public is None:
return None
-
+
public = public.replace(' ', '').lower()
if public == 'true':
return True
return False
raise BadRequest('Bad X-Object-Public header value')
+
def raw_input_socket(request):
"""Return the socket for reading the rest of the request."""
-
+
server_software = request.META.get('SERVER_SOFTWARE')
if server_software and server_software.startswith('mod_python'):
return request._req
return request.environ['wsgi.input']
raise NotImplemented('Unknown server software')
MAX_UPLOAD_SIZE = 5 * (1024 * 1024 * 1024)  # 5GB
+
def socket_read_iterator(request, length=0, blocksize=4096):
"""Return a maximum of blocksize data read from the socket in each iteration.
-
+
Read up to 'length'. If 'length' is negative, will attempt a chunked read.
The maximum ammount of data read is controlled by MAX_UPLOAD_SIZE.
"""
-
+
sock = raw_input_socket(request)
- if length < 0: # Chunked transfers
+ if length < 0: # Chunked transfers
# Small version (server does the dechunking).
if request.environ.get('mod_wsgi.input_chunked', None) or request.META['SERVER_SOFTWARE'].startswith('gunicorn'):
while length < MAX_UPLOAD_SIZE:
return
yield data
raise BadRequest('Maximum size is reached')
-
+
# Long version (do the dechunking).
data = ''
while length < MAX_UPLOAD_SIZE:
try:
chunk_length = int(chunk_length, 16)
except Exception, e:
- raise BadRequest('Bad chunk size') # TODO: Change to something more appropriate.
+ raise BadRequest('Bad chunk size')
+ # TODO: Change to something more appropriate.
# Check if done.
if chunk_length == 0:
if len(data) > 0:
ret = data[:blocksize]
data = data[blocksize:]
yield ret
- sock.read(2) # CRLF
+ sock.read(2) # CRLF
raise BadRequest('Maximum size is reached')
else:
if length > MAX_UPLOAD_SIZE:
length -= len(data)
yield data
+
class SaveToBackendHandler(FileUploadHandler):
"""Handle a file from an HTML form the django way."""
-
+
def __init__(self, request=None):
super(SaveToBackendHandler, self).__init__(request)
self.backend = request.backend
-
+
    def put_data(self, length):
        # Flush one block of exactly `length` bytes to the backend once
        # enough data has been buffered; any shorter remainder stays in
        # self.data for a later call.
        # NOTE(review): only a single block is emitted per call —
        # presumably callers feed chunks no larger than the block size;
        # confirm against receive_data_chunk's chunking.
        if len(self.data) >= length:
            block = self.data[:length]
            self.file.hashmap.append(self.backend.put_block(block))
            self.md5.update(block)
            self.data = self.data[length:]
-
+
def new_file(self, field_name, file_name, content_type, content_length, charset=None):
- self.md5 = hashlib.md5()
+ self.md5 = hashlib.md5()
self.data = ''
- self.file = UploadedFile(name=file_name, content_type=content_type, charset=charset)
+ self.file = UploadedFile(
+ name=file_name, content_type=content_type, charset=charset)
self.file.size = 0
self.file.hashmap = []
-
+
    def receive_data_chunk(self, raw_data, start):
        """Buffer an incoming chunk and flush complete blocks to the backend."""
        self.data += raw_data
        self.file.size += len(raw_data)
        self.put_data(self.request.backend.block_size)
        # Returning None signals Django the chunk was fully consumed here.
        return None
-
+
def file_complete(self, file_size):
l = len(self.data)
if l > 0:
self.file.etag = self.md5.hexdigest().lower()
return self.file
+
class ObjectWrapper(object):
"""Return the object's data block-per-block in each iteration.
-
+
Read from the object using the offset and length provided in each entry of the range list.
"""
-
+
def __init__(self, backend, ranges, sizes, hashmaps, boundary):
self.backend = backend
self.ranges = ranges
self.hashmaps = hashmaps
self.boundary = boundary
self.size = sum(self.sizes)
-
+
self.file_index = 0
self.block_index = 0
self.block_hash = -1
self.block = ''
-
+
self.range_index = -1
self.offset, self.length = self.ranges[0]
-
+
def __iter__(self):
return self
-
+
def part_iterator(self):
if self.length > 0:
# Get the file for the current offset.
self.offset -= file_size
self.file_index += 1
file_size = self.sizes[self.file_index]
-
+
# Get the block for the current position.
self.block_index = int(self.offset / self.backend.block_size)
if self.block_hash != self.hashmaps[self.file_index][self.block_index]:
- self.block_hash = self.hashmaps[self.file_index][self.block_index]
+ self.block_hash = self.hashmaps[
+ self.file_index][self.block_index]
try:
self.block = self.backend.get_block(self.block_hash)
except ItemNotExists:
raise ItemNotFound('Block does not exist')
-
+
# Get the data from the block.
bo = self.offset % self.backend.block_size
bs = self.backend.block_size
if (self.block_index == len(self.hashmaps[self.file_index]) - 1 and
- self.sizes[self.file_index] % self.backend.block_size):
+ self.sizes[self.file_index] % self.backend.block_size):
bs = self.sizes[self.file_index] % self.backend.block_size
bl = min(self.length, bs - bo)
data = self.block[bo:bo + bl]
return data
else:
raise StopIteration
-
+
def next(self):
if len(self.ranges) == 1:
return self.part_iterator()
if self.range_index > 0:
out.append('')
out.append('--' + self.boundary)
- out.append('Content-Range: bytes %d-%d/%d' % (self.offset, self.offset + self.length - 1, self.size))
+ out.append('Content-Range: bytes %d-%d/%d' % (
+ self.offset, self.offset + self.length - 1, self.size))
out.append('Content-Transfer-Encoding: binary')
out.append('')
out.append('')
out.append('')
return '\r\n'.join(out)
+
def object_data_response(request, sizes, hashmaps, meta, public=False):
"""Get the HttpResponse object for replying with the object's data."""
-
+
# Range handling.
size = sum(sizes)
ranges = get_range(request, size)
ret = 200
else:
check = [True for offset, length in ranges if
- length <= 0 or length > size or
- offset < 0 or offset >= size or
- offset + length > size]
+ length <= 0 or length > size or
+ offset < 0 or offset >= size or
+ offset + length > size]
if len(check) > 0:
raise RangeNotSatisfiable('Requested range exceeds object limits')
ret = 206
if if_range != meta['checksum']:
ranges = [(0, size)]
ret = 200
-
+
if ret == 206 and len(ranges) > 1:
boundary = uuid.uuid4().hex
else:
if ret == 206:
if len(ranges) == 1:
offset, length = ranges[0]
- response['Content-Length'] = length # Update with the correct length.
- response['Content-Range'] = 'bytes %d-%d/%d' % (offset, offset + length - 1, size)
+ response[
+ 'Content-Length'] = length # Update with the correct length.
+ response['Content-Range'] = 'bytes %d-%d/%d' % (
+ offset, offset + length - 1, size)
else:
del(response['Content-Length'])
- response['Content-Type'] = 'multipart/byteranges; boundary=%s' % (boundary,)
+ response['Content-Type'] = 'multipart/byteranges; boundary=%s' % (
+ boundary,)
return response
+
def put_object_block(request, hashmap, data, offset):
"""Put one block of data at the given offset."""
-
+
bi = int(offset / request.backend.block_size)
bo = offset % request.backend.block_size
bl = min(len(data), request.backend.block_size - bo)
hashmap[bi] = request.backend.update_block(hashmap[bi], data[:bl], bo)
else:
hashmap.append(request.backend.put_block(('\x00' * bo) + data[:bl]))
- return bl # Return ammount of data written.
+ return bl # Return ammount of data written.
+
def hashmap_md5(backend, hashmap, size):
    """Produce the MD5 sum from the data in the hashmap.

    `size` is the object's true byte length; the final block must be
    truncated to it because blocks come out of the backend padded.
    """

    # TODO: Search backend for the MD5 of another object with the same
    # hashmap and size...
    md5 = hashlib.md5()
    bs = backend.block_size
    last = len(hashmap) - 1
    for bi, block_hash in enumerate(hashmap):
        data = backend.get_block(block_hash)  # Blocks come in padded.
        if bi == last:
            # Keep exactly the bytes belonging to the object. Using
            # `size - bi * bs` instead of `size % bs` stays correct when
            # size is an exact multiple of the block size, where the
            # modulo would wrongly truncate the last block to zero bytes.
            data = data[:size - bi * bs]
        md5.update(data)
    return md5.hexdigest().lower()
+
def simple_list_response(request, l):
    """Render a flat list of strings per the request's serialization.

    NOTE(review): only 'text' and 'json' are handled here; any other
    serialization falls through and returns None — confirm callers never
    reach this with 'xml'.
    """
    if request.serialization == 'text':
        return '\n'.join(l) + '\n'
    if request.serialization == 'json':
        return json.dumps(l)
+
def get_backend():
backend = connect_backend(db_module=BACKEND_DB_MODULE,
db_connection=BACKEND_DB_CONNECTION,
backend.default_policy['versioning'] = BACKEND_VERSIONING
return backend
+
def update_request_headers(request):
# Handle URL-encoded keys and values.
- meta = dict([(k, v) for k, v in request.META.iteritems() if k.startswith('HTTP_')])
+ meta = dict([(
+ k, v) for k, v in request.META.iteritems() if k.startswith('HTTP_')])
for k, v in meta.iteritems():
try:
k.decode('ascii')
raise BadRequest('Bad character in headers.')
if '%' in k or '%' in v:
del(request.META[k])
- request.META[unquote(k)] = smart_unicode(unquote(v), strings_only=True)
+ request.META[unquote(k)] = smart_unicode(unquote(
+ v), strings_only=True)
+
def update_response_headers(request, response):
if request.serialization == 'xml':
response['Content-Type'] = 'application/json; charset=UTF-8'
elif not response['Content-Type']:
response['Content-Type'] = 'text/plain; charset=UTF-8'
-
+
if (not response.has_header('Content-Length') and
not (response.has_header('Content-Type') and
response['Content-Type'].startswith('multipart/byteranges'))):
response['Content-Length'] = len(response.content)
-
+
# URL-encode unicode in headers.
meta = response.items()
for k, v in meta:
if (k.startswith('X-Account-') or k.startswith('X-Container-') or
- k.startswith('X-Object-') or k.startswith('Content-')):
+ k.startswith('X-Object-') or k.startswith('Content-')):
del(response[k])
response[quote(k)] = quote(v, safe='/=,:@; ')
+
def render_fault(request, fault):
if isinstance(fault, InternalServerError) and settings.DEBUG:
fault.details = format_exc(fault)
-
+
request.serialization = 'text'
data = fault.message + '\n'
if fault.details:
update_response_headers(request, response)
return response
+
def request_serialization(request, format_allowed=False):
    """Return the serialization format requested.

    Valid formats are 'text' and 'json', 'xml' if 'format_allowed' is True.
    """

    if not format_allowed:
        return 'text'

    # An explicit ?format= query parameter wins over content negotiation.
    # (Local renamed from `format` to avoid shadowing the builtin.)
    fmt = request.GET.get('format')
    if fmt == 'json':
        return 'json'
    elif fmt == 'xml':
        return 'xml'

    # Fall back to the Accept header; media-type parameters after ';'
    # (e.g. q-values) are ignored.
    for item in request.META.get('HTTP_ACCEPT', '').split(','):
        accept, sep, rest = item.strip().partition(';')
        if accept == 'application/json':
            return 'json'
        elif accept == 'application/xml' or accept == 'text/xml':
            return 'xml'

    return 'text'
+
def api_method(http_method=None, format_allowed=False, user_required=True):
"""Decorator function for views that implement an API method."""
-
+
def decorator(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
if http_method and request.method != http_method:
raise BadRequest('Method not allowed.')
-
+
if user_required:
token = None
if request.method in ('HEAD', 'GET') and COOKIE_NAME in request.COOKIES:
- cookie_value = unquote(request.COOKIES.get(COOKIE_NAME, ''))
+ cookie_value = unquote(
+ request.COOKIES.get(COOKIE_NAME, ''))
if cookie_value and '|' in cookie_value:
token = cookie_value.split('|', 1)[1]
- get_user(request, AUTHENTICATION_URL, AUTHENTICATION_USERS, token)
+ get_user(request,
+ AUTHENTICATION_URL, AUTHENTICATION_USERS, token)
if getattr(request, 'user', None) is None:
raise Unauthorized('Access denied')
-
+
# The args variable may contain up to (account, container, object).
if len(args) > 1 and len(args[1]) > 256:
raise BadRequest('Container name too large.')
if len(args) > 2 and len(args[2]) > 1024:
raise BadRequest('Object name too large.')
-
+
# Format and check headers.
update_request_headers(request)
-
+
# Fill in custom request variables.
- request.serialization = request_serialization(request, format_allowed)
+ request.serialization = request_serialization(
+ request, format_allowed)
request.backend = get_backend()
-
+
response = func(request, *args, **kwargs)
update_response_headers(request, response)
return response
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
+
+
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
- show_ignored=False):
+ show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
- and not prefix):
+ and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
- stack.append((fn, prefix + name + "/", package, only_in_packages))
+ stack.append(
+ (fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
break
if bad_name:
continue
- out.setdefault(package, []).append(prefix+name)
+ out.setdefault(package, []).append(prefix + name)
return out
setup(
- name = 'snf-pithos-app',
- version = VERSION,
- license = 'BSD',
- url = 'http://code.grnet.gr/',
- description = SHORT_DESCRIPTION,
- long_description=README + '\n\n' + CHANGES,
- classifiers = CLASSIFIERS,
-
- author = 'Package author',
- author_email = 'author@grnet.gr',
- maintainer = 'Package maintainer',
- maintainer_email = 'maintainer@grnet.gr',
-
- namespace_packages = ['pithos'],
- packages = PACKAGES,
- package_dir= {'': PACKAGES_ROOT},
- include_package_data = True,
- package_data = find_package_data('.'),
- zip_safe = False,
-
- dependency_links = [
+ name='snf-pithos-app',
+ version=VERSION,
+ license='BSD',
+ url='http://code.grnet.gr/',
+ description=SHORT_DESCRIPTION,
+ long_description=README + '\n\n' + CHANGES,
+ classifiers=CLASSIFIERS,
+
+ author='Package author',
+ author_email='author@grnet.gr',
+ maintainer='Package maintainer',
+ maintainer_email='maintainer@grnet.gr',
+
+ namespace_packages=['pithos'],
+ packages=PACKAGES,
+ package_dir={'': PACKAGES_ROOT},
+ include_package_data=True,
+ package_data=find_package_data('.'),
+ zip_safe=False,
+
+ dependency_links=[
'http://docs.dev.grnet.gr/pypi/'],
- install_requires = INSTALL_REQUIRES,
- extras_require = EXTRAS_REQUIRES,
- tests_require = TESTS_REQUIRES,
-
- entry_points = {
- 'console_scripts': [
- ],
- 'synnefo': [
- 'default_settings = pithos.api.synnefo_settings',
- 'web_apps = pithos.api.synnefo_settings:synnefo_installed_apps',
- 'web_middleware = pithos.api.synnefo_settings:synnefo_middlewares',
- 'urls = pithos.api.urls:urlpatterns',
- 'loggers = pithos.api.synnefo_settings:loggers'
- ]
- },
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRAS_REQUIRES,
+ tests_require=TESTS_REQUIRES,
+
+ entry_points={
+ 'console_scripts': [
+ ],
+ 'synnefo': [
+ 'default_settings = pithos.api.synnefo_settings',
+ 'web_apps = pithos.api.synnefo_settings:synnefo_installed_apps',
+ 'web_middleware = pithos.api.synnefo_settings:synnefo_middlewares',
+ 'urls = pithos.api.urls:urlpatterns',
+ 'loggers = pithos.api.synnefo_settings:loggers'
+ ]
+ },
)
-
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("distribute>="+version)
+ pkg_resources.require("distribute>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
+
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
dst.close()
return os.path.realpath(saveto)
+
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
return __no_sandbox
+
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
_patch_file = _no_sandbox(_patch_file)
+
def _same_content(path, content):
return open(path).read() == content
+
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
+
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
log.warn('Unkown installation at %s', placeholder)
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
+
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
+ (SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
finally:
f.close()
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+ _create_fake_setuptools_pkg_info)
+
def _patch_egg_dir(path):
# let's check if it's already patched
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
- args = sys.argv[sys.argv.index('install')+1:]
+ args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
- top_dir = args[index+1]
+ top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
replacement=False))
except TypeError:
# old distribute API
- setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+ setuptools_dist = ws.find(
+ pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
+ tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
# Default setting for new accounts.
-DEFAULT_QUOTA = 0 # No quota.
+DEFAULT_QUOTA = 0 # No quota.
DEFAULT_VERSIONING = 'auto'
from synnefo.lib.singleton import ArgBasedSingleton
+
class NotAllowedError(Exception):
pass
+
class QuotaError(Exception):
pass
+
class AccountExists(NameError):
pass
-
+
+
class ContainerExists(NameError):
pass
+
class AccountNotEmpty(IndexError):
pass
+
class ContainerNotEmpty(IndexError):
pass
+
class ItemNotExists(NameError):
pass
+
class VersionNotExists(IndexError):
pass
-class BaseBackend(ArgBasedSingleton):
+
+class BaseBackend(object):
"""Abstract backend class that serves as a reference for actual implementations.
-
+
The purpose of the backend is to provide the necessary functions for handling data
and metadata. It is responsible for the actual storage and retrieval of information.
-
+
Note that the account level is always valid as it is checked from another subsystem.
-
+
When not replacing metadata/groups/policy, keys with empty values should be deleted.
-
+
The following variables should be available:
'hash_algorithm': Suggested is 'sha256'
-
+
'block_size': Suggested is 4MB
-
+
'default_policy': A dictionary with default policy settings
"""
-
+
def close(self):
"""Close the backend connection."""
pass
-
+
def list_accounts(self, user, marker=None, limit=10000):
"""Return a list of accounts the user can access.
-
+
Parameters:
'marker': Start list from the next item after 'marker'
-
+
'limit': Number of containers to return
"""
return []
-
+
def get_account_meta(self, user, account, domain, until=None, include_user_defined=True):
"""Return a dictionary with the account metadata for the domain.
-
+
The keys returned are all user-defined, except:
'name': The account name
-
+
'count': The number of containers (or 0)
-
+
'bytes': The total data size (or 0)
-
+
'modified': Last modification timestamp (overall)
-
+
'until_timestamp': Last modification until the timestamp provided
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return {}
-
+
def update_account_meta(self, user, account, domain, meta, replace=False):
"""Update the metadata associated with the account for the domain.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to update
-
+
'replace': Replace instead of update
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return
-
+
def get_account_groups(self, user, account):
"""Return a dictionary with the user groups defined for this account.
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return {}
-
+
def update_account_groups(self, user, account, groups, replace=False):
"""Update the groups associated with the account.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ValueError: Invalid data in groups
"""
return
-
+
def get_account_policy(self, user, account):
"""Return a dictionary with the account policy.
-
+
The keys returned are:
'quota': The maximum bytes allowed (default is 0 - unlimited)
-
+
'versioning': Can be 'auto', 'manual' or 'none' (default is 'manual')
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return {}
-
+
def update_account_policy(self, user, account, policy, replace=False):
"""Update the policy associated with the account.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ValueError: Invalid policy defined
"""
return
-
+
def put_account(self, user, account, policy={}):
"""Create a new account with the given name.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ValueError: Invalid policy defined
"""
return
-
+
def delete_account(self, user, account):
"""Delete the account with the given name.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
AccountNotEmpty: Account is not empty
"""
return
-
+
def list_containers(self, user, account, marker=None, limit=10000, shared=False, until=None, public=False):
"""Return a list of container names existing under an account.
-
+
Parameters:
'marker': Start list from the next item after 'marker'
-
+
'limit': Number of containers to return
-
+
'shared': Only list containers with permissions set
-
+
'public': Only list containers containing public objects
-
-
+
+
Raises:
NotAllowedError: Operation not permitted
"""
return []
-
+
def list_container_meta(self, user, account, container, domain, until=None):
"""Return a list with all the container's object meta keys for the domain.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return []
-
+
def get_container_meta(self, user, account, container, domain, until=None, include_user_defined=True):
"""Return a dictionary with the container metadata for the domain.
-
+
The keys returned are all user-defined, except:
'name': The container name
-
+
'count': The number of objects
-
+
'bytes': The total data size
-
+
'modified': Last modification timestamp (overall)
-
+
'until_timestamp': Last modification until the timestamp provided
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return {}
-
+
def update_container_meta(self, user, account, container, domain, meta, replace=False):
"""Update the metadata associated with the container for the domain.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to update
-
+
'replace': Replace instead of update
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return
-
+
def get_container_policy(self, user, account, container):
"""Return a dictionary with the container policy.
-
+
The keys returned are:
'quota': The maximum bytes allowed (default is 0 - unlimited)
-
+
'versioning': Can be 'auto', 'manual' or 'none' (default is 'manual')
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return {}
-
+
def update_container_policy(self, user, account, container, policy, replace=False):
"""Update the policy associated with the container.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
-
+
ValueError: Invalid policy defined
"""
return
-
+
def put_container(self, user, account, container, policy={}, delimiter=None):
"""Create a new container with the given name.
-
+
Parameters:
'delimiter': If present deletes container contents instead of the container
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ContainerExists: Container already exists
-
+
ValueError: Invalid policy defined
"""
return
-
+
def delete_container(self, user, account, container, until=None):
"""Delete/purge the container with the given name.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
-
+
ContainerNotEmpty: Container is not empty
"""
return
-
+
def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None, public=False):
"""Return a list of object (name, version_id) tuples existing under a container.
-
+
Parameters:
'prefix': List objects starting with 'prefix'
-
+
'delimiter': Return unique names before 'delimiter' and after 'prefix'
-
+
'marker': Start list from the next item after 'marker'
-
+
'limit': Number of objects to return
-
+
'virtual': If not set, the result will only include names starting
with 'prefix' and ending without a 'delimiter' or with
                        the first occurrence of the 'delimiter' after 'prefix'.
If set, the result will include all names after 'prefix',
up to and including the 'delimiter' if it is found
-
+
'domain': Metadata domain for keys
-
+
'keys': Include objects that satisfy the key queries in the list.
Use 'key', '!key' for existence queries, 'key op value' for
value queries, where 'op' can be one of =, !=, <=, >=, <, >
-
+
'shared': Only list objects with permissions set
-
+
'size_range': Include objects with byte size in (from, to).
Use None to specify unlimited
-
+
'public': Only list public objects
-
-
+
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return []
-
+
def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None):
"""Return a list of object metadata dicts existing under a container.
-
+
Same parameters with list_objects. Returned dicts have no user-defined
metadata and, if until is not None, a None 'modified' timestamp.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
"""
return []
-
+
def list_object_permissions(self, user, account, container, prefix=''):
"""Return a list of paths that enforce permissions under a container.
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return []
-
+
def list_object_public(self, user, account, container, prefix=''):
"""Return a dict mapping paths to public ids for objects that are public under a container."""
return {}
-
+
def get_object_meta(self, user, account, container, name, domain, version=None, include_user_defined=True):
"""Return a dictionary with the object metadata for the domain.
-
+
The keys returned are all user-defined, except:
'name': The object name
-
+
'bytes': The total data size
-
+
'type': The content type
-
+
'hash': The hashmap hash
-
+
'modified': Last modification timestamp (overall)
-
+
'modified_by': The user that committed the object (version requested)
-
+
'version': The version identifier
-
+
'version_timestamp': The version's modification timestamp
-
+
'uuid': A unique identifier that persists data or metadata updates and renames
-
+
'checksum': The MD5 sum of the object (may be empty)
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
-
+
VersionNotExists: Version does not exist
"""
return {}
-
+
def update_object_meta(self, user, account, container, name, domain, meta, replace=False):
"""Update the metadata associated with the object for the domain and return the new version.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to update
-
+
'replace': Replace instead of update
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
"""
return ''
-
+
def get_object_permissions(self, user, account, container, name):
"""Return the action allowed on the object, the path
from which the object gets its permissions from,
along with a dictionary containing the permissions.
-
+
The dictionary keys are (also used for defining the action):
'read': The object is readable by the users/groups in the list
-
+
'write': The object is writable by the users/groups in the list
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
"""
return {}
-
+
def update_object_permissions(self, user, account, container, name, permissions):
"""Update (set) the permissions associated with the object.
-
+
Parameters:
'permissions': Dictionary with permissions to set
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
-
+
ValueError: Invalid users/groups in permissions
"""
return
-
+
def get_object_public(self, user, account, container, name):
"""Return the public id of the object if applicable.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
"""
return None
-
+
def update_object_public(self, user, account, container, name, public):
"""Update the public status of the object.
-
+
Parameters:
'public': Boolean value
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
"""
return
-
+
def get_object_hashmap(self, user, account, container, name, version=None):
"""Return the object's size and a list with partial hashes.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
-
+
VersionNotExists: Version does not exist
"""
return 0, []
-
+
def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta={}, replace_meta=False, permissions=None):
"""Create/update an object with the specified size and partial hashes and return the new version.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to change
-
+
'replace_meta': Replace metadata instead of update
-
+
'permissions': Updated object permissions
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container does not exist
-
+
ValueError: Invalid users/groups in permissions
-
+
QuotaError: Account or container quota exceeded
"""
return ''
-
+
def update_object_checksum(self, user, account, container, name, version, checksum):
"""Update an object's checksum."""
return
-
+
def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, src_version=None, delimiter=None):
"""Copy an object's data and metadata and return the new version.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to change from source to destination
-
+
'replace_meta': Replace metadata instead of update
-
+
'permissions': New object permissions
-
+
'src_version': Copy from the version provided
-
+
'delimiter': Copy objects whose path starts with src_name + delimiter
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
-
+
VersionNotExists: Version does not exist
-
+
ValueError: Invalid users/groups in permissions
-
+
QuotaError: Account or container quota exceeded
"""
return ''
-
+
def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, delimiter=None):
"""Move an object's data and metadata and return the new version.
-
+
Parameters:
'domain': Metadata domain
-
+
'meta': Dictionary with metadata to change from source to destination
-
+
'replace_meta': Replace metadata instead of update
-
+
'permissions': New object permissions
-
+
'delimiter': Move objects whose path starts with src_name + delimiter
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
-
+
ValueError: Invalid users/groups in permissions
-
+
QuotaError: Account or container quota exceeded
"""
return ''
-
+
def delete_object(self, user, account, container, name, until=None, delimiter=None):
"""Delete/purge an object.
-
+
Parameters:
'delimiter': Delete objects whose path starting with name + delimiter
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
ItemNotExists: Container/object does not exist
"""
return
-
+
def list_versions(self, user, account, container, name):
"""Return a list of all (version, version_timestamp) tuples for an object.
-
+
Raises:
NotAllowedError: Operation not permitted
"""
return []
-
+
def get_uuid(self, user, uuid):
"""Return the (account, container, name) for the UUID given.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
NameError: UUID does not exist
"""
return None
-
+
def get_public(self, user, public):
"""Return the (account, container, name) for the public id given.
-
+
Raises:
NotAllowedError: Operation not permitted
-
+
NameError: Public id does not exist
"""
return None
-
+
def get_block(self, hash):
"""Return a block's data.
-
+
Raises:
ItemNotExists: Block does not exist
"""
return ''
-
+
def put_block(self, data):
"""Store a block and return the hash."""
return 0
-
+
def update_block(self, hash, data, offset=0):
"""Update a known block and return the hash.
-
+
Raises:
IndexError: Offset or data outside block limits
"""
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
import re
-_regexfilter = re.compile('(!?)\s*(\S+?)\s*(?:(=|!=|<=|>=|<|>)\s*(\S*?)\s*)?$', re.UNICODE)
+_regexfilter = re.compile(
+ '(!?)\s*(\S+?)\s*(?:(=|!=|<=|>=|<|>)\s*(\S*?)\s*)?$', re.UNICODE)
def parse_filters(terms):
opers.append((key, op, value))
elif not value:
included.append(key)
-
+
return included, excluded, opers
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from store import Store
__all__ = ["Store"]
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
if not exists(blockpath):
makedirs(blockpath)
else:
- raise ValueError("Variable blockpath '%s' is not a directory" % (blockpath,))
+ raise ValueError("Variable blockpath '%s' is not a directory" %
+ (blockpath,))
hashtype = params['hashtype']
try:
if not rbl:
break
for block in rbl.sync_read_chunks(blocksize, 1, 0):
- break # there should be just one block there
+ break # there should be just one block there
if not block:
break
append(self._pad(block))
block_hash = self.block_hash
hashlist = [block_hash(b) for b in blocklist]
mf = None
- missing = [i for i, h in enumerate(hashlist) if not self._check_rear_block(h)]
+ missing = [i for i, h in enumerate(hashlist)
+ if not self._check_rear_block(h)]
for i in missing:
with self._get_rear_block(hashlist[i], 1) as rbl:
- rbl.sync_write(blocklist[i]) #XXX: verify?
+ rbl.sync_write(blocklist[i]) # XXX: verify?
return hashlist, missing
block = self.block_retr((blkhash,))
if not block:
return None, None
-
+
block = block[0]
newblock = block[:offset] + data
if len(newblock) > blocksize:
sextend(sl)
lastsize = len(block)
- size = (len(hashlist) -1) * blocksize + lastsize if hashlist else 0
+ size = (len(hashlist) - 1) * blocksize + lastsize if hashlist else 0
return size, hashlist, storedlist
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
for chunk in chunks:
if padding:
if seek:
- seek(padding -1, SEEK_CUR)
+ seek(padding - 1, SEEK_CUR)
fwrite("\x00")
else:
fwrite(buffer(zeros(chunksize), 0, padding))
#if self.dirty:
# fsync(fdesc.fileno())
fdesc.close()
- return False # propagate exceptions
+ return False # propagate exceptions
def seek(self, offset, whence=SEEK_SET):
return self.fdesc.seek(offset, whence)
def sync_read_chunks(self, chunksize, nr, offset=0):
return file_sync_read_chunks(self.fdesc, chunksize, nr, offset)
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
"""Mapper.
Required constructor parameters: mappath, namelen.
"""
-
+
mappath = None
namelen = None
if not exists(mappath):
makedirs(mappath)
else:
- raise ValueError("Variable mappath '%s' is not a directory" % (mappath,))
+ raise ValueError(
+ "Variable mappath '%s' is not a directory" % (mappath,))
self.mappath = mappath
def _get_rear_map(self, maphash, create=0):
return
with self._get_rear_map(maphash, 1) as rmap:
rmap.sync_write_chunks(namelen, blkoff, hashes, None)
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from blocker import Blocker
from mapper import Mapper
+
class Store(object):
"""Store.
Required constructor parameters: path, block_size, hash_algorithm, umask.
"""
-
+
def __init__(self, **params):
umask = params['umask']
if umask is not None:
os.umask(umask)
-
+
path = params['path']
if path and not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise RuntimeError("Cannot open path '%s'" % (path,))
-
+
p = {'blocksize': params['block_size'],
'blockpath': os.path.join(path + '/blocks'),
'hashtype': params['hash_algorithm']}
p = {'mappath': os.path.join(path + '/maps'),
'namelen': self.blocker.hashlen}
self.mapper = Mapper(**p)
-
+
def map_get(self, name):
return self.mapper.map_retr(name)
-
+
def map_put(self, name, map):
self.mapper.map_stor(name, map)
-
+
def map_delete(self, name):
pass
-
+
def block_get(self, hash):
blocks = self.blocker.block_retr((hash,))
if not blocks:
return None
return blocks[0]
-
+
def block_put(self, data):
hashes, absent = self.blocker.block_stor((data,))
return hashes[0]
-
+
def block_update(self, hash, offset, data):
h, e = self.blocker.block_delta(hash, offset, data)
return h
-
+
def block_search(self, map):
return self.blocker.block_ping(map)
-
# Copyright 2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from queue import Queue
__all__ = ["Queue"]
-
# Copyright 2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
"""Queue.
Required constructor parameters: exchange, client_id.
"""
-
+
def __init__(self, **params):
exchange = params['exchange']
self.conn = exchange_connect(exchange)
self.client_id = params['client_id']
-
+
def send(self, message_key, user, instance, resource, value, details):
- body = Receipt(self.client_id, user, instance, resource, value, details).format()
+ body = Receipt(
+ self.client_id, user, instance, resource, value, details).format()
exchange_send(self.conn, message_key, body)
-
+
def close(self):
exchange_close(self.conn)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
__all__ = ["DBWrapper",
"Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME", "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX", "MATCH_EXACT",
"Permissions", "READ", "WRITE"]
-
db = config.get_main_option("sqlalchemy.url", PITHOS_BACKEND_DB_CONNECTION)
config.set_main_option("sqlalchemy.url", db)
+
def run_migrations_offline():
"""Run migrations in 'offline' mode.
with context.begin_transaction():
context.run_migrations()
+
def run_migrations_online():
"""Run migrations in 'online' mode.
"""
engine = engine_from_config(
- config.get_section(config.config_ini_section),
- prefix='sqlalchemy.',
- poolclass=pool.NullPool)
+ config.get_section(config.config_ini_section),
+ prefix='sqlalchemy.',
+ poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
- connection=connection,
- target_metadata=target_metadata
- )
+ connection=connection,
+ target_metadata=target_metadata
+ )
try:
with context.begin_transaction():
run_migrations_offline()
else:
run_migrations_online()
-
import sqlalchemy as sa
+
def upgrade():
op.add_column('nodes', sa.Column('latest_version', sa.INTEGER))
-
- n = table('nodes',
- column('node', sa.Integer),
- column('latest_version', sa.Integer)
- )
- v = table('versions',
- column('node', sa.Integer),
- column('mtime', sa.Integer),
- column('serial', sa.Integer),
- )
-
- s = sa.select([v.c.serial]).where(n.c.node == v.c.node).order_by(v.c.mtime).limit(1)
+
+ n = table('nodes',
+ column('node', sa.Integer),
+ column('latest_version', sa.Integer)
+ )
+ v = table('versions',
+ column('node', sa.Integer),
+ column('mtime', sa.Integer),
+ column('serial', sa.Integer),
+ )
+
+ s = sa.select(
+ [v.c.serial]).where(n.c.node == v.c.node).order_by(v.c.mtime).limit(1)
op.execute(
- n.update().\
- values({'latest_version':s})
- )
+ n.update().
+ values({'latest_version': s})
+ )
+
def downgrade():
op.drop_column('nodes', 'latest_version')
from sqlalchemy.sql import table, column
from sqlalchemy.sql.expression import desc
+
def upgrade():
- n = table('nodes',
- column('node', sa.Integer),
- column('latest_version', sa.Integer)
- )
- v = table('versions',
- column('node', sa.Integer),
- column('mtime', sa.Integer),
- column('serial', sa.Integer),
- )
-
- s = sa.select([v.c.serial]).where(n.c.node == v.c.node).order_by(desc(v.c.mtime)).limit(1)
+ n = table('nodes',
+ column('node', sa.Integer),
+ column('latest_version', sa.Integer)
+ )
+ v = table('versions',
+ column('node', sa.Integer),
+ column('mtime', sa.Integer),
+ column('serial', sa.Integer),
+ )
+
+ s = sa.select([v.c.serial]).where(
+ n.c.node == v.c.node).order_by(desc(v.c.mtime)).limit(1)
op.execute(
- n.update().\
- values({'latest_version':s})
- )
+ n.update().
+ values({'latest_version': s})
+ )
def downgrade():
from alembic import op
import sqlalchemy as sa
+
def upgrade():
op.create_index('idx_nodes_parent', 'nodes', ['parent'])
+
def downgrade():
op.drop_index('idx_nodes_parent', tablename='nodes')
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class DBWorker(object):
"""Database connection handler."""
-
+
def __init__(self, **params):
self.params = params
self.conn = params['wrapper'].conn
self.engine = params['wrapper'].engine
-
+
def escape_like(self, s):
return s.replace('\\', '\\\\').replace('%', '\%').replace('_', '\_')
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from sqlalchemy import create_engine
#from sqlalchemy.event import listen
from sqlalchemy.engine import Engine
-from sqlalchemy.pool import NullPool
+from sqlalchemy.pool import NullPool, QueuePool
from sqlalchemy.interfaces import PoolListener
class DBWrapper(object):
"""Database connection wrapper."""
-
+
def __init__(self, db):
if db.startswith('sqlite://'):
class ForeignKeysListener(PoolListener):
def connect(self, dbapi_con, con_record):
db_cursor = dbapi_con.execute('pragma foreign_keys=ON;')
- db_cursor = dbapi_con.execute('pragma case_sensitive_like=ON;')
+ db_cursor = dbapi_con.execute(
+ 'pragma case_sensitive_like=ON;')
self.engine = create_engine(db, connect_args={'check_same_thread': False}, poolclass=NullPool, listeners=[ForeignKeysListener()])
#elif db.startswith('mysql://'):
# db = '%s?charset=utf8&use_unicode=0' %db
# self.engine = create_engine(db, convert_unicode=True)
else:
+ #self.engine = create_engine(db, pool_size=0, max_overflow=-1)
self.engine = create_engine(db, poolclass=NullPool)
- #self.engine.echo = True
+ self.engine.echo = True
+ self.engine.echo_pool = False
self.conn = self.engine.connect()
self.trans = None
-
+
def close(self):
self.conn.close()
-
+
def execute(self):
self.trans = self.conn.begin()
-
+
def commit(self):
self.trans.commit()
self.trans = None
-
+
def rollback(self):
self.trans.rollback()
self.trans = None
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from dbworker import DBWorker
+
def create_tables(engine):
metadata = MetaData()
- columns=[]
+ columns = []
columns.append(Column('owner', String(256), primary_key=True))
columns.append(Column('name', String(256), primary_key=True))
columns.append(Column('member', String(256), primary_key=True))
groups = Table('groups', metadata, *columns, mysql_engine='InnoDB')
-
+
# place an index on member
Index('idx_groups_member', groups.c.member)
-
+
metadata.create_all(engine)
return metadata.sorted_tables
-
+
+
class Groups(DBWorker):
"""Groups are named collections of members, belonging to an owner."""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
try:
except NoSuchTableError:
tables = create_tables(self.engine)
map(lambda t: self.__setattr__(t.name, t), tables)
-
+
def group_names(self, owner):
"""List all group names belonging to owner."""
-
+
s = select([self.groups.c.name],
- self.groups.c.owner==owner).distinct()
+ self.groups.c.owner == owner).distinct()
r = self.conn.execute(s)
l = [row[0] for row in r.fetchall()]
r.close()
return l
-
+
def group_dict(self, owner):
"""Return a dict mapping group names to member lists for owner."""
-
+
s = select([self.groups.c.name, self.groups.c.member],
- self.groups.c.owner==owner)
+ self.groups.c.owner == owner)
r = self.conn.execute(s)
d = defaultdict(list)
for group, member in r.fetchall():
d[group].append(member)
r.close()
return d
-
+
def group_add(self, owner, group, member):
"""Add a member to a group."""
-
+
s = self.groups.select()
s = s.where(self.groups.c.owner == owner)
s = s.where(self.groups.c.name == group)
r = self.conn.execute(s)
groups = r.fetchall()
r.close()
- if len(groups) == 0:
+ if len(groups) == 0:
s = self.groups.insert()
r = self.conn.execute(s, owner=owner, name=group, member=member)
r.close()
-
+
def group_addmany(self, owner, group, members):
"""Add members to a group."""
-
+
#TODO: more efficient way to do it
for member in members:
self.group_add(owner, group, member)
-
+
def group_remove(self, owner, group, member):
"""Remove a member from a group."""
-
- s = self.groups.delete().where(and_(self.groups.c.owner==owner,
- self.groups.c.name==group,
- self.groups.c.member==member))
+
+ s = self.groups.delete().where(and_(self.groups.c.owner == owner,
+ self.groups.c.name == group,
+ self.groups.c.member == member))
r = self.conn.execute(s)
r.close()
-
+
def group_delete(self, owner, group):
"""Delete a group."""
-
- s = self.groups.delete().where(and_(self.groups.c.owner==owner,
- self.groups.c.name==group))
+
+ s = self.groups.delete().where(and_(self.groups.c.owner == owner,
+ self.groups.c.name == group))
r = self.conn.execute(s)
r.close()
-
+
def group_destroy(self, owner):
"""Delete all groups belonging to owner."""
-
- s = self.groups.delete().where(self.groups.c.owner==owner)
+
+ s = self.groups.delete().where(self.groups.c.owner == owner)
r = self.conn.execute(s)
r.close()
-
+
def group_members(self, owner, group):
"""Return the list of members of a group."""
-
- s = select([self.groups.c.member], and_(self.groups.c.owner==owner,
- self.groups.c.name==group))
+
+ s = select([self.groups.c.member], and_(self.groups.c.owner == owner,
+ self.groups.c.name == group))
r = self.conn.execute(s)
l = [row[0] for row in r.fetchall()]
r.close()
return l
-
+
def group_check(self, owner, group, member):
"""Check if a member is in a group."""
-
- s = select([self.groups.c.member], and_(self.groups.c.owner==owner,
- self.groups.c.name==group,
- self.groups.c.member==member))
+
+ s = select([self.groups.c.member], and_(self.groups.c.owner == owner,
+ self.groups.c.name == group,
+ self.groups.c.member == member))
r = self.conn.execute(s)
l = r.fetchone()
r.close()
return bool(l)
-
+
def group_parents(self, member):
"""Return all (owner, group) tuples that contain member."""
-
+
s = select([self.groups.c.owner, self.groups.c.name],
- self.groups.c.member==member)
+ self.groups.c.member == member)
r = self.conn.execute(s)
l = r.fetchall()
r.close()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from pithos.backends.filter import parse_filters
-ROOTNODE = 0
+ROOTNODE = 0
-( SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER ) = range(11)
+(SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM,
+ CLUSTER) = range(11)
-( MATCH_PREFIX, MATCH_EXACT ) = range(2)
+(MATCH_PREFIX, MATCH_EXACT) = range(2)
inf = float('inf')
c = ord(prefix[-1])
if c >= 0xffff:
raise RuntimeError
- s += unichr(c+1)
+ s += unichr(c + 1)
return s
+
def strprevling(prefix):
"""Return an approximation of the last unicode string
less than but not starting with given prefix.
s = prefix[:-1]
c = ord(prefix[-1])
if c > 0:
- s += unichr(c-1) + unichr(0xffff)
+ s += unichr(c - 1) + unichr(0xffff)
return s
_propnames = {
- 'serial' : 0,
- 'node' : 1,
- 'hash' : 2,
- 'size' : 3,
- 'type' : 4,
- 'source' : 5,
- 'mtime' : 6,
- 'muser' : 7,
- 'uuid' : 8,
- 'checksum' : 9,
- 'cluster' : 10
+ 'serial': 0,
+ 'node': 1,
+ 'hash': 2,
+ 'size': 3,
+ 'type': 4,
+ 'source': 5,
+ 'mtime': 6,
+ 'muser': 7,
+ 'uuid': 8,
+ 'checksum': 9,
+ 'cluster': 10
}
+
def create_tables(engine):
metadata = MetaData()
-
+
#create nodes table
- columns=[]
+ columns = []
columns.append(Column('node', Integer, primary_key=True))
columns.append(Column('parent', Integer,
ForeignKey('nodes.node',
nodes = Table('nodes', metadata, *columns, mysql_engine='InnoDB')
Index('idx_nodes_path', nodes.c.path, unique=True)
Index('idx_nodes_parent', nodes.c.parent)
-
+
#create policy table
- columns=[]
+ columns = []
columns.append(Column('node', Integer,
ForeignKey('nodes.node',
ondelete='CASCADE',
columns.append(Column('key', String(128), primary_key=True))
columns.append(Column('value', String(256)))
policy = Table('policy', metadata, *columns, mysql_engine='InnoDB')
-
+
#create statistics table
- columns=[]
+ columns = []
columns.append(Column('node', Integer,
ForeignKey('nodes.node',
ondelete='CASCADE',
columns.append(Column('cluster', Integer, nullable=False, default=0,
primary_key=True, autoincrement=False))
statistics = Table('statistics', metadata, *columns, mysql_engine='InnoDB')
-
+
#create versions table
- columns=[]
+ columns = []
columns.append(Column('serial', Integer, primary_key=True))
columns.append(Column('node', Integer,
ForeignKey('nodes.node',
versions = Table('versions', metadata, *columns, mysql_engine='InnoDB')
Index('idx_versions_node_mtime', versions.c.node, versions.c.mtime)
Index('idx_versions_node_uuid', versions.c.uuid)
-
+
#create attributes table
columns = []
columns.append(Column('serial', Integer,
columns.append(Column('key', String(128), primary_key=True))
columns.append(Column('value', String(256)))
attributes = Table('attributes', metadata, *columns, mysql_engine='InnoDB')
-
+
metadata.create_all(engine)
return metadata.sorted_tables
+
class Node(DBWorker):
"""Nodes store path organization and have multiple versions.
Versions store object history and have multiple attributes.
Attributes store metadata.
"""
-
+
# TODO: Provide an interface for included and excluded clusters.
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
try:
except NoSuchTableError:
tables = create_tables(self.engine)
map(lambda t: self.__setattr__(t.name, t), tables)
-
+
s = self.nodes.select().where(and_(self.nodes.c.node == ROOTNODE,
self.nodes.c.parent == ROOTNODE))
rp = self.conn.execute(s)
r = rp.fetchone()
rp.close()
if not r:
- s = self.nodes.insert().values(node=ROOTNODE, parent=ROOTNODE, path='')
+ s = self.nodes.insert(
+ ).values(node=ROOTNODE, parent=ROOTNODE, path='')
self.conn.execute(s)
-
+
def node_create(self, parent, path):
"""Create a new node from the given properties.
Return the node identifier of the new node.
inserted_primary_key = r.inserted_primary_key[0]
r.close()
return inserted_primary_key
-
+
def node_lookup(self, path):
"""Lookup the current node of the given path.
Return None if the path is not found.
"""
-
+
# Use LIKE for comparison to avoid MySQL problems with trailing spaces.
- s = select([self.nodes.c.node], self.nodes.c.path.like(self.escape_like(path), escape='\\'))
+ s = select([self.nodes.c.node], self.nodes.c.path.like(
+ self.escape_like(path), escape='\\'))
r = self.conn.execute(s)
row = r.fetchone()
r.close()
if row:
return row[0]
return None
-
+
def node_lookup_bulk(self, paths):
"""Lookup the current nodes for the given paths.
Return () if the path is not found.
"""
-
+
# Use LIKE for comparison to avoid MySQL problems with trailing spaces.
s = select([self.nodes.c.node], self.nodes.c.path.in_(paths))
r = self.conn.execute(s)
rows = r.fetchall()
r.close()
return [row[0] for row in rows]
-
+
def node_get_properties(self, node):
"""Return the node's (parent, path).
Return None if the node is not found.
"""
-
+
s = select([self.nodes.c.parent, self.nodes.c.path])
s = s.where(self.nodes.c.node == node)
r = self.conn.execute(s)
l = r.fetchone()
r.close()
return l
-
+
def node_get_versions(self, node, keys=(), propnames=_propnames):
"""Return the properties of all versions at node.
If keys is empty, return all properties in the order
(serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
"""
-
+
s = select([self.versions.c.serial,
self.versions.c.node,
self.versions.c.hash,
r.close()
if not rows:
return rows
-
+
if not keys:
return rows
-
+
return [[p[propnames[k]] for k in keys if k in propnames] for p in rows]
-
+
def node_count_children(self, node):
"""Return node's child count."""
-
+
s = select([func.count(self.nodes.c.node)])
s = s.where(and_(self.nodes.c.parent == node,
self.nodes.c.node != ROOTNODE))
row = r.fetchone()
r.close()
return row[0]
-
+
def node_purge_children(self, parent, before=inf, cluster=0):
"""Delete all versions with the specified
parent and cluster, and return
"""
#update statistics
c1 = select([self.nodes.c.node],
- self.nodes.c.parent == parent)
+ self.nodes.c.parent == parent)
where_clause = and_(self.versions.c.node.in_(c1),
self.versions.c.cluster == cluster)
s = select([func.count(self.versions.c.serial),
mtime = time()
self.statistics_update(parent, -nr, size, mtime, cluster)
self.statistics_update_ancestors(parent, -nr, size, mtime, cluster)
-
+
s = select([self.versions.c.hash])
s = s.where(where_clause)
r = self.conn.execute(s)
hashes = [row[0] for row in r.fetchall()]
r.close()
-
+
#delete versions
s = self.versions.delete().where(where_clause)
r = self.conn.execute(s)
r.close()
-
+
#delete nodes
s = select([self.nodes.c.node],
- and_(self.nodes.c.parent == parent,
- select([func.count(self.versions.c.serial)],
- self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
+ and_(self.nodes.c.parent == parent,
+ select([func.count(self.versions.c.serial)],
+ self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
rp = self.conn.execute(s)
nodes = [r[0] for r in rp.fetchall()]
rp.close()
s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
self.conn.execute(s).close()
-
+
return hashes, size
-
+
def node_purge(self, node, before=inf, cluster=0):
"""Delete all versions with the specified
node and cluster, and return
the hashes and size of versions deleted.
Clears out the node if it has no remaining versions.
"""
-
+
#update statistics
s = select([func.count(self.versions.c.serial),
func.sum(self.versions.c.size)])
where_clause = and_(self.versions.c.node == node,
- self.versions.c.cluster == cluster)
+ self.versions.c.cluster == cluster)
s = s.where(where_clause)
if before != inf:
s = s.where(self.versions.c.mtime <= before)
return (), 0
mtime = time()
self.statistics_update_ancestors(node, -nr, -size, mtime, cluster)
-
+
s = select([self.versions.c.hash])
s = s.where(where_clause)
r = self.conn.execute(s)
hashes = [r[0] for r in r.fetchall()]
r.close()
-
+
#delete versions
s = self.versions.delete().where(where_clause)
r = self.conn.execute(s)
r.close()
-
+
#delete nodes
s = select([self.nodes.c.node],
- and_(self.nodes.c.node == node,
- select([func.count(self.versions.c.serial)],
- self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
+ and_(self.nodes.c.node == node,
+ select([func.count(self.versions.c.serial)],
+ self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
r = self.conn.execute(s)
nodes = r.fetchall()
r.close()
s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
self.conn.execute(s).close()
-
+
return hashes, size
-
+
def node_remove(self, node):
"""Remove the node specified.
Return false if the node has children or is not found.
"""
-
+
if self.node_count_children(node):
return False
-
+
mtime = time()
s = select([func.count(self.versions.c.serial),
func.sum(self.versions.c.size),
s = s.group_by(self.versions.c.cluster)
r = self.conn.execute(s)
for population, size, cluster in r.fetchall():
- self.statistics_update_ancestors(node, -population, -size, mtime, cluster)
+ self.statistics_update_ancestors(
+ node, -population, -size, mtime, cluster)
r.close()
-
+
s = self.nodes.delete().where(self.nodes.c.node == node)
self.conn.execute(s).close()
return True
-
+
def policy_get(self, node):
s = select([self.policy.c.key, self.policy.c.value],
- self.policy.c.node==node)
+ self.policy.c.node == node)
r = self.conn.execute(s)
d = dict(r.fetchall())
r.close()
return d
-
+
def policy_set(self, node, policy):
#insert or replace
for k, v in policy.iteritems():
s = self.policy.update().where(and_(self.policy.c.node == node,
- self.policy.c.key == k))
- s = s.values(value = v)
+ self.policy.c.key == k))
+ s = s.values(value=v)
rp = self.conn.execute(s)
rp.close()
if rp.rowcount == 0:
s = self.policy.insert()
- values = {'node':node, 'key':k, 'value':v}
+ values = {'node': node, 'key': k, 'value': v}
r = self.conn.execute(s, values)
r.close()
-
+
def statistics_get(self, node, cluster=0):
"""Return population, total size and last mtime
for all versions under node that belong to the cluster.
"""
-
+
s = select([self.statistics.c.population,
self.statistics.c.size,
self.statistics.c.mtime])
row = r.fetchone()
r.close()
return row
-
+
def statistics_update(self, node, population, size, mtime, cluster=0):
"""Update the statistics of the given node.
Statistics keep track the population, total
May be zero or positive or negative numbers.
"""
s = select([self.statistics.c.population, self.statistics.c.size],
- and_(self.statistics.c.node == node,
- self.statistics.c.cluster == cluster))
+ and_(self.statistics.c.node == node,
+ self.statistics.c.cluster == cluster))
rp = self.conn.execute(s)
r = rp.fetchone()
rp.close()
prepopulation, presize = r
population += prepopulation
size += presize
-
+
#insert or replace
#TODO better upsert
- u = self.statistics.update().where(and_(self.statistics.c.node==node,
- self.statistics.c.cluster==cluster))
+ u = self.statistics.update().where(and_(self.statistics.c.node == node,
+ self.statistics.c.cluster == cluster))
u = u.values(population=population, size=size, mtime=mtime)
rp = self.conn.execute(u)
rp.close()
ins = ins.values(node=node, population=population, size=size,
mtime=mtime, cluster=cluster)
self.conn.execute(ins).close()
-
+
def statistics_update_ancestors(self, node, population, size, mtime, cluster=0):
"""Update the statistics of the given node's parent.
Then recursively update all parents up to the root.
Population is not recursive.
"""
-
+
while True:
if node == ROOTNODE:
break
parent, path = props
self.statistics_update(parent, population, size, mtime, cluster)
node = parent
- population = 0 # Population isn't recursive
-
+ population = 0 # Population isn't recursive
+
def statistics_latest(self, node, before=inf, except_cluster=0):
"""Return population, total size and last mtime
for all latest versions under node that
do not belong to the cluster.
"""
-
+
# The node.
props = self.node_get_properties(node)
if props is None:
return None
parent, path = props
-
+
# The latest version.
s = select([self.versions.c.serial,
self.versions.c.node,
self.versions.c.cluster])
if before != inf:
filtered = select([func.max(self.versions.c.serial)],
- self.versions.c.node == node)
+ self.versions.c.node == node)
filtered = filtered.where(self.versions.c.mtime < before)
else:
filtered = select([self.nodes.c.latest_version],
- self.versions.c.node == node)
+ self.versions.c.node == node)
s = s.where(and_(self.versions.c.cluster != except_cluster,
self.versions.c.serial == filtered))
r = self.conn.execute(s)
if not props:
return None
mtime = props[MTIME]
-
+
# First level, just under node (get population).
v = self.versions.alias('v')
s = select([func.count(v.c.serial),
mtime = max(mtime, r[2])
if count == 0:
return (0, 0, mtime)
-
+
# All children (get size and mtime).
# This is why the full path is stored.
s = select([func.count(v.c.serial),
func.max(v.c.mtime)])
if before != inf:
c1 = select([func.max(self.versions.c.serial)],
- self.versions.c.node == v.c.node)
+ self.versions.c.node == v.c.node)
c1 = c1.where(self.versions.c.mtime < before)
else:
c1 = select([self.nodes.c.serial],
- self.nodes.c.node == v.c.node)
- c2 = select([self.nodes.c.node], self.nodes.c.path.like(self.escape_like(path) + '%', escape='\\'))
+ self.nodes.c.node == v.c.node)
+ c2 = select([self.nodes.c.node], self.nodes.c.path.like(
+ self.escape_like(path) + '%', escape='\\'))
s = s.where(and_(v.c.serial == c1,
v.c.cluster != except_cluster,
v.c.node.in_(c2)))
size = r[1] - props[SIZE]
mtime = max(mtime, r[2])
return (count, size, mtime)
-
+
def nodes_set_latest_version(self, node, serial):
s = self.nodes.update().where(self.nodes.c.node == node)
- s = s.values(latest_version = serial)
+ s = s.values(latest_version=serial)
self.conn.execute(s).close()
-
+
def version_create(self, node, hash, size, type, source, muser, uuid, checksum, cluster=0):
"""Create a new version from the given properties.
Return the (serial, mtime) of the new version.
"""
-
+
mtime = time()
- s = self.versions.insert().values(node=node, hash=hash, size=size, type=type, source=source,
- mtime=mtime, muser=muser, uuid=uuid, checksum=checksum, cluster=cluster)
+ s = self.versions.insert(
+ ).values(node=node, hash=hash, size=size, type=type, source=source,
+ mtime=mtime, muser=muser, uuid=uuid, checksum=checksum, cluster=cluster)
serial = self.conn.execute(s).inserted_primary_key[0]
self.statistics_update_ancestors(node, 1, size, mtime, cluster)
-
+
self.nodes_set_latest_version(node, serial)
-
+
return serial, mtime
-
+
def version_lookup(self, node, before=inf, cluster=0, all_props=True):
"""Lookup the current version of the given node.
Return a list with its properties:
- (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
+ (serial, node, hash, size, type, source, mtime,
+ muser, uuid, checksum, cluster)
or None if the current version is not found in the given cluster.
"""
-
+
v = self.versions.alias('v')
if not all_props:
s = select([v.c.serial])
v.c.checksum, v.c.cluster])
if before != inf:
c = select([func.max(self.versions.c.serial)],
- self.versions.c.node == node)
+ self.versions.c.node == node)
c = c.where(self.versions.c.mtime < before)
else:
c = select([self.nodes.c.latest_version],
- self.nodes.c.node == node)
+ self.nodes.c.node == node)
s = s.where(and_(v.c.serial == c,
v.c.cluster == cluster))
r = self.conn.execute(s)
if props:
return props
return None
-
+
def version_lookup_bulk(self, nodes, before=inf, cluster=0, all_props=True):
"""Lookup the current versions of the given nodes.
Return a list with their properties:
v.c.checksum, v.c.cluster])
if before != inf:
c = select([func.max(self.versions.c.serial)],
- self.versions.c.node.in_(nodes))
+ self.versions.c.node.in_(nodes))
c = c.where(self.versions.c.mtime < before)
c = c.group_by(self.versions.c.node)
else:
c = select([self.nodes.c.latest_version],
- self.nodes.c.node.in_(nodes))
+ self.nodes.c.node.in_(nodes))
s = s.where(and_(v.c.serial.in_(c),
v.c.cluster == cluster))
s = s.order_by(v.c.node)
rproxy = r.fetchall()
r.close()
return (tuple(row.values()) for row in rproxy)
-
+
def version_get_properties(self, serial, keys=(), propnames=_propnames):
"""Return a sequence of values for the properties of
the version specified by serial and the keys, in the order given.
If keys is empty, return all properties in the order
(serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
"""
-
+
v = self.versions.alias()
s = select([v.c.serial, v.c.node, v.c.hash,
v.c.size, v.c.type, v.c.source,
rp.close()
if r is None:
return r
-
+
if not keys:
return r
return [r[propnames[k]] for k in keys if k in propnames]
-
+
def version_put_property(self, serial, key, value):
"""Set value for the property of version specified by key."""
-
+
if key not in _propnames:
return
s = self.versions.update()
s = s.where(self.versions.c.serial == serial)
s = s.values(**{key: value})
self.conn.execute(s).close()
-
+
def version_recluster(self, serial, cluster):
"""Move the version into another cluster."""
-
+
props = self.version_get_properties(serial)
if not props:
return
oldcluster = props[CLUSTER]
if cluster == oldcluster:
return
-
+
mtime = time()
self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster)
self.statistics_update_ancestors(node, 1, size, mtime, cluster)
-
+
s = self.versions.update()
s = s.where(self.versions.c.serial == serial)
- s = s.values(cluster = cluster)
+ s = s.values(cluster=cluster)
self.conn.execute(s).close()
-
+
def version_remove(self, serial):
"""Remove the serial specified."""
-
+
props = self.version_get_properties(serial)
if not props:
return
hash = props[HASH]
size = props[SIZE]
cluster = props[CLUSTER]
-
+
mtime = time()
self.statistics_update_ancestors(node, -1, -size, mtime, cluster)
-
+
s = self.versions.delete().where(self.versions.c.serial == serial)
self.conn.execute(s).close()
-
+
props = self.version_lookup(node, cluster=cluster, all_props=False)
if props:
self.nodes_set_latest_version(v.node, serial)
-
+
return hash, size
-
+
def attribute_get(self, serial, domain, keys=()):
"""Return a list of (key, value) pairs of the version specified by serial.
If keys is empty, return all attributes.
Othwerise, return only those specified.
"""
-
+
if keys:
attrs = self.attributes.alias()
s = select([attrs.c.key, attrs.c.value])
l = r.fetchall()
r.close()
return l
-
+
def attribute_set(self, serial, domain, items):
"""Set the attributes of the version specified by serial.
Receive attributes as an iterable of (key, value) pairs.
s = s.where(and_(self.attributes.c.serial == serial,
self.attributes.c.domain == domain,
self.attributes.c.key == k))
- s = s.values(value = v)
+ s = s.values(value=v)
rp = self.conn.execute(s)
rp.close()
if rp.rowcount == 0:
s = self.attributes.insert()
s = s.values(serial=serial, domain=domain, key=k, value=v)
self.conn.execute(s).close()
-
+
def attribute_del(self, serial, domain, keys=()):
"""Delete attributes of the version specified by serial.
If keys is empty, delete all attributes.
Otherwise delete those specified.
"""
-
+
if keys:
#TODO more efficient way to do this?
for key in keys:
s = s.where(and_(self.attributes.c.serial == serial,
self.attributes.c.domain == domain))
self.conn.execute(s).close()
-
+
def attribute_copy(self, source, dest):
- s = select([dest, self.attributes.c.domain, self.attributes.c.key, self.attributes.c.value],
+ s = select(
+ [dest, self.attributes.c.domain,
+ self.attributes.c.key, self.attributes.c.value],
self.attributes.c.serial == source)
rp = self.conn.execute(s)
attributes = rp.fetchall()
rp.close()
if rp.rowcount == 0:
s = self.attributes.insert()
- values = {'serial':dest, 'domain':domain, 'key':k, 'value':v}
+ values = {'serial': dest, 'domain': domain,
+ 'key': k, 'value': v}
self.conn.execute(s, values).close()
-
+
def latest_attribute_keys(self, parent, domain, before=inf, except_cluster=0, pathq=[]):
"""Return a list with all keys pairs defined
for all latest versions under parent that
do not belong to the cluster.
"""
-
+
# TODO: Use another table to store before=inf results.
a = self.attributes.alias('a')
v = self.versions.alias('v')
s = s.where(v.c.serial == filtered)
s = s.where(v.c.cluster != except_cluster)
s = s.where(v.c.node.in_(select([self.nodes.c.node],
- self.nodes.c.parent == parent)))
+ self.nodes.c.parent == parent)))
s = s.where(a.c.serial == v.c.serial)
s = s.where(a.c.domain == domain)
s = s.where(n.c.node == v.c.node)
conj = []
for path, match in pathq:
if match == MATCH_PREFIX:
- conj.append(n.c.path.like(self.escape_like(path) + '%', escape='\\'))
+ conj.append(
+ n.c.path.like(self.escape_like(path) + '%', escape='\\'))
elif match == MATCH_EXACT:
conj.append(n.c.path == path)
if conj:
rows = rp.fetchall()
rp.close()
return [r[0] for r in rows]
-
+
def latest_version_list(self, parent, prefix='', delimiter=None,
start='', limit=10000, before=inf,
except_cluster=0, pathq=[], domain=None,
"""Return a (list of (path, serial) tuples, list of common prefixes)
for the current versions of the paths with the given parent,
matching the following criteria.
-
+
The property tuple for a version is returned if all
of these conditions are true:
-
+
a. parent matches
-
+
b. path > start
-
+
c. path starts with prefix (and paths in pathq)
-
+
d. version is the max up to before
-
+
e. version is not in cluster
-
+
f. the path does not have the delimiter occuring
after the prefix, or ends with the delimiter
-
+
g. serial matches the attribute filter query.
-
+
A filter query is a comma-separated list of
terms in one of these three forms:
-
+
key
an attribute with this key must exist
-
+
!key
an attribute with this key must not exist
-
+
key ?op value
the attribute with this key satisfies the value
where ?op is one of ==, != <=, >=, <, >.
-
+
h. the size is in the range set by sizeq
-
+
The list of common prefixes includes the prefixes
matching up to the first delimiter after prefix,
and are reported only once, as "virtual directories".
The delimiter is included in the prefixes.
-
+
If arguments are None, then the corresponding matching rule
will always match.
-
+
Limit applies to the first list of tuples returned.
-
+
If all_props is True, return all properties after path, not just serial.
"""
-
+
if not start or start < prefix:
start = strprevling(prefix)
nextling = strnextling(prefix)
-
+
v = self.versions.alias('v')
n = self.nodes.alias('n')
if not all_props:
filtered = filtered.where(self.versions.c.mtime < before)
else:
filtered = select([self.nodes.c.latest_version])
- s = s.where(v.c.serial == filtered.where(self.nodes.c.node == v.c.node))
+ s = s.where(
+ v.c.serial == filtered.where(self.nodes.c.node == v.c.node))
s = s.where(v.c.cluster != except_cluster)
s = s.where(v.c.node.in_(select([self.nodes.c.node],
- self.nodes.c.parent == parent)))
-
+ self.nodes.c.parent == parent)))
+
s = s.where(n.c.node == v.c.node)
s = s.where(and_(n.c.path > bindparam('start'), n.c.path < nextling))
conj = []
for path, match in pathq:
if match == MATCH_PREFIX:
- conj.append(n.c.path.like(self.escape_like(path) + '%', escape='\\'))
+ conj.append(
+ n.c.path.like(self.escape_like(path) + '%', escape='\\'))
elif match == MATCH_EXACT:
conj.append(n.c.path == path)
if conj:
s = s.where(or_(*conj))
-
+
if sizeq and len(sizeq) == 2:
if sizeq[0]:
s = s.where(v.c.size >= sizeq[0])
if sizeq[1]:
s = s.where(v.c.size < sizeq[1])
-
+
if domain and filterq:
a = self.attributes.alias('a')
included, excluded, opers = parse_filters(filterq)
subs = select([1])
subs = subs.where(a.c.serial == v.c.serial).correlate(v)
subs = subs.where(a.c.domain == domain)
- subs = subs.where(and_(a.c.key.op('=')(k), a.c.value.op(o)(val)))
+ subs = subs.where(
+ and_(a.c.key.op('=')(k), a.c.value.op(o)(val)))
s = s.where(exists(subs))
-
+
s = s.order_by(n.c.path)
-
+
if not delimiter:
s = s.limit(limit)
rp = self.conn.execute(s, start=start)
r = rp.fetchall()
rp.close()
return r, ()
-
+
pfz = len(prefix)
dz = len(delimiter)
count = 0
pappend = prefixes.append
matches = []
mappend = matches.append
-
+
rp = self.conn.execute(s, start=start)
while True:
props = rp.fetchone()
path = props[0]
serial = props[1]
idx = path.find(delimiter, pfz)
-
+
if idx < 0:
mappend(props)
count += 1
if count >= limit:
break
continue
-
+
if idx + dz == len(path):
mappend(props)
count += 1
- continue # Get one more, in case there is a path.
+ continue # Get one more, in case there is a path.
pf = path[:idx + dz]
pappend(pf)
- if count >= limit:
+ if count >= limit:
break
-
- rp = self.conn.execute(s, start=strnextling(pf)) # New start.
+
+ rp = self.conn.execute(s, start=strnextling(pf)) # New start.
rp.close()
-
+
return matches, prefixes
-
+
def latest_uuid(self, uuid):
"""Return a (path, serial) tuple, for the latest version of the given uuid."""
-
+
v = self.versions.alias('v')
n = self.nodes.alias('n')
s = select([n.c.path, v.c.serial])
filtered = select([func.max(self.versions.c.serial)])
s = s.where(v.c.serial == filtered.where(self.versions.c.uuid == uuid))
s = s.where(n.c.node == v.c.node)
-
+
r = self.conn.execute(s)
l = r.fetchone()
r.close()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class Permissions(XFeatures, Groups, Public):
-
+
def __init__(self, **params):
XFeatures.__init__(self, **params)
Groups.__init__(self, **params)
Public.__init__(self, **params)
-
+
def access_grant(self, path, access, members=()):
"""Grant members with access to path.
Members can also be '*' (all),
or some group specified as 'owner:group'."""
-
+
if not members:
return
feature = self.xfeature_create(path)
self.feature_setmany(feature, access, members)
-
+
def access_set(self, path, permissions):
"""Set permissions for path. The permissions dict
maps 'read', 'write' keys to member lists."""
-
+
r = permissions.get('read', [])
w = permissions.get('write', [])
if not r and not w:
self.feature_setmany(feature, READ, r)
if w:
self.feature_setmany(feature, WRITE, w)
-
+
def access_get(self, path):
"""Get permissions for path."""
-
+
feature = self.xfeature_get(path)
if not feature:
return {}
permissions['write'] = permissions[WRITE]
del(permissions[WRITE])
return permissions
-
+
def access_members(self, path):
feature = self.xfeature_get(path)
if not feature:
members.remove(m)
members.update(self.group_members(user, group))
return list(members)
-
+
def access_clear(self, path):
"""Revoke access to path (both permissions and public)."""
-
+
self.xfeature_destroy(path)
self.public_unset(path)
-
+
def access_clear_bulk(self, paths):
"""Revoke access to path (both permissions and public)."""
-
+
self.xfeature_destroy_bulk(paths)
self.public_unset_bulk(paths)
-
+
def access_check(self, path, access, member):
"""Return true if the member has this access to the path."""
-
+
feature = self.xfeature_get(path)
if not feature:
return False
if owner + ':' + group in members:
return True
return False
-
+
def access_inherit(self, path):
"""Return the paths influencing the access for path."""
-
+
# r = self.xfeature_inherit(path)
# if not r:
# return []
# # Compute valid.
# return [x[0] for x in r if x[0] in valid]
-
+
# Only keep path components.
parts = path.rstrip('/').split('/')
valid = []
if subp != path:
valid.append(subp + '/')
return [x for x in valid if self.xfeature_get(x)]
-
+
def access_list_paths(self, member, prefix=None):
"""Return the list of paths granted to member."""
-
- xfeatures_xfeaturevals = self.xfeatures.join(self.xfeaturevals)
-
+
+ xfeatures_xfeaturevals = self.xfeatures.join(self.xfeaturevals)
+
selectable = (self.groups.c.owner + ':' + self.groups.c.name)
member_groups = select([selectable.label('value')],
- self.groups.c.member == member)
-
+ self.groups.c.member == member)
+
members = select([literal(member).label('value')])
any = select([literal('*').label('value')])
-
+
u = union(member_groups, members, any).alias()
inner_join = join(xfeatures_xfeaturevals, u,
- self.xfeaturevals.c.value == u.c.value)
+ self.xfeaturevals.c.value == u.c.value)
s = select([self.xfeatures.c.path], from_obj=[inner_join]).distinct()
if prefix:
- s = s.where(self.xfeatures.c.path.like(self.escape_like(prefix) + '%', escape='\\'))
+ s = s.where(self.xfeatures.c.path.like(
+ self.escape_like(prefix) + '%', escape='\\'))
r = self.conn.execute(s)
l = [row[0] for row in r.fetchall()]
r.close()
return l
-
+
def access_list_shared(self, prefix=''):
"""Return the list of shared paths."""
-
+
s = select([self.xfeatures.c.path],
- self.xfeatures.c.path.like(self.escape_like(prefix) + '%', escape='\\')).order_by(self.xfeatures.c.path.asc())
+ self.xfeatures.c.path.like(self.escape_like(prefix) + '%', escape='\\')).order_by(self.xfeatures.c.path.asc())
r = self.conn.execute(s)
l = [row[0] for row in r.fetchall()]
r.close()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from sqlalchemy.schema import Index
from sqlalchemy.exc import NoSuchTableError
+
def create_tables(engine):
metadata = MetaData()
- columns=[]
+ columns = []
columns.append(Column('public_id', Integer, primary_key=True))
columns.append(Column('path', String(2048), nullable=False))
columns.append(Column('active', Boolean, nullable=False, default=True))
- public = Table('public', metadata, *columns, mysql_engine='InnoDB', sqlite_autoincrement=True)
+ public = Table('public', metadata, *columns, mysql_engine='InnoDB',
+ sqlite_autoincrement=True)
# place an index on path
Index('idx_public_path', public.c.path, unique=True)
metadata.create_all(engine)
return metadata.sorted_tables
+
class Public(DBWorker):
"""Paths can be marked as public."""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
try:
except NoSuchTableError:
tables = create_tables(self.engine)
map(lambda t: self.__setattr__(t.name, t), tables)
-
+
def public_set(self, path):
s = select([self.public.c.public_id])
s = s.where(self.public.c.path == path)
s = s.values(path=path, active=True)
r = self.conn.execute(s)
r.close()
-
+
def public_unset(self, path):
s = self.public.update()
s = s.where(self.public.c.path == path)
s = s.values(active=False)
r = self.conn.execute(s)
r.close()
-
+
def public_unset_bulk(self, paths):
s = self.public.update()
s = s.where(self.public.c.path.in_(paths))
s = s.values(active=False)
r = self.conn.execute(s)
r.close()
-
+
def public_get(self, path):
s = select([self.public.c.public_id])
s = s.where(and_(self.public.c.path == path,
if row:
return row[0]
return None
-
+
def public_list(self, prefix):
s = select([self.public.c.path, self.public.c.public_id])
- s = s.where(self.public.c.path.like(self.escape_like(prefix) + '%', escape='\\'))
+ s = s.where(self.public.c.path.like(
+ self.escape_like(prefix) + '%', escape='\\'))
s = s.where(self.public.c.active == True)
r = self.conn.execute(s)
rows = r.fetchall()
r.close()
return rows
-
+
def public_path(self, public):
s = select([self.public.c.path])
s = s.where(and_(self.public.c.public_id == public,
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from dbworker import DBWorker
+
def create_tables(engine):
metadata = MetaData()
- columns=[]
+ columns = []
columns.append(Column('feature_id', Integer, primary_key=True))
columns.append(Column('path', String(2048)))
xfeatures = Table('xfeatures', metadata, *columns, mysql_engine='InnoDB')
# place an index on path
Index('idx_features_path', xfeatures.c.path, unique=True)
-
- columns=[]
+
+ columns = []
columns.append(Column('feature_id', Integer,
ForeignKey('xfeatures.feature_id',
ondelete='CASCADE'),
columns.append(Column('key', Integer, primary_key=True,
autoincrement=False))
columns.append(Column('value', String(256), primary_key=True))
- xfeaturevals = Table('xfeaturevals', metadata, *columns, mysql_engine='InnoDB')
-
+ xfeaturevals = Table(
+ 'xfeaturevals', metadata, *columns, mysql_engine='InnoDB')
+
metadata.create_all(engine)
return metadata.sorted_tables
+
class XFeatures(DBWorker):
"""XFeatures are path properties that allow non-nested
inheritance patterns. Currently used for storing permissions.
"""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
try:
except NoSuchTableError:
tables = create_tables(self.engine)
map(lambda t: self.__setattr__(t.name, t), tables)
-
+
# def xfeature_inherit(self, path):
# """Return the (path, feature) inherited by the path, or None."""
-#
+#
# s = select([self.xfeatures.c.path, self.xfeatures.c.feature_id])
# s = s.where(self.xfeatures.c.path <= path)
# #s = s.where(self.xfeatures.c.path.like(self.escape_like(path) + '%', escape='\\')) # XXX: Implement reverse and escape like...
# l = r.fetchall()
# r.close()
# return l
-
+
def xfeature_get(self, path):
"""Return feature for path."""
-
+
s = select([self.xfeatures.c.feature_id])
s = s.where(self.xfeatures.c.path == path)
s = s.order_by(self.xfeatures.c.path)
if row:
return row[0]
return None
-
+
def xfeature_create(self, path):
"""Create and return a feature for path.
If the path has a feature, return it.
"""
-
+
feature = self.xfeature_get(path)
if feature is not None:
return feature
inserted_primary_key = r.inserted_primary_key[0]
r.close()
return inserted_primary_key
-
+
def xfeature_destroy(self, path):
"""Destroy a feature and all its key, value pairs."""
-
+
s = self.xfeatures.delete().where(self.xfeatures.c.path == path)
r = self.conn.execute(s)
r.close()
-
+
def xfeature_destroy_bulk(self, paths):
"""Destroy features and all their key, value pairs."""
-
+
s = self.xfeatures.delete().where(self.xfeatures.c.path.in_(paths))
r = self.conn.execute(s)
r.close()
-
+
def feature_dict(self, feature):
"""Return a dict mapping keys to list of values for feature."""
-
+
s = select([self.xfeaturevals.c.key, self.xfeaturevals.c.value])
s = s.where(self.xfeaturevals.c.feature_id == feature)
r = self.conn.execute(s)
d[key].append(value)
r.close()
return d
-
+
def feature_set(self, feature, key, value):
"""Associate a key, value pair with a feature."""
-
+
s = self.xfeaturevals.select()
s = s.where(self.xfeaturevals.c.feature_id == feature)
s = s.where(self.xfeaturevals.c.key == key)
s = self.xfeaturevals.insert()
r = self.conn.execute(s, feature_id=feature, key=key, value=value)
r.close()
-
+
def feature_setmany(self, feature, key, values):
"""Associate the given key, and values with a feature."""
-
+
#TODO: more efficient way to do it
for v in values:
self.feature_set(feature, key, v)
-
+
def feature_unset(self, feature, key, value):
"""Disassociate a key, value pair from a feature."""
-
+
s = self.xfeaturevals.delete()
s = s.where(and_(self.xfeaturevals.c.feature_id == feature,
- self.xfeaturevals.c.key == key,
- self.xfeaturevals.c.value == value))
+ self.xfeaturevals.c.key == key,
+ self.xfeaturevals.c.value == value))
r = self.conn.execute(s)
r.close()
-
+
def feature_unsetmany(self, feature, key, values):
"""Disassociate the key for the values given, from a feature."""
-
+
for v in values:
conditional = and_(self.xfeaturevals.c.feature_id == feature,
self.xfeaturevals.c.key == key,
s = self.xfeaturevals.delete().where(conditional)
r = self.conn.execute(s)
r.close()
-
+
def feature_get(self, feature, key):
"""Return the list of values for a key of a feature."""
-
+
s = select([self.xfeaturevals.c.value])
s = s.where(and_(self.xfeaturevals.c.feature_id == feature,
- self.xfeaturevals.c.key == key))
+ self.xfeaturevals.c.key == key))
r = self.conn.execute(s)
l = [row[0] for row in r.fetchall()]
r.close()
return l
-
+
def feature_clear(self, feature, key):
"""Delete all key, value pairs for a key of a feature."""
-
+
s = self.xfeaturevals.delete()
s = s.where(and_(self.xfeaturevals.c.feature_id == feature,
- self.xfeaturevals.c.key == key))
+ self.xfeaturevals.c.key == key))
r = self.conn.execute(s)
r.close()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
__all__ = ["DBWrapper",
"Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME", "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX", "MATCH_EXACT",
"Permissions", "READ", "WRITE"]
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class DBWorker(object):
"""Database connection handler."""
-
+
def __init__(self, **params):
self.params = params
conn = params['wrapper'].conn
self.fetchall = cur.fetchall
self.cur = cur
self.conn = conn
-
+
def escape_like(self, s):
return s.replace('\\', '\\\\').replace('%', '\%').replace('_', '\_')
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class DBWrapper(object):
    """Thin wrapper owning one SQLite connection shared by all workers."""

    def __init__(self, db):
        # check_same_thread=False: the same connection is used from
        # multiple threads by design.
        self.conn = sqlite3.connect(db, check_same_thread=False)
        self.conn.execute(""" pragma case_sensitive_like = on """)

    def close(self):
        """Close the underlying connection."""
        self.conn.close()

    def execute(self):
        """Open a new deferred transaction."""
        self.conn.execute('begin deferred')

    def commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def rollback(self):
        """Abort the current transaction."""
        self.conn.rollback()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class Groups(DBWorker):
"""Groups are named collections of members, belonging to an owner."""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
execute = self.execute
-
+
execute(""" create table if not exists groups
( owner text,
name text,
primary key (owner, name, member) ) """)
execute(""" create index if not exists idx_groups_member
on groups(member) """)
-
+
def group_names(self, owner):
"""List all group names belonging to owner."""
-
+
q = "select distinct name from groups where owner = ?"
self.execute(q, (owner,))
return [r[0] for r in self.fetchall()]
-
+
def group_dict(self, owner):
"""Return a dict mapping group names to member lists for owner."""
-
+
q = "select name, member from groups where owner = ?"
self.execute(q, (owner,))
d = defaultdict(list)
for group, member in self.fetchall():
d[group].append(member)
return d
-
+
def group_add(self, owner, group, member):
"""Add a member to a group."""
-
+
q = "insert or ignore into groups (owner, name, member) values (?, ?, ?)"
self.execute(q, (owner, group, member))
-
+
def group_addmany(self, owner, group, members):
"""Add members to a group."""
-
+
q = "insert or ignore into groups (owner, name, member) values (?, ?, ?)"
self.executemany(q, ((owner, group, member) for member in members))
-
+
def group_remove(self, owner, group, member):
"""Remove a member from a group."""
-
+
q = "delete from groups where owner = ? and name = ? and member = ?"
self.execute(q, (owner, group, member))
-
+
def group_delete(self, owner, group):
"""Delete a group."""
-
+
q = "delete from groups where owner = ? and name = ?"
self.execute(q, (owner, group))
-
+
def group_destroy(self, owner):
"""Delete all groups belonging to owner."""
-
+
q = "delete from groups where owner = ?"
self.execute(q, (owner,))
-
+
def group_members(self, owner, group):
"""Return the list of members of a group."""
-
+
q = "select member from groups where owner = ? and name = ?"
self.execute(q, (owner, group))
return [r[0] for r in self.fetchall()]
-
+
def group_check(self, owner, group, member):
"""Check if a member is in a group."""
-
+
q = "select 1 from groups where owner = ? and name = ? and member = ?"
self.execute(q, (group, member))
return bool(self.fetchone())
-
+
def group_parents(self, member):
"""Return all (owner, group) tuples that contain member."""
-
+
q = "select owner, name from groups where member = ?"
self.execute(q, (member,))
return self.fetchall()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from pithos.backends.filter import parse_filters
-ROOTNODE = 0
+ROOTNODE = 0
-( SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER ) = range(11)
+(SERIAL, NODE, HASH, SIZE, TYPE, SOURCE, MTIME, MUSER, UUID, CHECKSUM,
+ CLUSTER) = range(11)
-( MATCH_PREFIX, MATCH_EXACT ) = range(2)
+(MATCH_PREFIX, MATCH_EXACT) = range(2)
inf = float('inf')
c = ord(prefix[-1])
if c >= 0xffff:
raise RuntimeError
- s += unichr(c+1)
+ s += unichr(c + 1)
return s
+
def strprevling(prefix):
"""Return an approximation of the last unicode string
less than but not starting with given prefix.
s = prefix[:-1]
c = ord(prefix[-1])
if c > 0:
- s += unichr(c-1) + unichr(0xffff)
+ s += unichr(c - 1) + unichr(0xffff)
return s
_propnames = {
- 'serial' : 0,
- 'node' : 1,
- 'hash' : 2,
- 'size' : 3,
- 'type' : 4,
- 'source' : 5,
- 'mtime' : 6,
- 'muser' : 7,
- 'uuid' : 8,
- 'checksum' : 9,
- 'cluster' : 10
+ 'serial': 0,
+ 'node': 1,
+ 'hash': 2,
+ 'size': 3,
+ 'type': 4,
+ 'source': 5,
+ 'mtime': 6,
+ 'muser': 7,
+ 'uuid': 8,
+ 'checksum': 9,
+ 'cluster': 10
}
Versions store object history and have multiple attributes.
Attributes store metadata.
"""
-
+
# TODO: Provide an interface for included and excluded clusters.
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
execute = self.execute
-
+
execute(""" pragma foreign_keys = on """)
-
+
execute(""" create table if not exists nodes
( node integer primary key,
parent integer default 0,
on nodes(path) """)
execute(""" create index if not exists idx_nodes_parent
on nodes(parent) """)
-
+
execute(""" create table if not exists policy
( node integer,
key text,
references nodes(node)
on update cascade
on delete cascade ) """)
-
+
execute(""" create table if not exists statistics
( node integer,
population integer not null default 0,
references nodes(node)
on update cascade
on delete cascade ) """)
-
+
execute(""" create table if not exists versions
( serial integer primary key,
node integer,
on versions(node, mtime) """)
execute(""" create index if not exists idx_versions_node_uuid
on versions(uuid) """)
-
+
execute(""" create table if not exists attributes
( serial integer,
domain text,
references versions(serial)
on update cascade
on delete cascade ) """)
-
+
q = "insert or ignore into nodes(node, parent) values (?, ?)"
execute(q, (ROOTNODE, ROOTNODE))
-
+
def node_create(self, parent, path):
"""Create a new node from the given properties.
Return the node identifier of the new node.
"""
-
+
q = ("insert into nodes (parent, path) "
"values (?, ?)")
props = (parent, path)
return self.execute(q, props).lastrowid
-
+
def node_lookup(self, path):
"""Lookup the current node of the given path.
Return None if the path is not found.
"""
-
+
q = "select node from nodes where path = ?"
self.execute(q, (path,))
r = self.fetchone()
if r is not None:
return r[0]
return None
-
+
def node_lookup_bulk(self, paths):
- """Lookup the current nodes for the given paths.
+ """Lookup the current nodes for the given paths.
Return () if the path is not found.
"""
-
+
placeholders = ','.join('?' for path in paths)
q = "select node from nodes where path in (%s)" % placeholders
self.execute(q, paths)
r = self.fetchall()
if r is not None:
- return [row[0] for row in r]
+ return [row[0] for row in r]
return None
-
+
def node_get_properties(self, node):
"""Return the node's (parent, path).
Return None if the node is not found.
"""
-
+
q = "select parent, path from nodes where node = ?"
self.execute(q, (node,))
return self.fetchone()
-
+
def node_get_versions(self, node, keys=(), propnames=_propnames):
"""Return the properties of all versions at node.
If keys is empty, return all properties in the order
(serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
"""
-
+
q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
"from versions "
"where node = ?")
r = self.fetchall()
if r is None:
return r
-
+
if not keys:
return r
return [[p[propnames[k]] for k in keys if k in propnames] for p in r]
-
+
def node_count_children(self, node):
"""Return node's child count."""
-
+
q = "select count(node) from nodes where parent = ? and node != 0"
self.execute(q, (node,))
r = self.fetchone()
if r is None:
return 0
return r[0]
-
+
def node_purge_children(self, parent, before=inf, cluster=0):
"""Delete all versions with the specified
parent and cluster, and return
the hashes and size of versions deleted.
Clears out nodes with no remaining versions.
"""
-
+
execute = self.execute
q = ("select count(serial), sum(size) from versions "
"where node in (select node "
- "from nodes "
- "where parent = ?) "
+ "from nodes "
+ "where parent = ?) "
"and cluster = ? "
"and mtime <= ?")
args = (parent, cluster, before)
mtime = time()
self.statistics_update(parent, -nr, -size, mtime, cluster)
self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster)
-
+
q = ("select hash from versions "
"where node in (select node "
- "from nodes "
- "where parent = ?) "
+ "from nodes "
+ "where parent = ?) "
"and cluster = ? "
"and mtime <= ?")
execute(q, args)
hashes = [r[0] for r in self.fetchall()]
q = ("delete from versions "
"where node in (select node "
- "from nodes "
- "where parent = ?) "
+ "from nodes "
+ "where parent = ?) "
"and cluster = ? "
"and mtime <= ?")
execute(q, args)
q = ("delete from nodes "
"where node in (select node from nodes n "
- "where (select count(serial) "
- "from versions "
- "where node = n.node) = 0 "
- "and parent = ?)")
+ "where (select count(serial) "
+ "from versions "
+ "where node = n.node) = 0 "
+ "and parent = ?)")
execute(q, (parent,))
return hashes, size
-
+
def node_purge(self, node, before=inf, cluster=0):
"""Delete all versions with the specified
node and cluster, and return
the hashes and size of versions deleted.
Clears out the node if it has no remaining versions.
"""
-
+
execute = self.execute
q = ("select count(serial), sum(size) from versions "
"where node = ? "
return (), 0
mtime = time()
self.statistics_update_ancestors(node, -nr, -size, mtime, cluster)
-
+
q = ("select hash from versions "
"where node = ? "
"and cluster = ? "
execute(q, args)
q = ("delete from nodes "
"where node in (select node from nodes n "
- "where (select count(serial) "
- "from versions "
- "where node = n.node) = 0 "
- "and node = ?)")
+ "where (select count(serial) "
+ "from versions "
+ "where node = n.node) = 0 "
+ "and node = ?)")
execute(q, (node,))
return hashes, size
-
+
def node_remove(self, node):
"""Remove the node specified.
Return false if the node has children or is not found.
"""
-
+
if self.node_count_children(node):
return False
-
+
mtime = time()
q = ("select count(serial), sum(size), cluster "
"from versions "
"group by cluster")
self.execute(q, (node,))
for population, size, cluster in self.fetchall():
- self.statistics_update_ancestors(node, -population, -size, mtime, cluster)
-
+ self.statistics_update_ancestors(
+ node, -population, -size, mtime, cluster)
+
q = "delete from nodes where node = ?"
self.execute(q, (node,))
return True
-
+
def policy_get(self, node):
q = "select key, value from policy where node = ?"
self.execute(q, (node,))
return dict(self.fetchall())
-
+
def policy_set(self, node, policy):
q = "insert or replace into policy (node, key, value) values (?, ?, ?)"
self.executemany(q, ((node, k, v) for k, v in policy.iteritems()))
-
+
def statistics_get(self, node, cluster=0):
"""Return population, total size and last mtime
for all versions under node that belong to the cluster.
"""
-
+
q = ("select population, size, mtime from statistics "
"where node = ? and cluster = ?")
self.execute(q, (node, cluster))
return self.fetchone()
-
+
def statistics_update(self, node, population, size, mtime, cluster=0):
"""Update the statistics of the given node.
Statistics keep track the population, total
size of objects and mtime in the node's namespace.
May be zero or positive or negative numbers.
"""
-
+
qs = ("select population, size from statistics "
"where node = ? and cluster = ?")
qu = ("insert or replace into statistics (node, population, size, mtime, cluster) "
population += prepopulation
size += presize
self.execute(qu, (node, population, size, mtime, cluster))
-
+
def statistics_update_ancestors(self, node, population, size, mtime, cluster=0):
"""Update the statistics of the given node's parent.
Then recursively update all parents up to the root.
Population is not recursive.
"""
-
+
while True:
if node == 0:
break
parent, path = props
self.statistics_update(parent, population, size, mtime, cluster)
node = parent
- population = 0 # Population isn't recursive
-
+ population = 0 # Population isn't recursive
+
def statistics_latest(self, node, before=inf, except_cluster=0):
"""Return population, total size and last mtime
for all latest versions under node that
do not belong to the cluster.
"""
-
+
execute = self.execute
fetchone = self.fetchone
-
+
# The node.
props = self.node_get_properties(node)
if props is None:
return None
parent, path = props
-
+
# The latest version.
q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
"from versions v "
"where serial = %s "
"and cluster != ?")
- subq, args = self._construct_latest_version_subquery(node=node, before=before)
+ subq, args = self._construct_latest_version_subquery(
+ node=node, before=before)
execute(q % subq, args + [except_cluster])
props = fetchone()
if props is None:
return None
mtime = props[MTIME]
-
+
# First level, just under node (get population).
q = ("select count(serial), sum(size), max(mtime) "
"from versions v "
"where serial = %s "
"and cluster != ? "
"and node in (select node "
- "from nodes "
- "where parent = ?)")
- subq, args = self._construct_latest_version_subquery(node=None, before=before)
+ "from nodes "
+ "where parent = ?)")
+ subq, args = self._construct_latest_version_subquery(
+ node=None, before=before)
execute(q % subq, args + [except_cluster, node])
r = fetchone()
if r is None:
mtime = max(mtime, r[2])
if count == 0:
return (0, 0, mtime)
-
+
# All children (get size and mtime).
# This is why the full path is stored.
q = ("select count(serial), sum(size), max(mtime) "
"where serial = %s "
"and cluster != ? "
"and node in (select node "
- "from nodes "
- "where path like ? escape '\\')")
- subq, args = self._construct_latest_version_subquery(node=None, before=before)
- execute(q % subq, args + [except_cluster, self.escape_like(path) + '%'])
+ "from nodes "
+ "where path like ? escape '\\')")
+ subq, args = self._construct_latest_version_subquery(
+ node=None, before=before)
+ execute(
+ q % subq, args + [except_cluster, self.escape_like(path) + '%'])
r = fetchone()
if r is None:
return None
size = r[1] - props[SIZE]
mtime = max(mtime, r[2])
return (count, size, mtime)
-
+
def nodes_set_latest_version(self, node, serial):
- q = ("update nodes set latest_version = ? where node = ?")
+ q = ("update nodes set latest_version = ? where node = ?")
props = (serial, node)
self.execute(q, props)
-
+
def version_create(self, node, hash, size, type, source, muser, uuid, checksum, cluster=0):
"""Create a new version from the given properties.
Return the (serial, mtime) of the new version.
"""
-
+
q = ("insert into versions (node, hash, size, type, source, mtime, muser, uuid, checksum, cluster) "
"values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
mtime = time()
- props = (node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
+ props = (node, hash, size, type, source, mtime, muser,
+ uuid, checksum, cluster)
serial = self.execute(q, props).lastrowid
self.statistics_update_ancestors(node, 1, size, mtime, cluster)
-
+
self.nodes_set_latest_version(node, serial)
-
+
return serial, mtime
-
+
def version_lookup(self, node, before=inf, cluster=0, all_props=True):
"""Lookup the current version of the given node.
Return a list with its properties:
- (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster)
+ (serial, node, hash, size, type, source, mtime,
+ muser, uuid, checksum, cluster)
or None if the current version is not found in the given cluster.
"""
-
+
q = ("select %s "
"from versions v "
"where serial = %s "
"and cluster = ?")
- subq, args = self._construct_latest_version_subquery(node=node, before=before)
+ subq, args = self._construct_latest_version_subquery(
+ node=node, before=before)
if not all_props:
q = q % ("serial", subq)
else:
q = q % ("serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster", subq)
-
+
self.execute(q, args + [cluster])
props = self.fetchone()
if props is not None:
Return a list with their properties:
(serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
"""
-
+
if not nodes:
- return ()
+ return ()
q = ("select %s "
"from versions "
"where serial in %s "
"and cluster = ? %s")
- subq, args = self._construct_latest_versions_subquery(nodes=nodes, before = before)
+ subq, args = self._construct_latest_versions_subquery(
+ nodes=nodes, before=before)
if not all_props:
q = q % ("serial", subq, '')
else:
- q = q % ("serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster", subq, 'order by node')
-
+ q = q % ("serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster", subq, 'order by node')
+
args += [cluster]
self.execute(q, args)
return self.fetchall()
-
+
def version_get_properties(self, serial, keys=(), propnames=_propnames):
"""Return a sequence of values for the properties of
the version specified by serial and the keys, in the order given.
If keys is empty, return all properties in the order
(serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
"""
-
+
q = ("select serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster "
"from versions "
"where serial = ?")
r = self.fetchone()
if r is None:
return r
-
+
if not keys:
return r
return [r[propnames[k]] for k in keys if k in propnames]
-
+
def version_put_property(self, serial, key, value):
"""Set value for the property of version specified by key."""
-
+
if key not in _propnames:
return
q = "update versions set %s = ? where serial = ?" % key
self.execute(q, (value, serial))
-
+
def version_recluster(self, serial, cluster):
"""Move the version into another cluster."""
-
+
props = self.version_get_properties(serial)
if not props:
return
oldcluster = props[CLUSTER]
if cluster == oldcluster:
return
-
+
mtime = time()
self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster)
self.statistics_update_ancestors(node, 1, size, mtime, cluster)
-
+
q = "update versions set cluster = ? where serial = ?"
self.execute(q, (cluster, serial))
-
+
def version_remove(self, serial):
"""Remove the serial specified."""
-
+
props = self.version_get_properties(serial)
if not props:
return
hash = props[HASH]
size = props[SIZE]
cluster = props[CLUSTER]
-
+
mtime = time()
self.statistics_update_ancestors(node, -1, -size, mtime, cluster)
-
+
q = "delete from versions where serial = ?"
self.execute(q, (serial,))
-
+
props = self.version_lookup(node, cluster=cluster, all_props=False)
if props:
- self.nodes_set_latest_version(node, props[0])
+ self.nodes_set_latest_version(node, props[0])
return hash, size
-
+
def attribute_get(self, serial, domain, keys=()):
"""Return a list of (key, value) pairs of the version specified by serial.
If keys is empty, return all attributes.
Othwerise, return only those specified.
"""
-
+
execute = self.execute
if keys:
marks = ','.join('?' for k in keys)
q = "select key, value from attributes where serial = ? and domain = ?"
execute(q, (serial, domain))
return self.fetchall()
-
+
def attribute_set(self, serial, domain, items):
"""Set the attributes of the version specified by serial.
Receive attributes as an iterable of (key, value) pairs.
"""
-
+
q = ("insert or replace into attributes (serial, domain, key, value) "
"values (?, ?, ?, ?)")
self.executemany(q, ((serial, domain, k, v) for k, v in items))
-
+
def attribute_del(self, serial, domain, keys=()):
"""Delete attributes of the version specified by serial.
If keys is empty, delete all attributes.
Otherwise delete those specified.
"""
-
+
if keys:
q = "delete from attributes where serial = ? and domain = ? and key = ?"
self.executemany(q, ((serial, domain, key) for key in keys))
else:
q = "delete from attributes where serial = ? and domain = ?"
self.execute(q, (serial, domain))
-
+
    def attribute_copy(self, source, dest):
        """Copy every attribute row of version `source` onto version `dest`,
        replacing any existing (dest, domain, key) rows.
        """
        q = ("insert or replace into attributes "
             "select ?, domain, key, value from attributes "
             "where serial = ?")
        self.execute(q, (dest, source))
-
+
def _construct_filters(self, domain, filterq):
if not domain or not filterq:
return None, None
-
+
subqlist = []
append = subqlist.append
included, excluded, opers = parse_filters(filterq)
args = []
-
+
if included:
subq = "exists (select 1 from attributes where serial = v.serial and domain = ? and "
subq += "(" + ' or '.join(('key = ?' for x in included)) + ")"
args += [domain]
args += included
append(subq)
-
+
if excluded:
subq = "not exists (select 1 from attributes where serial = v.serial and domain = ? and "
subq += "(" + ' or '.join(('key = ?' for x in excluded)) + ")"
args += [domain]
args += excluded
append(subq)
-
+
if opers:
for k, o, v in opers:
subq = "exists (select 1 from attributes where serial = v.serial and domain = ? and "
subq += ")"
args += [domain, k, v]
append(subq)
-
+
if not subqlist:
return None, None
-
+
subq = ' and ' + ' and '.join(subqlist)
-
+
return subq, args
-
+
def _construct_paths(self, pathq):
if not pathq:
return None, None
-
+
subqlist = []
args = []
for path, match in pathq:
elif match == MATCH_EXACT:
subqlist.append("n.path = ?")
args.append(path)
-
+
subq = ' and (' + ' or '.join(subqlist) + ')'
args = tuple(args)
-
+
return subq, args
-
+
def _construct_size(self, sizeq):
if not sizeq or len(sizeq) != 2:
return None, None
-
+
subq = ''
args = []
if sizeq[0]:
if sizeq[1]:
subq += " and v.size < ?"
args += [sizeq[1]]
-
+
return subq, args
-
+
def _construct_versions_nodes_latest_version_subquery(self, before=inf):
if before == inf:
q = ("n.latest_version ")
args = []
else:
q = ("(select max(serial) "
- "from versions "
- "where node = v.node and mtime < ?) ")
+ "from versions "
+ "where node = v.node and mtime < ?) ")
args = [before]
return q, args
-
+
def _construct_latest_version_subquery(self, node=None, before=inf):
where_cond = "node = v.node"
args = []
if node:
where_cond = "node = ? "
args = [node]
-
+
if before == inf:
q = ("(select latest_version "
- "from nodes "
- "where %s) ")
+ "from nodes "
+ "where %s) ")
else:
q = ("(select max(serial) "
- "from versions "
- "where %s and mtime < ?) ")
+ "from versions "
+ "where %s and mtime < ?) ")
args += [before]
return q % where_cond, args
-
+
def _construct_latest_versions_subquery(self, nodes=(), before=inf):
where_cond = ""
args = []
if nodes:
where_cond = "node in (%s) " % ','.join('?' for node in nodes)
args = nodes
-
+
if before == inf:
q = ("(select latest_version "
- "from nodes "
- "where %s ) ")
+ "from nodes "
+ "where %s ) ")
else:
q = ("(select max(serial) "
- "from versions "
- "where %s and mtime < ? group by node) ")
+ "from versions "
+ "where %s and mtime < ? group by node) ")
args += [before]
return q % where_cond, args
-
+
def latest_attribute_keys(self, parent, domain, before=inf, except_cluster=0, pathq=[]):
"""Return a list with all keys pairs defined
for all latest versions under parent that
do not belong to the cluster.
"""
-
+
# TODO: Use another table to store before=inf results.
q = ("select distinct a.key "
"from attributes a, versions v, nodes n "
"where v.serial = %s "
"and v.cluster != ? "
"and v.node in (select node "
- "from nodes "
- "where parent = ?) "
+ "from nodes "
+ "where parent = ?) "
"and a.serial = v.serial "
"and a.domain = ? "
"and n.node = v.node")
- subq, subargs = self._construct_latest_version_subquery(node=None, before=before)
+ subq, subargs = self._construct_latest_version_subquery(
+ node=None, before=before)
args = subargs + [except_cluster, parent, domain]
q = q % subq
subq, subargs = self._construct_paths(pathq)
args += subargs
self.execute(q, args)
return [r[0] for r in self.fetchall()]
-
+
def latest_version_list(self, parent, prefix='', delimiter=None,
start='', limit=10000, before=inf,
except_cluster=0, pathq=[], domain=None,
"""Return a (list of (path, serial) tuples, list of common prefixes)
for the current versions of the paths with the given parent,
matching the following criteria.
-
+
The property tuple for a version is returned if all
of these conditions are true:
-
+
a. parent matches
-
+
b. path > start
-
+
c. path starts with prefix (and paths in pathq)
-
+
d. version is the max up to before
-
+
e. version is not in cluster
-
+
f. the path does not have the delimiter occuring
after the prefix, or ends with the delimiter
-
+
g. serial matches the attribute filter query.
-
+
A filter query is a comma-separated list of
terms in one of these three forms:
-
+
key
an attribute with this key must exist
-
+
!key
an attribute with this key must not exist
-
+
key ?op value
the attribute with this key satisfies the value
where ?op is one of =, != <=, >=, <, >.
-
+
h. the size is in the range set by sizeq
-
+
The list of common prefixes includes the prefixes
matching up to the first delimiter after prefix,
and are reported only once, as "virtual directories".
The delimiter is included in the prefixes.
-
+
If arguments are None, then the corresponding matching rule
will always match.
-
+
Limit applies to the first list of tuples returned.
-
+
If all_props is True, return all properties after path, not just serial.
"""
-
+
execute = self.execute
-
+
if not start or start < prefix:
start = strprevling(prefix)
nextling = strnextling(prefix)
-
+
q = ("select distinct n.path, %s "
"from versions v, nodes n "
"where v.serial = %s "
"and v.cluster != ? "
"and v.node in (select node "
- "from nodes "
- "where parent = ?) "
+ "from nodes "
+ "where parent = ?) "
"and n.node = v.node "
"and n.path > ? and n.path < ?")
- subq, args = self._construct_versions_nodes_latest_version_subquery(before)
+ subq, args = self._construct_versions_nodes_latest_version_subquery(
+ before)
if not all_props:
q = q % ("v.serial", subq)
else:
q = q % ("v.serial, v.node, v.hash, v.size, v.type, v.source, v.mtime, v.muser, v.uuid, v.checksum, v.cluster", subq)
args += [except_cluster, parent, start, nextling]
start_index = len(args) - 2
-
+
subq, subargs = self._construct_paths(pathq)
if subq is not None:
q += subq
q = q.replace("attributes a, ", "")
q = q.replace("and a.serial = v.serial ", "")
q += " order by n.path"
-
+
if not delimiter:
q += " limit ?"
args.append(limit)
execute(q, args)
return self.fetchall(), ()
-
+
pfz = len(prefix)
dz = len(delimiter)
count = 0
pappend = prefixes.append
matches = []
mappend = matches.append
-
+
execute(q, args)
while True:
props = fetchone()
path = props[0]
serial = props[1]
idx = path.find(delimiter, pfz)
-
+
if idx < 0:
mappend(props)
count += 1
if count >= limit:
break
continue
-
+
if idx + dz == len(path):
mappend(props)
count += 1
- continue # Get one more, in case there is a path.
+ continue # Get one more, in case there is a path.
pf = path[:idx + dz]
pappend(pf)
- if count >= limit:
+ if count >= limit:
break
-
- args[start_index] = strnextling(pf) # New start.
+
+ args[start_index] = strnextling(pf) # New start.
execute(q, args)
-
+
return matches, prefixes
-
+
def latest_uuid(self, uuid):
"""Return a (path, serial) tuple, for the latest version of the given uuid."""
-
+
q = ("select n.path, v.serial "
"from versions v, nodes n "
"where v.serial = (select max(serial) "
- "from versions "
- "where uuid = ?) "
+ "from versions "
+ "where uuid = ?) "
"and n.node = v.node")
self.execute(q, (uuid,))
return self.fetchone()
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class Permissions(XFeatures, Groups, Public):
-
+
    def __init__(self, **params):
        # Initialize every mixin explicitly; the bases do not cooperate
        # via super(), so each __init__ must be called by name.
        XFeatures.__init__(self, **params)
        Groups.__init__(self, **params)
        Public.__init__(self, **params)
-
+
def access_grant(self, path, access, members=()):
"""Grant members with access to path.
Members can also be '*' (all),
or some group specified as 'owner:group'."""
-
+
if not members:
return
feature = self.xfeature_create(path)
self.feature_setmany(feature, access, members)
-
+
def access_set(self, path, permissions):
"""Set permissions for path. The permissions dict
maps 'read', 'write' keys to member lists."""
-
+
r = permissions.get('read', [])
w = permissions.get('write', [])
if not r and not w:
self.feature_setmany(feature, READ, r)
if w:
self.feature_setmany(feature, WRITE, w)
-
+
def access_get(self, path):
"""Get permissions for path."""
-
+
feature = self.xfeature_get(path)
if not feature:
return {}
permissions['write'] = permissions[WRITE]
del(permissions[WRITE])
return permissions
-
+
def access_members(self, path):
feature = self.xfeature_get(path)
if not feature:
members.remove(m)
members.update(self.group_members(user, group))
return members
-
+
def access_clear(self, path):
"""Revoke access to path (both permissions and public)."""
-
+
self.xfeature_destroy(path)
self.public_unset(path)
-
+
def access_clear_bulk(self, paths):
"""Revoke access to path (both permissions and public)."""
-
+
self.xfeature_destroy_bulk(paths)
self.public_unset_bulk(paths)
-
+
def access_check(self, path, access, member):
"""Return true if the member has this access to the path."""
-
+
feature = self.xfeature_get(path)
if not feature:
return False
if owner + ':' + group in members:
return True
return False
-
+
def access_inherit(self, path):
"""Return the paths influencing the access for path."""
-
+
# r = self.xfeature_inherit(path)
# if not r:
# return []
# # Compute valid.
# return [x[0] for x in r if x[0] in valid]
-
+
# Only keep path components.
parts = path.rstrip('/').split('/')
valid = []
if subp != path:
valid.append(subp + '/')
return [x for x in valid if self.xfeature_get(x)]
-
+
def access_list_paths(self, member, prefix=None):
"""Return the list of paths granted to member."""
-
+
q = ("select distinct path from xfeatures inner join "
" (select distinct feature_id, key from xfeaturevals inner join "
" (select owner || ':' || name as value from groups "
p += (self.escape_like(prefix) + '%',)
self.execute(q, p)
return [r[0] for r in self.fetchall()]
-
+
def access_list_shared(self, prefix=''):
    """Return the list of shared paths starting with prefix."""

    # A path is shared iff it has an xfeatures row. The prefix is
    # escaped so LIKE wildcards occurring in it match literally.
    q = "select path from xfeatures where path like ? escape '\\'"
    self.execute(q, (self.escape_like(prefix) + '%',))
    return [r[0] for r in self.fetchall()]
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
class Public(DBWorker):
"""Paths can be marked as public."""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
execute = self.execute
-
+
execute(""" create table if not exists public
( public_id integer primary key autoincrement,
path text not null,
active boolean not null default 1 ) """)
execute(""" create unique index if not exists idx_public_path
on public(path) """)
-
+
def public_set(self, path):
    """Mark path as public, (re)activating its row."""
    params = (path,)
    # Make sure a row exists for the path, then flip it to active;
    # "insert or ignore" keeps an existing row (and its public_id).
    self.execute("insert or ignore into public (path) values (?)", params)
    self.execute("update public set active = 1 where path = ?", params)
-
+
def public_unset(self, path):
    """Withdraw the public marker from path."""
    # Rows are deactivated rather than deleted.
    statement = "update public set active = 0 where path = ?"
    self.execute(statement, (path,))
-
+
def public_unset_bulk(self, paths):
    """Withdraw the public marker from every path in paths.

    Matching rows are deactivated, not deleted.
    """

    # Guard the empty case: it would build "... in ()", which most SQL
    # engines reject (SQLite tolerates it, but there is nothing to do).
    if not paths:
        return
    placeholders = ','.join('?' for _ in paths)
    q = "update public set active = 0 where path in (%s)" % placeholders
    self.execute(q, paths)
-
+
def public_get(self, path):
q = "select public_id from public where path = ? and active = 1"
self.execute(q, (path,))
if row:
return row[0]
return None
-
+
def public_list(self, prefix):
    """Return (path, public_id) rows of active public paths under prefix."""
    # Escape the prefix so LIKE wildcards in it are matched literally.
    pattern = self.escape_like(prefix) + '%'
    q = "select path, public_id from public where path like ? escape '\\' and active = 1"
    self.execute(q, (pattern,))
    return self.fetchall()
-
+
def public_path(self, public):
q = "select path from public where public_id = ? and active = 1"
self.execute(q, (public,))
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
"""XFeatures are path properties that allow non-nested
inheritance patterns. Currently used for storing permissions.
"""
-
+
def __init__(self, **params):
DBWorker.__init__(self, **params)
execute = self.execute
-
+
execute(""" pragma foreign_keys = on """)
-
+
execute(""" create table if not exists xfeatures
( feature_id integer primary key,
path text ) """)
primary key (feature_id, key, value)
foreign key (feature_id) references xfeatures(feature_id)
on delete cascade ) """)
-
+
# def xfeature_inherit(self, path):
# """Return the (path, feature) inherited by the path, or None."""
-#
+#
# q = ("select path, feature_id from xfeatures "
# "where path <= ? "
# "and ? like path || '%' " # XXX: Escape like...
# "order by path desc")
# self.execute(q, (path, path))
# return self.fetchall()
-
+
def xfeature_get(self, path):
    """Return the feature id for path, or None if the path has none."""

    self.execute("select feature_id from xfeatures where path = ?", (path,))
    row = self.fetchone()
    return row[0] if row is not None else None
-
+
def xfeature_create(self, path):
    """Create and return a feature id for path.

    If the path already has a feature, return the existing one.
    """

    feature = self.xfeature_get(path)
    if feature is not None:
        return feature
    q = "insert into xfeatures (path) values (?)"
    # The insert's lastrowid is the new feature_id.
    # (NOTE: 'id' shadows the builtin; kept as-is.)
    id = self.execute(q, (path,)).lastrowid
    return id
-
+
def xfeature_destroy(self, path):
    """Destroy the feature of path and all its key, value pairs."""

    # The xfeaturevals rows are removed via the "on delete cascade"
    # foreign key on feature_id.
    self.execute("delete from xfeatures where path = ?", (path,))
-
+
def xfeature_destroy_bulk(self, paths):
    """Destroy the features of paths and all their key, value pairs."""

    # One placeholder per path; xfeaturevals rows cascade away.
    marks = ','.join('?' for _ in paths)
    self.execute("delete from xfeatures where path in (%s)" % marks, paths)
-
+
def feature_dict(self, feature):
    """Return a dict mapping each key to its list of values for feature."""

    self.execute(
        "select key, value from xfeaturevals where feature_id = ?",
        (feature,))
    grouped = defaultdict(list)
    for k, v in self.fetchall():
        grouped[k].append(v)
    return grouped
-
+
def feature_set(self, feature, key, value):
    """Associate a key, value pair with a feature (duplicates ignored)."""

    self.execute(
        "insert or ignore into xfeaturevals (feature_id, key, value) values (?, ?, ?)",
        (feature, key, value))
-
+
def feature_setmany(self, feature, key, values):
    """Associate the key with each of the given values for a feature."""

    # Lazy row generator; duplicates are ignored by the insert.
    rows = ((feature, key, v) for v in values)
    self.executemany(
        "insert or ignore into xfeaturevals (feature_id, key, value) values (?, ?, ?)",
        rows)
-
+
def feature_unset(self, feature, key, value):
    """Disassociate a key, value pair from a feature."""

    self.execute(
        "delete from xfeaturevals where "
        "feature_id = ? and key = ? and value = ?",
        (feature, key, value))
-
+
def feature_unsetmany(self, feature, key, values):
    """Disassociate the key from each of the given values for a feature."""

    rows = ((feature, key, v) for v in values)
    self.executemany(
        "delete from xfeaturevals where "
        "feature_id = ? and key = ? and value = ?",
        rows)
-
+
def feature_get(self, feature, key):
    """Return the list of values stored under key for a feature."""

    self.execute(
        "select value from xfeaturevals where feature_id = ? and key = ?",
        (feature, key))
    return [row[0] for row in self.fetchall()]
-
+
def feature_clear(self, feature, key):
    """Delete every value stored under key for a feature."""

    self.execute(
        "delete from xfeaturevals where feature_id = ? and key = ?",
        (feature, key))
import sqlalchemy as sa
DEFAULT_ALEMBIC_INI_PATH = os.path.join(
- os.path.abspath(os.path.dirname(sqlalchemy_backend.__file__)),
- 'alembic.ini')
+ os.path.abspath(os.path.dirname(sqlalchemy_backend.__file__)),
+ 'alembic.ini')
def initialize_db():
alembic_cfg = Config(DEFAULT_ALEMBIC_INI_PATH)
- db = alembic_cfg.get_main_option("sqlalchemy.url", PITHOS_BACKEND_DB_CONNECTION)
+ db = alembic_cfg.get_main_option(
+ "sqlalchemy.url", PITHOS_BACKEND_DB_CONNECTION)
alembic_cfg.set_main_option("sqlalchemy.url", db)
engine = sa.engine_from_config(
- alembic_cfg.get_section(alembic_cfg.config_ini_section),
- prefix='sqlalchemy.')
+ alembic_cfg.get_section(alembic_cfg.config_ini_section),
+ prefix='sqlalchemy.')
node.create_tables(engine)
groups.create_tables(engine)
command.stamp(alembic_cfg, "head")
-
def main(argv=None, **kwargs):
if not argv:
argv = sys.argv
print "DB initialized."
exit(1)
-
# default config arg, if not already set
if not '-c' in argv:
argv.insert(0, DEFAULT_ALEMBIC_INI_PATH)
argv.insert(0, '-c')
-
alembic_main(argv, **kwargs)
if __name__ == '__main__':
import sys
main(sys.argv)
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
AccountExists, ContainerExists, AccountNotEmpty, ContainerNotEmpty, ItemNotExists, VersionNotExists
# Stripped-down version of the HashMap class found in tools.
+
+
class HashMap(list):
def __init__(self, blocksize, blockhash):
QUEUE_CLIENT_ID = 'pithos'
QUEUE_INSTANCE_ID = '1'
-( CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED ) = range(3)
+(CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED) = range(3)
inf = float('inf')
if not autocommit:
return func
+
def fn(self, *args, **kw):
self.wrapper.execute()
try:
class ModularBackend(BaseBackend):
"""A modular backend.
-
+
Uses modules for SQL functions and storage.
"""
-
+
def __init__(self, db_module=None, db_connection=None,
block_module=None, block_path=None, block_umask=None,
queue_module=None, queue_connection=None):
block_umask = block_umask or DEFAULT_BLOCK_UMASK
#queue_module = queue_module or DEFAULT_QUEUE_MODULE
#queue_connection = queue_connection or DEFAULT_QUEUE_CONNECTION
-
+
self.hash_algorithm = 'sha256'
- self.block_size = 4 * 1024 * 1024 # 4MB
-
- self.default_policy = {'quota': DEFAULT_QUOTA, 'versioning': DEFAULT_VERSIONING}
-
+ self.block_size = 4 * 1024 * 1024 # 4MB
+
+ self.default_policy = {'quota': DEFAULT_QUOTA,
+ 'versioning': DEFAULT_VERSIONING}
+
def load_module(m):
__import__(m)
return sys.modules[m]
-
+
self.db_module = load_module(db_module)
self.wrapper = self.db_module.DBWrapper(db_connection)
params = {'wrapper': self.wrapper}
self.node = self.db_module.Node(**params)
for x in ['ROOTNODE', 'SERIAL', 'HASH', 'SIZE', 'TYPE', 'MTIME', 'MUSER', 'UUID', 'CHECKSUM', 'CLUSTER', 'MATCH_PREFIX', 'MATCH_EXACT']:
setattr(self, x, getattr(self.db_module, x))
-
+
self.block_module = load_module(block_module)
params = {'path': block_path,
'block_size': self.block_size,
class NoQueue:
def send(self, *args):
pass
-
+
def close(self):
pass
-
+
self.queue = NoQueue()
-
+
def close(self):
    """Release backend resources: the DB wrapper and the queue client."""
    self.wrapper.close()
    self.queue.close()
-
+
@backend_method
def list_accounts(self, user, marker=None, limit=10000):
"""Return a list of accounts the user can access."""
-
+
logger.debug("list_accounts: %s %s %s", user, marker, limit)
allowed = self._allowed_accounts(user)
start, limit = self._list_limits(allowed, marker, limit)
return allowed[start:start + limit]
-
+
@backend_method
def get_account_meta(self, user, account, domain, until=None, include_user_defined=True):
"""Return a dictionary with the account metadata for the domain."""
-
- logger.debug("get_account_meta: %s %s %s %s", user, account, domain, until)
+
+ logger.debug(
+ "get_account_meta: %s %s %s %s", user, account, domain, until)
path, node = self._lookup_account(account, user == account)
if user != account:
if until or node is None or account not in self._allowed_accounts(user):
if until is None:
modified = tstamp
else:
- modified = self._get_statistics(node)[2] # Overall last modification.
+ modified = self._get_statistics(
+ node)[2] # Overall last modification.
modified = max(modified, mtime)
-
+
if user != account:
meta = {'name': account}
else:
meta = {}
if props is not None and include_user_defined:
- meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
+ meta.update(
+ dict(self.node.attribute_get(props[self.SERIAL], domain)))
if until is not None:
meta.update({'until_timestamp': tstamp})
meta.update({'name': account, 'count': count, 'bytes': bytes})
meta.update({'modified': modified})
return meta
-
+
@backend_method
def update_account_meta(self, user, account, domain, meta, replace=False):
"""Update the metadata associated with the account for the domain."""
-
- logger.debug("update_account_meta: %s %s %s %s %s", user, account, domain, meta, replace)
+
+ logger.debug("update_account_meta: %s %s %s %s %s", user,
+ account, domain, meta, replace)
if user != account:
raise NotAllowedError
path, node = self._lookup_account(account, True)
self._put_metadata(user, node, domain, meta, replace)
-
+
@backend_method
def get_account_groups(self, user, account):
"""Return a dictionary with the user groups defined for this account."""
-
+
logger.debug("get_account_groups: %s %s", user, account)
if user != account:
if account not in self._allowed_accounts(user):
return {}
self._lookup_account(account, True)
return self.permissions.group_dict(account)
-
+
@backend_method
def update_account_groups(self, user, account, groups, replace=False):
"""Update the groups associated with the account."""
-
- logger.debug("update_account_groups: %s %s %s %s", user, account, groups, replace)
+
+ logger.debug("update_account_groups: %s %s %s %s", user,
+ account, groups, replace)
if user != account:
raise NotAllowedError
self._lookup_account(account, True)
if replace:
self.permissions.group_destroy(account)
for k, v in groups.iteritems():
- if not replace: # If not already deleted.
+ if not replace: # If not already deleted.
self.permissions.group_delete(account, k)
if v:
self.permissions.group_addmany(account, k, v)
-
+
@backend_method
def get_account_policy(self, user, account):
"""Return a dictionary with the account policy."""
-
+
logger.debug("get_account_policy: %s %s", user, account)
if user != account:
if account not in self._allowed_accounts(user):
return {}
path, node = self._lookup_account(account, True)
return self._get_policy(node)
-
+
@backend_method
def update_account_policy(self, user, account, policy, replace=False):
"""Update the policy associated with the account."""
-
- logger.debug("update_account_policy: %s %s %s %s", user, account, policy, replace)
+
+ logger.debug("update_account_policy: %s %s %s %s", user,
+ account, policy, replace)
if user != account:
raise NotAllowedError
path, node = self._lookup_account(account, True)
self._check_policy(policy)
self._put_policy(node, policy, replace)
-
+
@backend_method
def put_account(self, user, account, policy={}):
"""Create a new account with the given name."""
-
+
logger.debug("put_account: %s %s %s", user, account, policy)
if user != account:
raise NotAllowedError
self._check_policy(policy)
node = self._put_path(user, self.ROOTNODE, account)
self._put_policy(node, policy, True)
-
+
@backend_method
def delete_account(self, user, account):
"""Delete the account with the given name."""
-
+
logger.debug("delete_account: %s %s", user, account)
if user != account:
raise NotAllowedError
if not self.node.node_remove(node):
raise AccountNotEmpty('Account is not empty')
self.permissions.group_destroy(account)
-
+
@backend_method
def list_containers(self, user, account, marker=None, limit=10000, shared=False, until=None, public=False):
"""Return a list of containers existing under an account."""
-
- logger.debug("list_containers: %s %s %s %s %s %s %s", user, account, marker, limit, shared, until, public)
+
+ logger.debug("list_containers: %s %s %s %s %s %s %s", user,
+ account, marker, limit, shared, until, public)
if user != account:
if until or account not in self._allowed_accounts(user):
raise NotAllowedError
start, limit = self._list_limits(allowed, marker, limit)
return allowed[start:start + limit]
node = self.node.node_lookup(account)
- containers = [x[0] for x in self._list_object_properties(node, account, '', '/', marker, limit, False, None, [], until)]
- start, limit = self._list_limits([x[0] for x in containers], marker, limit)
+ containers = [x[0] for x in self._list_object_properties(
+ node, account, '', '/', marker, limit, False, None, [], until)]
+ start, limit = self._list_limits(
+ [x[0] for x in containers], marker, limit)
return containers[start:start + limit]
-
+
@backend_method
def list_container_meta(self, user, account, container, domain, until=None):
"""Return a list with all the container's object meta keys for the domain."""
-
- logger.debug("list_container_meta: %s %s %s %s %s", user, account, container, domain, until)
+
+ logger.debug("list_container_meta: %s %s %s %s %s", user,
+ account, container, domain, until)
allowed = []
if user != account:
if until:
raise NotAllowedError
- allowed = self.permissions.access_list_paths(user, '/'.join((account, container)))
+ allowed = self.permissions.access_list_paths(
+ user, '/'.join((account, container)))
if not allowed:
raise NotAllowedError
path, node = self._lookup_container(account, container)
before = until if until is not None else inf
allowed = self._get_formatted_paths(allowed)
return self.node.latest_attribute_keys(node, domain, before, CLUSTER_DELETED, allowed)
-
+
@backend_method
def get_container_meta(self, user, account, container, domain, until=None, include_user_defined=True):
"""Return a dictionary with the container metadata for the domain."""
-
- logger.debug("get_container_meta: %s %s %s %s %s", user, account, container, domain, until)
+
+ logger.debug("get_container_meta: %s %s %s %s %s", user,
+ account, container, domain, until)
if user != account:
if until or container not in self._allowed_containers(user, account):
raise NotAllowedError
if until is None:
modified = tstamp
else:
- modified = self._get_statistics(node)[2] # Overall last modification.
+ modified = self._get_statistics(
+ node)[2] # Overall last modification.
modified = max(modified, mtime)
-
+
if user != account:
meta = {'name': container}
else:
meta = {}
if include_user_defined:
- meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
+ meta.update(
+ dict(self.node.attribute_get(props[self.SERIAL], domain)))
if until is not None:
meta.update({'until_timestamp': tstamp})
meta.update({'name': container, 'count': count, 'bytes': bytes})
meta.update({'modified': modified})
return meta
-
+
@backend_method
def update_container_meta(self, user, account, container, domain, meta, replace=False):
"""Update the metadata associated with the container for the domain."""
-
- logger.debug("update_container_meta: %s %s %s %s %s %s", user, account, container, domain, meta, replace)
+
+ logger.debug("update_container_meta: %s %s %s %s %s %s",
+ user, account, container, domain, meta, replace)
if user != account:
raise NotAllowedError
path, node = self._lookup_container(account, container)
- src_version_id, dest_version_id = self._put_metadata(user, node, domain, meta, replace)
+ src_version_id, dest_version_id = self._put_metadata(
+ user, node, domain, meta, replace)
if src_version_id is not None:
versioning = self._get_policy(node)['versioning']
if versioning != 'auto':
self.node.version_remove(src_version_id)
-
+
@backend_method
def get_container_policy(self, user, account, container):
"""Return a dictionary with the container policy."""
-
- logger.debug("get_container_policy: %s %s %s", user, account, container)
+
+ logger.debug(
+ "get_container_policy: %s %s %s", user, account, container)
if user != account:
if container not in self._allowed_containers(user, account):
raise NotAllowedError
return {}
path, node = self._lookup_container(account, container)
return self._get_policy(node)
-
+
@backend_method
def update_container_policy(self, user, account, container, policy, replace=False):
"""Update the policy associated with the container."""
-
- logger.debug("update_container_policy: %s %s %s %s %s", user, account, container, policy, replace)
+
+ logger.debug("update_container_policy: %s %s %s %s %s",
+ user, account, container, policy, replace)
if user != account:
raise NotAllowedError
path, node = self._lookup_container(account, container)
self._check_policy(policy)
self._put_policy(node, policy, replace)
-
+
@backend_method
def put_container(self, user, account, container, policy={}):
"""Create a new container with the given name."""
-
- logger.debug("put_container: %s %s %s %s", user, account, container, policy)
+
+ logger.debug(
+ "put_container: %s %s %s %s", user, account, container, policy)
if user != account:
raise NotAllowedError
try:
if policy:
self._check_policy(policy)
path = '/'.join((account, container))
- node = self._put_path(user, self._lookup_account(account, True)[1], path)
+ node = self._put_path(
+ user, self._lookup_account(account, True)[1], path)
self._put_policy(node, policy, True)
-
+
@backend_method
def delete_container(self, user, account, container, until=None, prefix='', delimiter=None):
"""Delete/purge the container with the given name."""
-
- logger.debug("delete_container: %s %s %s %s %s %s", user, account, container, until, prefix, delimiter)
+
+ logger.debug("delete_container: %s %s %s %s %s %s", user,
+ account, container, until, prefix, delimiter)
if user != account:
raise NotAllowedError
path, node = self._lookup_container(account, container)
-
+
if until is not None:
- hashes, size = self.node.node_purge_children(node, until, CLUSTER_HISTORY)
+ hashes, size = self.node.node_purge_children(
+ node, until, CLUSTER_HISTORY)
for h in hashes:
self.store.map_delete(h)
self.node.node_purge_children(node, until, CLUSTER_DELETED)
- self._report_size_change(user, account, -size, {'action': 'container purge', 'path':path})
+ self._report_size_change(user, account, -size, {'action':
+ 'container purge', 'path': path})
return
-
+
if not delimiter:
if self._get_statistics(node)[0] > 0:
raise ContainerNotEmpty('Container is not empty')
- hashes, size = self.node.node_purge_children(node, inf, CLUSTER_HISTORY)
+ hashes, size = self.node.node_purge_children(
+ node, inf, CLUSTER_HISTORY)
for h in hashes:
self.store.map_delete(h)
self.node.node_purge_children(node, inf, CLUSTER_DELETED)
self.node.node_remove(node)
- self._report_size_change(user, account, -size, {'action': 'container delete', 'path':path})
+ self._report_size_change(user, account, -size, {'action':
+ 'container delete', 'path': path})
else:
- # remove only contents
+ # remove only contents
src_names = self._list_objects_no_limit(user, account, container, prefix='', delimiter=None, virtual=False, domain=None, keys=[], shared=False, until=None, size_range=None, all_props=True, public=False)
paths = []
for t in src_names:
path = '/'.join((account, container, t[0]))
node = t[2]
src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED)
- del_size = self._apply_versioning(account, container, src_version_id)
+ del_size = self._apply_versioning(
+ account, container, src_version_id)
if del_size:
- self._report_size_change(user, account, -del_size, {'action': 'object delete', 'path':path})
- self._report_object_change(user, account, path, details={'action': 'object delete'})
+ self._report_size_change(user, account, -del_size, {'action': 'object delete', 'path': path})
+ self._report_object_change(
+ user, account, path, details={'action': 'object delete'})
paths.append(path)
self.permissions.access_clear_bulk(paths)
-
+
def _list_objects(self, user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, all_props, public):
if user != account and until:
raise NotAllowedError
if shared and public:
# get shared first
- shared = self._list_object_permissions(user, account, container, prefix, shared=True, public=False)
+ shared = self._list_object_permissions(
+ user, account, container, prefix, shared=True, public=False)
objects = set()
if shared:
path, node = self._lookup_container(account, container)
shared = self._get_formatted_paths(shared)
objects |= set(self._list_object_properties(node, path, prefix, delimiter, marker, limit, virtual, domain, keys, until, size_range, shared, all_props))
-
+
# get public
- objects |= set(self._list_public_object_properties(user, account, container, prefix, all_props))
+ objects |= set(self._list_public_object_properties(
+ user, account, container, prefix, all_props))
objects = list(objects)
-
+
objects.sort(key=lambda x: x[0])
- start, limit = self._list_limits([x[0] for x in objects], marker, limit)
+ start, limit = self._list_limits(
+ [x[0] for x in objects], marker, limit)
return objects[start:start + limit]
elif public:
- objects = self._list_public_object_properties(user, account, container, prefix, all_props)
- start, limit = self._list_limits([x[0] for x in objects], marker, limit)
+ objects = self._list_public_object_properties(
+ user, account, container, prefix, all_props)
+ start, limit = self._list_limits(
+ [x[0] for x in objects], marker, limit)
return objects[start:start + limit]
-
- allowed = self._list_object_permissions(user, account, container, prefix, shared, public)
+
+ allowed = self._list_object_permissions(
+ user, account, container, prefix, shared, public)
if shared and not allowed:
return []
path, node = self._lookup_container(account, container)
allowed = self._get_formatted_paths(allowed)
objects = self._list_object_properties(node, path, prefix, delimiter, marker, limit, virtual, domain, keys, until, size_range, allowed, all_props)
- start, limit = self._list_limits([x[0] for x in objects], marker, limit)
+ start, limit = self._list_limits(
+ [x[0] for x in objects], marker, limit)
return objects[start:start + limit]
-
+
def _list_public_object_properties(self, user, account, container, prefix, all_props):
- public = self._list_object_permissions(user, account, container, prefix, shared=False, public=True)
+ public = self._list_object_permissions(
+ user, account, container, prefix, shared=False, public=True)
paths, nodes = self._lookup_objects(public)
path = '/'.join((account, container))
cont_prefix = path + '/'
props = self.node.version_lookup_bulk(nodes, all_props=all_props)
objects = [(path,) + props for path, props in zip(paths, props)]
return objects
-
+
def _list_objects_no_limit(self, user, account, container, prefix, delimiter, virtual, domain, keys, shared, until, size_range, all_props, public):
objects = []
while True:
if not l or len(l) < limit:
break
return objects
-
+
def _list_object_permissions(self, user, account, container, prefix, shared, public):
allowed = []
path = '/'.join((account, container, prefix)).rstrip('/')
if shared:
allowed.update(self.permissions.access_list_shared(path))
if public:
- allowed.update([x[0] for x in self.permissions.public_list(path)])
+ allowed.update(
+ [x[0] for x in self.permissions.public_list(path)])
allowed = sorted(allowed)
if not allowed:
return []
return allowed
-
+
@backend_method
def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None, public=False):
"""Return a list of object (name, version_id) tuples existing under a container."""
-
+
logger.debug("list_objects: %s %s %s %s %s %s %s %s %s %s %s %s %s %s", user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, public)
return self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, False, public)
-
+
@backend_method
def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], shared=False, until=None, size_range=None, public=False):
"""Return a list of object metadata dicts existing under a container."""
-
+
logger.debug("list_object_meta: %s %s %s %s %s %s %s %s %s %s %s %s %s %s", user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, public)
props = self._list_objects(user, account, container, prefix, delimiter, marker, limit, virtual, domain, keys, shared, until, size_range, True, public)
objects = []
'uuid': p[self.UUID + 1],
'checksum': p[self.CHECKSUM + 1]})
return objects
-
+
@backend_method
def list_object_permissions(self, user, account, container, prefix=''):
"""Return a list of paths that enforce permissions under a container."""
-
- logger.debug("list_object_permissions: %s %s %s %s", user, account, container, prefix)
+
+ logger.debug("list_object_permissions: %s %s %s %s", user,
+ account, container, prefix)
return self._list_object_permissions(user, account, container, prefix, True, False)
-
+
@backend_method
def list_object_public(self, user, account, container, prefix=''):
"""Return a dict mapping paths to public ids for objects that are public under a container."""
-
- logger.debug("list_object_public: %s %s %s %s", user, account, container, prefix)
+
+ logger.debug("list_object_public: %s %s %s %s", user,
+ account, container, prefix)
public = {}
for path, p in self.permissions.public_list('/'.join((account, container, prefix))):
public[path] = p + ULTIMATE_ANSWER
return public
-
+
@backend_method
def get_object_meta(self, user, account, container, name, domain, version=None, include_user_defined=True):
"""Return a dictionary with the object metadata for the domain."""
-
- logger.debug("get_object_meta: %s %s %s %s %s %s", user, account, container, name, domain, version)
+
+ logger.debug("get_object_meta: %s %s %s %s %s %s", user,
+ account, container, name, domain, version)
self._can_read(user, account, container, name)
path, node = self._lookup_object(account, container, name)
props = self._get_version(node, version)
modified = props[self.MTIME]
else:
try:
- modified = self._get_version(node)[self.MTIME] # Overall last modification.
- except NameError: # Object may be deleted.
- del_props = self.node.version_lookup(node, inf, CLUSTER_DELETED)
+ modified = self._get_version(
+ node)[self.MTIME] # Overall last modification.
+ except NameError: # Object may be deleted.
+ del_props = self.node.version_lookup(
+ node, inf, CLUSTER_DELETED)
if del_props is None:
raise ItemNotExists('Object does not exist')
modified = del_props[self.MTIME]
-
+
meta = {}
if include_user_defined:
- meta.update(dict(self.node.attribute_get(props[self.SERIAL], domain)))
+ meta.update(
+ dict(self.node.attribute_get(props[self.SERIAL], domain)))
meta.update({'name': name,
'bytes': props[self.SIZE],
'type': props[self.TYPE],
'uuid': props[self.UUID],
'checksum': props[self.CHECKSUM]})
return meta
-
+
@backend_method
def update_object_meta(self, user, account, container, name, domain, meta, replace=False):
"""Update the metadata associated with the object for the domain and return the new version."""
-
- logger.debug("update_object_meta: %s %s %s %s %s %s %s", user, account, container, name, domain, meta, replace)
+
+ logger.debug("update_object_meta: %s %s %s %s %s %s %s",
+ user, account, container, name, domain, meta, replace)
self._can_write(user, account, container, name)
path, node = self._lookup_object(account, container, name)
- src_version_id, dest_version_id = self._put_metadata(user, node, domain, meta, replace)
+ src_version_id, dest_version_id = self._put_metadata(
+ user, node, domain, meta, replace)
self._apply_versioning(account, container, src_version_id)
return dest_version_id
-
+
@backend_method
def get_object_permissions(self, user, account, container, name):
"""Return the action allowed on the object, the path
from which the object gets its permissions from,
along with a dictionary containing the permissions."""
-
- logger.debug("get_object_permissions: %s %s %s %s", user, account, container, name)
+
+ logger.debug("get_object_permissions: %s %s %s %s", user,
+ account, container, name)
allowed = 'write'
permissions_path = self._get_permissions_path(account, container, name)
if user != account:
raise NotAllowedError
self._lookup_object(account, container, name)
return (allowed, permissions_path, self.permissions.access_get(permissions_path))
-
+
@backend_method
def update_object_permissions(self, user, account, container, name, permissions):
"""Update the permissions associated with the object."""
-
- logger.debug("update_object_permissions: %s %s %s %s %s", user, account, container, name, permissions)
+
+ logger.debug("update_object_permissions: %s %s %s %s %s",
+ user, account, container, name, permissions)
if user != account:
raise NotAllowedError
path = self._lookup_object(account, container, name)[0]
self._check_permissions(path, permissions)
self.permissions.access_set(path, permissions)
- self._report_sharing_change(user, account, path, {'members':self.permissions.access_members(path)})
-
+ self._report_sharing_change(user, account, path, {'members':
+ self.permissions.access_members(path)})
+
@backend_method
def get_object_public(self, user, account, container, name):
"""Return the public id of the object if applicable."""
-
- logger.debug("get_object_public: %s %s %s %s", user, account, container, name)
+
+ logger.debug(
+ "get_object_public: %s %s %s %s", user, account, container, name)
self._can_read(user, account, container, name)
path = self._lookup_object(account, container, name)[0]
p = self.permissions.public_get(path)
if p is not None:
p += ULTIMATE_ANSWER
return p
-
+
@backend_method
def update_object_public(self, user, account, container, name, public):
"""Update the public status of the object."""
-
- logger.debug("update_object_public: %s %s %s %s %s", user, account, container, name, public)
+
+ logger.debug("update_object_public: %s %s %s %s %s", user,
+ account, container, name, public)
self._can_write(user, account, container, name)
path = self._lookup_object(account, container, name)[0]
if not public:
self.permissions.public_unset(path)
else:
self.permissions.public_set(path)
-
+
@backend_method
def get_object_hashmap(self, user, account, container, name, version=None):
"""Return the object's size and a list with partial hashes."""
-
- logger.debug("get_object_hashmap: %s %s %s %s %s", user, account, container, name, version)
+
+ logger.debug("get_object_hashmap: %s %s %s %s %s", user,
+ account, container, name, version)
self._can_read(user, account, container, name)
path, node = self._lookup_object(account, container, name)
props = self._get_version(node, version)
hashmap = self.store.map_get(binascii.unhexlify(props[self.HASH]))
return props[self.SIZE], [binascii.hexlify(x) for x in hashmap]
-
+
def _update_object_hash(self, user, account, container, name, size, type, hash, checksum, domain, meta, replace_meta, permissions, src_node=None, src_version_id=None, is_copy=False):
if permissions is not None and user != account:
raise NotAllowedError
if permissions is not None:
path = '/'.join((account, container, name))
self._check_permissions(path, permissions)
-
+
account_path, account_node = self._lookup_account(account, True)
- container_path, container_node = self._lookup_container(account, container)
- path, node = self._put_object_node(container_path, container_node, name)
+ container_path, container_node = self._lookup_container(
+ account, container)
+ path, node = self._put_object_node(
+ container_path, container_node, name)
pre_version_id, dest_version_id = self._put_version_duplicate(user, node, src_node=src_node, size=size, type=type, hash=hash, checksum=checksum, is_copy=is_copy)
-
+
# Handle meta.
if src_version_id is None:
src_version_id = pre_version_id
- self._put_metadata_duplicate(src_version_id, dest_version_id, domain, meta, replace_meta)
-
+ self._put_metadata_duplicate(
+ src_version_id, dest_version_id, domain, meta, replace_meta)
+
# Check quota.
del_size = self._apply_versioning(account, container, pre_version_id)
size_delta = size - del_size
(container_quota > 0 and self._get_statistics(container_node)[1] + size_delta > container_quota):
# This must be executed in a transaction, so the version is never created if it fails.
raise QuotaError
- self._report_size_change(user, account, size_delta, {'action': 'object update', 'path':path})
-
+ self._report_size_change(user, account, size_delta, {
+ 'action': 'object update', 'path': path})
+
if permissions is not None:
self.permissions.access_set(path, permissions)
- self._report_sharing_change(user, account, path, {'members':self.permissions.access_members(path)})
-
+ self._report_sharing_change(user, account, path, {'members': self.permissions.access_members(path)})
+
self._report_object_change(user, account, path, details={'version': dest_version_id, 'action': 'object update'})
return dest_version_id
-
+
@backend_method
def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta={}, replace_meta=False, permissions=None):
"""Create/update an object with the specified size and partial hashes."""
-
- logger.debug("update_object_hashmap: %s %s %s %s %s %s %s %s", user, account, container, name, size, type, hashmap, checksum)
- if size == 0: # No such thing as an empty hashmap.
+
+ logger.debug("update_object_hashmap: %s %s %s %s %s %s %s %s", user,
+ account, container, name, size, type, hashmap, checksum)
+ if size == 0: # No such thing as an empty hashmap.
hashmap = [self.put_block('')]
map = HashMap(self.block_size, self.hash_algorithm)
map.extend([binascii.unhexlify(x) for x in hashmap])
ie = IndexError()
ie.data = [binascii.hexlify(x) for x in missing]
raise ie
-
+
hash = map.hash()
dest_version_id = self._update_object_hash(user, account, container, name, size, type, binascii.hexlify(hash), checksum, domain, meta, replace_meta, permissions)
self.store.map_put(hash, map)
return dest_version_id
-
+
@backend_method
def update_object_checksum(self, user, account, container, name, version, checksum):
"""Update an object's checksum."""
-
- logger.debug("update_object_checksum: %s %s %s %s %s %s", user, account, container, name, version, checksum)
+
+ logger.debug("update_object_checksum: %s %s %s %s %s %s",
+ user, account, container, name, version, checksum)
# Update objects with greater version and same hashmap and size (fix metadata updates).
self._can_write(user, account, container, name)
path, node = self._lookup_object(account, container, name)
versions = self.node.node_get_versions(node)
for x in versions:
if x[self.SERIAL] >= int(version) and x[self.HASH] == props[self.HASH] and x[self.SIZE] == props[self.SIZE]:
- self.node.version_put_property(x[self.SERIAL], 'checksum', checksum)
-
+ self.node.version_put_property(
+ x[self.SERIAL], 'checksum', checksum)
+
def _copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, dest_domain=None, dest_meta={}, replace_meta=False, permissions=None, src_version=None, is_move=False, delimiter=None):
dest_version_ids = []
self._can_read(user, src_account, src_container, src_name)
path, node = self._lookup_object(src_account, src_container, src_name)
# TODO: Will do another fetch of the properties in duplicate version...
- props = self._get_version(node, src_version) # Check to see if source exists.
+ props = self._get_version(
+ node, src_version) # Check to see if source exists.
src_version_id = props[self.SERIAL]
hash = props[self.HASH]
size = props[self.SIZE]
- is_copy = not is_move and (src_account, src_container, src_name) != (dest_account, dest_container, dest_name) # New uuid.
+ is_copy = not is_move and (src_account, src_container, src_name) != (
+ dest_account, dest_container, dest_name) # New uuid.
dest_version_ids.append(self._update_object_hash(user, dest_account, dest_container, dest_name, size, type, hash, None, dest_domain, dest_meta, replace_meta, permissions, src_node=node, src_version_id=src_version_id, is_copy=is_copy))
if is_move and (src_account, src_container, src_name) != (dest_account, dest_container, dest_name):
- self._delete_object(user, src_account, src_container, src_name)
-
+ self._delete_object(user, src_account, src_container, src_name)
+
if delimiter:
- prefix = src_name + delimiter if not src_name.endswith(delimiter) else src_name
+ prefix = src_name + \
+ delimiter if not src_name.endswith(delimiter) else src_name
src_names = self._list_objects_no_limit(user, src_account, src_container, prefix, delimiter=None, virtual=False, domain=None, keys=[], shared=False, until=None, size_range=None, all_props=True, public=False)
- src_names.sort(key=lambda x: x[2]) # order by nodes
+ src_names.sort(key=lambda x: x[2]) # order by nodes
paths = [elem[0] for elem in src_names]
nodes = [elem[2] for elem in src_names]
# TODO: Will do another fetch of the properties in duplicate version...
- props = self._get_versions(nodes) # Check to see if source exists.
-
+ props = self._get_versions(nodes) # Check to see if source exists.
+
for prop, path, node in zip(props, paths, nodes):
src_version_id = prop[self.SERIAL]
hash = prop[self.HASH]
vtype = prop[self.TYPE]
size = prop[self.SIZE]
- dest_prefix = dest_name + delimiter if not dest_name.endswith(delimiter) else dest_name
+ dest_prefix = dest_name + delimiter if not dest_name.endswith(
+ delimiter) else dest_name
vdest_name = path.replace(prefix, dest_prefix, 1)
dest_version_ids.append(self._update_object_hash(user, dest_account, dest_container, vdest_name, size, vtype, hash, None, dest_domain, meta={}, replace_meta=False, permissions=None, src_node=node, src_version_id=src_version_id, is_copy=is_copy))
if is_move and (src_account, src_container, src_name) != (dest_account, dest_container, dest_name):
- self._delete_object(user, src_account, src_container, path)
+ self._delete_object(user, src_account, src_container, path)
return dest_version_ids[0] if len(dest_version_ids) == 1 else dest_version_ids
-
+
@backend_method
def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, src_version=None, delimiter=None):
"""Copy an object's data and metadata."""
-
+
logger.debug("copy_object: %s %s %s %s %s %s %s %s %s %s %s %s %s %s", user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version, delimiter)
dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, src_version, False, delimiter)
return dest_version_id
-
+
@backend_method
def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta={}, replace_meta=False, permissions=None, delimiter=None):
"""Move an object's data and metadata."""
-
+
logger.debug("move_object: %s %s %s %s %s %s %s %s %s %s %s %s %s", user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, delimiter)
if user != src_account:
raise NotAllowedError
dest_version_id = self._copy_object(user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta, replace_meta, permissions, None, True, delimiter)
return dest_version_id
-
+
def _delete_object(self, user, account, container, name, until=None, delimiter=None):
if user != account:
raise NotAllowedError
-
+
if until is not None:
path = '/'.join((account, container, name))
node = self.node.node_lookup(path)
props = self._get_version(node)
except NameError:
self.permissions.access_clear(path)
- self._report_size_change(user, account, -size, {'action': 'object purge', 'path':path})
+ self._report_size_change(user, account, -size, {
+ 'action': 'object purge', 'path': path})
return
-
+
path, node = self._lookup_object(account, container, name)
src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED)
del_size = self._apply_versioning(account, container, src_version_id)
if del_size:
- self._report_size_change(user, account, -del_size, {'action': 'object delete', 'path':path})
- self._report_object_change(user, account, path, details={'action': 'object delete'})
+ self._report_size_change(user, account, -del_size, {
+ 'action': 'object delete', 'path': path})
+ self._report_object_change(
+ user, account, path, details={'action': 'object delete'})
self.permissions.access_clear(path)
-
+
if delimiter:
prefix = name + delimiter if not name.endswith(delimiter) else name
src_names = self._list_objects_no_limit(user, account, container, prefix, delimiter=None, virtual=False, domain=None, keys=[], shared=False, until=None, size_range=None, all_props=True, public=False)
paths = []
for t in src_names:
- path = '/'.join((account, container, t[0]))
- node = t[2]
+ path = '/'.join((account, container, t[0]))
+ node = t[2]
src_version_id, dest_version_id = self._put_version_duplicate(user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED)
- del_size = self._apply_versioning(account, container, src_version_id)
+ del_size = self._apply_versioning(
+ account, container, src_version_id)
if del_size:
- self._report_size_change(user, account, -del_size, {'action': 'object delete', 'path':path})
- self._report_object_change(user, account, path, details={'action': 'object delete'})
+ self._report_size_change(user, account, -del_size, {'action': 'object delete', 'path': path})
+ self._report_object_change(
+ user, account, path, details={'action': 'object delete'})
paths.append(path)
self.permissions.access_clear_bulk(paths)
-
+
@backend_method
def delete_object(self, user, account, container, name, until=None, prefix='', delimiter=None):
"""Delete/purge an object."""
-
- logger.debug("delete_object: %s %s %s %s %s %s %s", user, account, container, name, until, prefix, delimiter)
+
+ logger.debug("delete_object: %s %s %s %s %s %s %s", user,
+ account, container, name, until, prefix, delimiter)
self._delete_object(user, account, container, name, until, delimiter)
-
+
@backend_method
def list_versions(self, user, account, container, name):
"""Return a list of all (version, version_timestamp) tuples for an object."""
-
- logger.debug("list_versions: %s %s %s %s", user, account, container, name)
+
+ logger.debug(
+ "list_versions: %s %s %s %s", user, account, container, name)
self._can_read(user, account, container, name)
path, node = self._lookup_object(account, container, name)
versions = self.node.node_get_versions(node)
return [[x[self.SERIAL], x[self.MTIME]] for x in versions if x[self.CLUSTER] != CLUSTER_DELETED]
-
+
@backend_method
def get_uuid(self, user, uuid):
"""Return the (account, container, name) for the UUID given."""
-
+
logger.debug("get_uuid: %s %s", user, uuid)
info = self.node.latest_uuid(uuid)
if info is None:
account, container, name = path.split('/', 2)
self._can_read(user, account, container, name)
return (account, container, name)
-
+
@backend_method
def get_public(self, user, public):
"""Return the (account, container, name) for the public id given."""
-
+
logger.debug("get_public: %s %s", user, public)
if public is None or public < ULTIMATE_ANSWER:
raise NameError
account, container, name = path.split('/', 2)
self._can_read(user, account, container, name)
return (account, container, name)
-
+
@backend_method(autocommit=0)
def get_block(self, hash):
"""Return a block's data."""
-
+
logger.debug("get_block: %s", hash)
block = self.store.block_get(binascii.unhexlify(hash))
if not block:
raise ItemNotExists('Block does not exist')
return block
-
+
@backend_method(autocommit=0)
def put_block(self, data):
"""Store a block and return the hash."""
-
+
logger.debug("put_block: %s", len(data))
return binascii.hexlify(self.store.block_put(data))
-
+
@backend_method(autocommit=0)
def update_block(self, hash, data, offset=0):
"""Update a known block and return the hash."""
-
+
logger.debug("update_block: %s %s %s", hash, len(data), offset)
if offset == 0 and len(data) == self.block_size:
return self.put_block(data)
h = self.store.block_update(binascii.unhexlify(hash), offset, data)
return binascii.hexlify(h)
-
+
# Path functions.
-
+
def _generate_uuid(self):
return str(uuidlib.uuid4())
-
+
def _put_object_node(self, path, parent, name):
path = '/'.join((path, name))
node = self.node.node_lookup(path)
if node is None:
node = self.node.node_create(parent, path)
return path, node
-
+
def _put_path(self, user, parent, path):
node = self.node.node_create(parent, path)
- self.node.version_create(node, None, 0, '', None, user, self._generate_uuid(), '', CLUSTER_NORMAL)
+ self.node.version_create(node, None, 0, '', None, user,
+ self._generate_uuid(), '', CLUSTER_NORMAL)
return node
-
+
def _lookup_account(self, account, create=True):
node = self.node.node_lookup(account)
if node is None and create:
- node = self._put_path(account, self.ROOTNODE, account) # User is account.
+ node = self._put_path(
+ account, self.ROOTNODE, account) # User is account.
return account, node
-
+
def _lookup_container(self, account, container):
path = '/'.join((account, container))
node = self.node.node_lookup(path)
if node is None:
raise ItemNotExists('Container does not exist')
return path, node
-
+
def _lookup_object(self, account, container, name):
path = '/'.join((account, container, name))
node = self.node.node_lookup(path)
if node is None:
raise ItemNotExists('Object does not exist')
return path, node
-
+
def _lookup_objects(self, paths):
nodes = self.node.node_lookup_bulk(paths)
return paths, nodes
-
+
def _get_properties(self, node, until=None):
"""Return properties until the timestamp given."""
-
+
before = until if until is not None else inf
props = self.node.version_lookup(node, before, CLUSTER_NORMAL)
if props is None and until is not None:
if props is None:
raise ItemNotExists('Path does not exist')
return props
-
+
def _get_statistics(self, node, until=None):
"""Return count, sum of size and latest timestamp of everything under node."""
-
+
if until is None:
stats = self.node.statistics_get(node, CLUSTER_NORMAL)
else:
if stats is None:
stats = (0, 0, 0)
return stats
-
+
def _get_version(self, node, version=None):
if version is None:
props = self.node.version_lookup(node, inf, CLUSTER_NORMAL)
def _get_versions(self, nodes):
return self.node.version_lookup_bulk(nodes, inf, CLUSTER_NORMAL)
-
+
def _put_version_duplicate(self, user, node, src_node=None, size=None, type=None, hash=None, checksum=None, cluster=CLUSTER_NORMAL, is_copy=False):
"""Create a new version of the node."""
-
- props = self.node.version_lookup(node if src_node is None else src_node, inf, CLUSTER_NORMAL)
+
+ props = self.node.version_lookup(
+ node if src_node is None else src_node, inf, CLUSTER_NORMAL)
if props is not None:
src_version_id = props[self.SERIAL]
src_hash = props[self.HASH]
src_size = 0
src_type = ''
src_checksum = ''
- if size is None: # Set metadata.
- hash = src_hash # This way hash can be set to None (account or container).
+ if size is None: # Set metadata.
+ hash = src_hash # This way hash can be set to None (account or container).
size = src_size
if type is None:
type = src_type
if checksum is None:
checksum = src_checksum
- uuid = self._generate_uuid() if (is_copy or src_version_id is None) else props[self.UUID]
-
+ uuid = self._generate_uuid(
+ ) if (is_copy or src_version_id is None) else props[self.UUID]
+
if src_node is None:
pre_version_id = src_version_id
else:
pre_version_id = props[self.SERIAL]
if pre_version_id is not None:
self.node.version_recluster(pre_version_id, CLUSTER_HISTORY)
-
+
dest_version_id, mtime = self.node.version_create(node, hash, size, type, src_version_id, user, uuid, checksum, cluster)
return pre_version_id, dest_version_id
-
+
def _put_metadata_duplicate(self, src_version_id, dest_version_id, domain, meta, replace=False):
if src_version_id is not None:
self.node.attribute_copy(src_version_id, dest_version_id)
if not replace:
- self.node.attribute_del(dest_version_id, domain, (k for k, v in meta.iteritems() if v == ''))
- self.node.attribute_set(dest_version_id, domain, ((k, v) for k, v in meta.iteritems() if v != ''))
+ self.node.attribute_del(dest_version_id, domain, (
+ k for k, v in meta.iteritems() if v == ''))
+ self.node.attribute_set(dest_version_id, domain, (
+ (k, v) for k, v in meta.iteritems() if v != ''))
else:
self.node.attribute_del(dest_version_id, domain)
- self.node.attribute_set(dest_version_id, domain, ((k, v) for k, v in meta.iteritems()))
-
+ self.node.attribute_set(dest_version_id, domain, ((
+ k, v) for k, v in meta.iteritems()))
+
def _put_metadata(self, user, node, domain, meta, replace=False):
"""Create a new version and store metadata."""
-
- src_version_id, dest_version_id = self._put_version_duplicate(user, node)
- self._put_metadata_duplicate(src_version_id, dest_version_id, domain, meta, replace)
+
+ src_version_id, dest_version_id = self._put_version_duplicate(
+ user, node)
+ self._put_metadata_duplicate(
+ src_version_id, dest_version_id, domain, meta, replace)
return src_version_id, dest_version_id
-
+
def _list_limits(self, listing, marker, limit):
start = 0
if marker:
if not limit or limit > 10000:
limit = 10000
return start, limit
-
+
def _list_object_properties(self, parent, path, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=[], until=None, size_range=None, allowed=[], all_props=False):
cont_prefix = path + '/'
prefix = cont_prefix + prefix
before = until if until is not None else inf
filterq = keys if domain else []
sizeq = size_range
-
+
objects, prefixes = self.node.latest_version_list(parent, prefix, delimiter, start, limit, before, CLUSTER_DELETED, allowed, domain, filterq, sizeq, all_props)
objects.extend([(p, None) for p in prefixes] if virtual else [])
objects.sort(key=lambda x: x[0])
objects = [(x[0][len(cont_prefix):],) + x[1:] for x in objects]
return objects
-
+
# Reporting functions.
-
+
def _report_size_change(self, user, account, size, details={}):
account_node = self._lookup_account(account, True)[1]
total = self._get_statistics(account_node)[1]
details.update({'user': user, 'total': total})
- logger.debug("_report_size_change: %s %s %s %s", user, account, size, details)
- self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % ('resource.diskspace',), account, QUEUE_INSTANCE_ID, 'diskspace', float(size), details))
-
+ logger.debug(
+ "_report_size_change: %s %s %s %s", user, account, size, details)
+ self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % ('resource.diskspace',),
+ account,
+ QUEUE_INSTANCE_ID,
+ 'diskspace',
+ float(size),
+ details))
+
def _report_object_change(self, user, account, path, details={}):
details.update({'user': user})
- logger.debug("_report_object_change: %s %s %s %s", user, account, path, details)
- self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % ('object',), account, QUEUE_INSTANCE_ID, 'object', path, details))
-
+ logger.debug("_report_object_change: %s %s %s %s", user,
+ account, path, details)
+ self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % (
+ 'object',), account, QUEUE_INSTANCE_ID, 'object', path, details))
+
def _report_sharing_change(self, user, account, path, details={}):
- logger.debug("_report_permissions_change: %s %s %s %s", user, account, path, details)
+ logger.debug("_report_permissions_change: %s %s %s %s",
+ user, account, path, details)
details.update({'user': user})
- self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % ('sharing',), account, QUEUE_INSTANCE_ID, 'sharing', path, details))
-
+ self.messages.append((QUEUE_MESSAGE_KEY_PREFIX % ('sharing',), account,
+ QUEUE_INSTANCE_ID, 'sharing', path, details))
+
# Policy functions.
-
+
def _check_policy(self, policy):
for k in policy.keys():
if policy[k] == '':
policy[k] = self.default_policy.get(k)
for k, v in policy.iteritems():
if k == 'quota':
- q = int(v) # May raise ValueError.
+ q = int(v) # May raise ValueError.
if q < 0:
raise ValueError
elif k == 'versioning':
raise ValueError
else:
raise ValueError
-
+
def _put_policy(self, node, policy, replace):
if replace:
for k, v in self.default_policy.iteritems():
if k not in policy:
policy[k] = v
self.node.policy_set(node, policy)
-
+
def _get_policy(self, node):
policy = self.default_policy.copy()
policy.update(self.node.policy_get(node))
return policy
-
+
def _apply_versioning(self, account, container, version_id):
"""Delete the provided version if such is the policy.
Return size of object removed.
"""
-
+
if version_id is None:
return 0
path, node = self._lookup_container(account, container)
self.store.map_delete(hash)
return size
return 0
-
+
# Access control functions.
-
+
def _check_groups(self, groups):
# raise ValueError('Bad characters in groups')
pass
-
+
def _check_permissions(self, path, permissions):
# raise ValueError('Bad characters in permissions')
pass
-
+
def _get_formatted_paths(self, paths):
formatted = []
for p in paths:
formatted.append((p.rstrip('/') + '/', self.MATCH_PREFIX))
formatted.append((p, self.MATCH_EXACT))
return formatted
-
+
def _get_permissions_path(self, account, container, name):
path = '/'.join((account, container, name))
permission_paths = self.permissions.access_inherit(path)
if props[self.TYPE].split(';', 1)[0].strip() in ('application/directory', 'application/folder'):
return p
return None
-
+
def _can_read(self, user, account, container, name):
if user == account:
return True
raise NotAllowedError
if not self.permissions.access_check(path, self.READ, user) and not self.permissions.access_check(path, self.WRITE, user):
raise NotAllowedError
-
+
def _can_write(self, user, account, container, name):
if user == account:
return True
raise NotAllowedError
if not self.permissions.access_check(path, self.WRITE, user):
raise NotAllowedError
-
+
def _allowed_accounts(self, user):
allow = set()
for path in self.permissions.access_list_paths(user):
allow.add(path.split('/', 1)[0])
return sorted(allow)
-
+
def _allowed_containers(self, user, account):
allow = set()
for path in self.permissions.access_list_paths(user, account):
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
+
+
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
- show_ignored=False):
+ show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
- and not prefix):
+ and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
- stack.append((fn, prefix + name + "/", package, only_in_packages))
+ stack.append(
+ (fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
break
if bad_name:
continue
- out.setdefault(package, []).append(prefix+name)
+ out.setdefault(package, []).append(prefix + name)
return out
setup(
- name = 'snf-pithos-backend',
- version = VERSION,
- license = 'BSD',
- url = 'http://code.grnet.gr/',
- description = SHORT_DESCRIPTION,
- long_description=README + '\n\n' + CHANGES,
- classifiers = CLASSIFIERS,
-
- author = 'Package author',
- author_email = 'author@grnet.gr',
- maintainer = 'Package maintainer',
- maintainer_email = 'maintainer@grnet.gr',
-
- namespace_packages = ['pithos'],
- packages = PACKAGES,
- package_dir= {'': PACKAGES_ROOT},
+ name='snf-pithos-backend',
+ version=VERSION,
+ license='BSD',
+ url='http://code.grnet.gr/',
+ description=SHORT_DESCRIPTION,
+ long_description=README + '\n\n' + CHANGES,
+ classifiers=CLASSIFIERS,
+
+ author='Package author',
+ author_email='author@grnet.gr',
+ maintainer='Package maintainer',
+ maintainer_email='maintainer@grnet.gr',
+
+ namespace_packages=['pithos'],
+ packages=PACKAGES,
+ package_dir={'': PACKAGES_ROOT},
package_data=find_package_data("."),
- include_package_data = True,
- zip_safe = False,
+ include_package_data=True,
+ zip_safe=False,
- dependency_links = [
+ dependency_links=[
'http://docs.dev.grnet.gr/pypi/'],
- install_requires = INSTALL_REQUIRES,
- extras_require = EXTRAS_REQUIRES,
- tests_require = TESTS_REQUIRES,
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRAS_REQUIRES,
+ tests_require=TESTS_REQUIRES,
- entry_points = {
- 'console_scripts': [
- 'pithos-migrate = pithos.backends.migrate:main'
- ],
+ entry_points={
+ 'console_scripts': [
+ 'pithos-migrate = pithos.backends.migrate:main'
+ ],
},
)
-
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("distribute>="+version)
+ pkg_resources.require("distribute>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
+
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
dst.close()
return os.path.realpath(saveto)
+
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
return __no_sandbox
+
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
_patch_file = _no_sandbox(_patch_file)
+
def _same_content(path, content):
return open(path).read() == content
+
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
+
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
log.warn('Unkown installation at %s', placeholder)
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
+
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
+ (SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
finally:
f.close()
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+ _create_fake_setuptools_pkg_info)
+
def _patch_egg_dir(path):
# let's check if it's already patched
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
- args = sys.argv[sys.argv.index('install')+1:]
+ args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
- top_dir = args[index+1]
+ top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
replacement=False))
except TypeError:
# old distribute API
- setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+ setuptools_dist = ws.find(
+ pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
+ tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
import logging
from synnefo.lib.queue import (exchange_connect, exchange_close,
- exchange_send, exchange_route, queue_callback, queue_start)
+ exchange_send, exchange_route, queue_callback, queue_start)
from optparse import OptionParser
try:
from synnefo import settings
except ImportError:
- raise Exception("Cannot import settings")
+ raise Exception("Cannot import settings")
setup_environ(settings)
BROKER_HOST = 'localhost'
CONSUMER_EXCHANGE = 'sample'
CONSUMER_KEY = '#'
+
def main():
parser = OptionParser()
parser.add_option('-v', '--verbose', action='store_true', default=False,
parser.add_option('--test', action='store_true', default=False,
dest='test', help='Produce a dummy message for testing')
opts, args = parser.parse_args()
-
+
DEBUG = False
if opts.verbose:
DEBUG = True
- logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S',
- level=logging.DEBUG if DEBUG else logging.INFO)
+ logging.basicConfig(
+ format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ level=logging.DEBUG if DEBUG else logging.INFO)
logger = logging.getLogger('dispatcher')
-
- exchange = 'rabbitmq://%s:%s@%s:%s/%s' % (opts.user, opts.password, opts.host, opts.port, opts.exchange)
+
+ exchange = 'rabbitmq://%s:%s@%s:%s/%s' % (
+ opts.user, opts.password, opts.host, opts.port, opts.exchange)
connection = exchange_connect(exchange)
if opts.test:
exchange_send(connection, opts.key, {"test": "0123456789"})
exchange_close(connection)
sys.exit()
-
+
callback = None
if opts.callback:
cb = opts.callback.rsplit('.', 1)
__import__(cb[0])
cb_module = sys.modules[cb[0]]
callback = getattr(cb_module, cb[1])
-
+
def handle_message(msg):
+ print msg
logger.debug('%s', msg)
if callback:
callback(msg)
-
+
exchange_route(connection, opts.key, opts.queue)
queue_callback(connection, opts.queue, handle_message)
try:
#!/usr/bin/env python
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from cStringIO import StringIO
from errno import (EACCES, EBADF, EINVAL, EISDIR, EIO, ENOENT, ENOTDIR,
- ENOTEMPTY)
+ ENOTEMPTY)
from getpass import getuser
from stat import S_IFDIR, S_IFREG
from sys import argv
def __init__(self, verbose=False):
self.verbose = verbose
self.client = OOS_Client(get_url(), get_auth(), get_user())
-
+
def __call__(self, op, path, *args):
container, sep, object = path[1:].partition('/')
if self.verbose:
data = repr(args)[:100]
print '-> %s %r %r %r' % (op, container, object, data)
ret = '[Unhandled Exception]'
-
+
try:
if object:
func = getattr(self, 'object_' + op, None)
# Fallback to defaults
func = getattr(self, op)
funcargs = (path,) + args
-
+
ret = func(*funcargs)
return ret
except FuseOSError, e:
finally:
if self.verbose:
print '<-', op, repr(ret)
-
-
+
def _get_container_meta(self, container, **kwargs):
try:
return self.client.retrieve_container_metadata(container, **kwargs)
except Fault:
raise FuseOSError(ENOENT)
-
+
def _get_object_meta(self, container, object, **kwargs):
try:
return self.client.retrieve_object_metadata(container, object,
**kwargs)
except Fault:
raise FuseOSError(ENOENT)
-
-
+
# Global
-
def statfs(self, path):
- return dict(f_bsize=1024, f_blocks=1024**2, f_bfree=1024**2,
- f_bavail=1024**2)
-
-
+ return dict(f_bsize=1024, f_blocks=1024 ** 2, f_bfree=1024 ** 2,
+ f_bavail=1024 ** 2)
+
# Account Level
-
def account_chmod(self, mode):
self.client.update_account_metadata(mode=str(mode))
-
+
def account_chown(self, uid, gid):
self.client.update_account_metadata(uid=uid, gid=gid)
-
+
def account_getattr(self, fh=None):
meta = self.client.retrieve_account_metadata()
mode = int(meta.get('x-account-meta-mode', 0755))
count = int(meta['x-account-container-count'])
uid = int(meta.get('x-account-meta-uid', 0))
gid = int(meta.get('x-account-meta-gid', 0))
-
+
return {
'st_mode': S_IFDIR | mode,
'st_nlink': 2 + count,
'st_ctime': epoch,
'st_mtime': modified,
'st_atime': modified}
-
+
def account_getxattr(self, name, position=0):
meta = self.client.retrieve_account_metadata(restricted=True)
return meta.get('xattr-' + name, '')
-
+
def account_listxattr(self):
meta = self.client.retrieve_account_metadata(restricted=True)
prefix = 'xattr-'
return [k[len(prefix):] for k in meta if k.startswith(prefix)]
-
+
def account_readdir(self, fh):
return ['.', '..'] + self.client.list_containers() or []
-
+
def account_removexattr(self, name):
attr = 'xattr-' + name
self.client.delete_account_metadata([attr])
-
+
def account_setxattr(self, name, value, options, position=0):
attr = 'xattr-' + name
meta = {attr: value}
self.client.update_account_metadata(**meta)
-
-
+
# Container Level
-
def container_chmod(self, container, mode):
self.client.update_container_metadata(container, mode=str(mode))
-
+
def container_chown(self, container, uid, gid):
self.client.update_container_metadata(container, uid=uid, gid=gid)
-
+
def container_getattr(self, container, fh=None):
meta = self._get_container_meta(container)
mode = int(meta.get('x-container-meta-mode', 0755))
count = int(meta['x-container-object-count'])
uid = int(meta.get('x-account-meta-uid', 0))
gid = int(meta.get('x-account-meta-gid', 0))
-
+
return {
'st_mode': S_IFDIR | mode,
'st_nlink': 2 + count,
'st_ctime': epoch,
'st_mtime': modified,
'st_atime': modified}
-
+
def container_getxattr(self, container, name, position=0):
meta = self._get_container_meta(container)
return meta.get('xattr-' + name, '')
-
+
def container_listxattr(self, container):
meta = self._get_container_meta(container, restricted=True)
prefix = 'xattr-'
return [k[len(prefix):] for k in meta if k.startswith(prefix)]
-
+
def container_mkdir(self, container, mode):
mode = str(mode & 0777)
self.client.create_container(container, mode=mode)
-
+
def container_readdir(self, container, fh):
objects = self.client.list_objects(container, delimiter='/', prefix='')
files = [o for o in objects if not o.endswith('/')]
return ['.', '..'] + files
-
+
def container_removexattr(self, container, name):
attr = 'xattr-' + name
self.client.delete_container_metadata(container, [attr])
-
+
def container_rename(self, container, path):
new_container, sep, new_object = path[1:].partition('/')
if not new_container or new_object:
raise FuseOSError(EINVAL)
self.client.delete_container(container)
self.client.create_container(new_container)
-
+
def container_rmdir(self, container):
try:
self.client.delete_container(container)
except Fault:
raise FuseOSError(ENOENT)
-
+
def container_setxattr(self, container, name, value, options, position=0):
attr = 'xattr-' + name
meta = {attr: value}
self.client.update_container_metadata(container, **meta)
-
-
+
# Object Level
-
def object_chmod(self, container, object, mode):
self.client.update_object_metadata(container, object, mode=str(mode))
-
+
def object_chown(self, container, uid, gid):
self.client.update_object_metadata(container, object,
- uid=str(uid), gid=str(gid))
-
+ uid=str(uid), gid=str(gid))
+
def object_create(self, container, object, mode, fi=None):
mode &= 0777
self.client.create_object(container, object,
- f=None,
- content_type='application/octet-stream',
- mode=str(mode))
+ f=None,
+ content_type='application/octet-stream',
+ mode=str(mode))
return 0
-
+
def object_getattr(self, container, object, fh=None):
meta = self._get_object_meta(container, object)
modified = parse_http_date(meta['last-modified'])
uid = int(meta.get('x-account-meta-uid', 0))
gid = int(meta.get('x-account-meta-gid', 0))
size = int(meta.get('content-length', 0))
-
+
if meta['content-type'].split(';', 1)[0].strip() == 'application/directory':
mode = int(meta.get('x-object-meta-mode', 0755))
flags = S_IFDIR
mode = int(meta.get('x-object-meta-mode', 0644))
flags = S_IFREG
nlink = 1
-
+
return {
'st_mode': flags | mode,
'st_nlink': nlink,
'st_mtime': modified,
'st_atime': modified,
'st_size': size}
-
+
def object_getxattr(self, container, object, name, position=0):
meta = self._get_object_meta(container, object, restricted=True)
return meta.get('xattr-' + name, '')
-
+
def object_listxattr(self, container, object):
meta = self._get_object_meta(container, object, restricted=True)
prefix = 'xattr-'
return [k[len(prefix):] for k in meta if k.startswith(prefix)]
-
+
def object_mkdir(self, container, object, mode):
mode = str(mode & 0777)
self.client.create_directory_marker(container, object)
self.client.update_object_metadata(container, object, mode=mode)
-
+
def object_read(self, container, object, nbyte, offset, fh):
data = self.client.retrieve_object(container, object)
return data[offset:offset + nbyte]
-
+
def object_readdir(self, container, object, fh):
objects = self.client.list_objects(container, delimiter='/',
- prefix=object)
+ prefix=object)
files = [o.rpartition('/')[2] for o in objects if not o.endswith('/')]
return ['.', '..'] + files
-
+
def object_removexattr(self, container, object, name):
attr = 'xattr-' + name
self.client.delete_object_metadata(container, object, [attr])
-
+
def object_rename(self, container, object, path):
new_container, sep, new_object = path[1:].partition('/')
if not new_container or not new_object:
raise FuseOSError(EINVAL)
self.client.move_object(container, object, new_container, new_object)
-
+
def object_rmdir(self, container, object):
self.client.delete_object(container, object)
-
+
def object_setxattr(self, container, object, name, value, options,
position=0):
attr = 'xattr-' + name
meta = {attr: value}
self.client.update_object_metadata(container, object, **meta)
-
+
def object_truncate(self, container, object, length, fh=None):
data = self.client.retrieve_object(container, object)
f = StringIO(data[:length])
self.client.update_object(container, object, f)
-
+
def object_unlink(self, container, object):
self.client.delete_object(container, object)
-
+
def object_write(self, container, object, data, offset, fh):
f = StringIO(data)
self.client.update_object(container, object, f, offset=offset)
if len(argv) != 2:
print 'usage: %s <mountpoint>' % argv[0]
exit(1)
-
+
user = getuser()
fs = StoreFS(verbose=True)
fuse = FUSE(fs, argv[1], foreground=True)
if __name__ == '__main__':
main()
-
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
import urllib
import datetime
-ERROR_CODES = {304:'Not Modified',
- 400:'Bad Request',
- 401:'Unauthorized',
- 403:'Forbidden',
- 404:'Not Found',
- 409:'Conflict',
- 411:'Length Required',
- 412:'Precondition Failed',
- 413:'Request Entity Too Large',
- 416:'Range Not Satisfiable',
- 422:'Unprocessable Entity',
- 500:'Internal Server Error',
- 501:'Not Implemented'}
+ERROR_CODES = {304: 'Not Modified',
+ 400: 'Bad Request',
+ 401: 'Unauthorized',
+ 403: 'Forbidden',
+ 404: 'Not Found',
+ 409: 'Conflict',
+ 411: 'Length Required',
+ 412: 'Precondition Failed',
+ 413: 'Request Entity Too Large',
+ 416: 'Range Not Satisfiable',
+ 422: 'Unprocessable Entity',
+ 500: 'Internal Server Error',
+ 501: 'Not Implemented'}
+
class Fault(Exception):
def __init__(self, data='', status=None):
self.data = data
self.status = status
+
class Client(object):
def __init__(self, url, token, account, verbose=False, debug=False):
"""`url` can also include a port, e.g '127.0.0.1:8000'."""
-
+
self.url = url
self.account = account
self.verbose = verbose or debug
self.debug = debug
self.token = token
-
+
def _req(self, method, path, body=None, headers={}, format='text', params={}):
p = urlparse(self.url)
if p.scheme == 'http':
conn = HTTPSConnection(p.netloc)
else:
raise Exception('Unknown URL scheme')
-
+
full_path = _prepare_path(p.path + path, format, params)
-
+
kwargs = {}
kwargs['headers'] = _prepare_headers(headers)
kwargs['headers']['X-Auth-Token'] = self.token
if body:
kwargs['body'] = body
- kwargs['headers'].setdefault('content-type', 'application/octet-stream')
- kwargs['headers'].setdefault('content-length', len(body) if body else 0)
-
+ kwargs['headers'].setdefault(
+ 'content-type', 'application/octet-stream')
+ kwargs['headers'].setdefault('content-length', len(body)
+ if body else 0)
+
#print '#', method, full_path, kwargs
#t1 = datetime.datetime.utcnow()
conn.request(method, full_path, **kwargs)
-
+
resp = conn.getresponse()
#t2 = datetime.datetime.utcnow()
#print 'response time:', str(t2-t1)
return _handle_response(resp, self.verbose, self.debug)
-
+
def _chunked_transfer(self, path, method='PUT', f=stdin, headers=None,
blocksize=1024, params={}):
        """performs a chunked request"""
conn = HTTPSConnection(p.netloc)
else:
raise Exception('Unknown URL scheme')
-
+
full_path = _prepare_path(p.path + path, params=params)
-
+
headers.setdefault('content-type', 'application/octet-stream')
-
+
conn.putrequest(method, full_path)
conn.putheader('x-auth-token', self.token)
conn.putheader('transfer-encoding', 'chunked')
- for k,v in _prepare_headers(headers).items():
+ for k, v in _prepare_headers(headers).items():
conn.putheader(k, v)
conn.endheaders()
-
+
# write body
data = ''
while True:
except:
#retry
conn.send(data)
-
+
resp = conn.getresponse()
return _handle_response(resp, self.verbose, self.debug)
-
+
def delete(self, path, format='text', params={}):
return self._req('DELETE', path, format=format, params=params)
-
+
def get(self, path, format='text', headers={}, params={}):
return self._req('GET', path, headers=headers, format=format,
- params=params)
-
+ params=params)
+
def head(self, path, format='text', params={}):
- return self._req('HEAD', path, format=format, params=params)
-
+ return self._req('HEAD', path, format=format, params=params)
+
def post(self, path, body=None, format='text', headers=None, params={}):
return self._req('POST', path, body, headers=headers, format=format,
- params=params)
-
+ params=params)
+
def put(self, path, body=None, format='text', headers=None, params={}):
return self._req('PUT', path, body, headers=headers, format=format,
params=params)
-
+
def _list(self, path, format='text', params={}, **headers):
status, headers, data = self.get(path, format=format, headers=headers,
params=params)
else:
data = data.split('\n')[:-1] if data else ''
return data
-
+
def _get_metadata(self, path, prefix=None, params={}):
status, headers, data = self.head(path, params=params)
prefixlen = len(prefix) if prefix else 0
key = key[prefixlen:]
meta[key] = val
return meta
-
+
def _filter(self, l, d):
"""
filter out from l elements having the metadata values provided
"""
ll = l
for elem in l:
- if type(elem) == types.DictionaryType:
+ if isinstance(elem, types.DictionaryType):
for key in d.keys():
k = 'x_object_meta_%s' % key
if k in elem.keys() and elem[k] == d[key]:
ll.remove(elem)
break
return ll
-
+
+
class OOS_Client(Client):
"""Openstack Object Storage Client"""
-
+
def _update_metadata(self, path, entity, **meta):
"""adds new and updates the values of previously set metadata"""
ex_meta = self.retrieve_account_metadata(restricted=True)
ex_meta.update(meta)
headers = {}
prefix = 'x-%s-meta-' % entity
- for k,v in ex_meta.items():
+ for k, v in ex_meta.items():
k = '%s%s' % (prefix, k)
headers[k] = v
return self.post(path, headers=headers)
-
+
def _reset_metadata(self, path, entity, **meta):
"""
overwrites all user defined metadata
"""
headers = {}
prefix = 'x-%s-meta-' % entity
- for k,v in meta.items():
+ for k, v in meta.items():
k = '%s%s' % (prefix, k)
headers[k] = v
return self.post(path, headers=headers)
-
+
def _delete_metadata(self, path, entity, meta=[]):
"""delete previously set metadata"""
ex_meta = self.retrieve_account_metadata(restricted=True)
if k in meta:
headers['%s%s' % (prefix, k)] = ex_meta[k]
return self.post(path, headers=headers)
-
+
# Storage Account Services
-
+
def list_containers(self, format='text', limit=None,
marker=None, params={}, account=None, **headers):
"""lists containers"""
account = account or self.account
path = '/%s' % account
- params.update({'limit':limit, 'marker':marker})
+ params.update({'limit': limit, 'marker': marker})
return self._list(path, format, params, **headers)
-
+
def retrieve_account_metadata(self, restricted=False, account=None, **params):
"""returns the account metadata"""
account = account or self.account
path = '/%s' % account
prefix = 'x-account-meta-' if restricted else None
return self._get_metadata(path, prefix, params)
-
+
def update_account_metadata(self, account=None, **meta):
"""updates the account metadata"""
account = account or self.account
path = '/%s' % account
return self._update_metadata(path, 'account', **meta)
-
+
def delete_account_metadata(self, meta=[], account=None):
"""deletes the account metadata"""
account = account or self.account
path = '/%s' % account
return self._delete_metadata(path, 'account', meta)
-
+
def reset_account_metadata(self, account=None, **meta):
"""resets account metadata"""
account = account or self.account
path = '/%s' % account
return self._reset_metadata(path, 'account', **meta)
-
+
# Storage Container Services
-
+
def _filter_trashed(self, l):
- return self._filter(l, {'trash':'true'})
-
+ return self._filter(l, {'trash': 'true'})
+
def list_objects(self, container, format='text',
limit=None, marker=None, prefix=None, delimiter=None,
path=None, include_trashed=False, params={}, account=None,
**headers):
"""returns a list with the container objects"""
account = account or self.account
- params.update({'limit':limit, 'marker':marker, 'prefix':prefix,
- 'delimiter':delimiter, 'path':path})
+ params.update({'limit': limit, 'marker': marker, 'prefix': prefix,
+ 'delimiter': delimiter, 'path': path})
l = self._list('/%s/%s' % (account, container), format, params,
**headers)
#TODO support filter trashed with xml also
if format != 'xml' and not include_trashed:
l = self._filter_trashed(l)
return l
-
+
def create_container(self, container, account=None, meta={}, **headers):
"""creates a container"""
account = account or self.account
if not headers:
headers = {}
- for k,v in meta.items():
- headers['x-container-meta-%s' %k.strip().upper()] = v.strip()
+ for k, v in meta.items():
+ headers['x-container-meta-%s' % k.strip().upper()] = v.strip()
status, header, data = self.put('/%s/%s' % (account, container),
headers=headers)
if status == 202:
elif status != 201:
raise Fault(data, int(status))
return True
-
+
def delete_container(self, container, params={}, account=None):
"""deletes a container"""
account = account or self.account
return self.delete('/%s/%s' % (account, container), params=params)
-
+
def retrieve_container_metadata(self, container, restricted=False,
account=None, **params):
"""returns the container metadata"""
prefix = 'x-container-meta-' if restricted else None
return self._get_metadata('/%s/%s' % (account, container), prefix,
params)
-
+
def update_container_metadata(self, container, account=None, **meta):
        """updates the container metadata"""
account = account or self.account
return self._update_metadata('/%s/%s' % (account, container),
'container', **meta)
-
+
def delete_container_metadata(self, container, meta=[], account=None):
"""deletes the container metadata"""
account = account or self.account
path = '/%s/%s' % (account, container)
return self._delete_metadata(path, 'container', meta)
-
+
# Storage Object Services
-
+
def request_object(self, container, object, format='text', params={},
account=None, **headers):
"""returns tuple containing the status, headers and data response for an object request"""
path = '/%s/%s/%s' % (account, container, object)
status, headers, data = self.get(path, format, headers, params)
return status, headers, data
-
+
def retrieve_object(self, container, object, format='text', params={},
account=None, **headers):
"""returns an object's data"""
elif format == 'xml':
data = minidom.parseString(data)
return data
-
- def retrieve_object_hashmap(self, container, object, format='json', params={},
- account=None, **headers):
+
+ def retrieve_object_hashmap(
+ self, container, object, format='json', params={},
+ account=None, **headers):
"""returns the hashmap representing object's data"""
if not params:
params = {}
- params.update({'hashmap':None})
+ params.update({'hashmap': None})
return self.retrieve_object(container, object, params, format, account, **headers)
-
+
def create_directory_marker(self, container, object, account=None):
        """creates a directory marker"""
account = account or self.account
if not object:
raise Fault('Directory markers have to be nested in a container')
- h = {'content_type':'application/directory'}
- return self.create_zero_length_object(container, object, account=account,
- **h)
-
+ h = {'content_type': 'application/directory'}
+ return self.create_zero_length_object(
+ container, object, account=account,
+ **h)
+
def create_object(self, container, object, f=stdin, format='text', meta={},
params={}, etag=None, content_type=None, content_encoding=None,
content_disposition=None, account=None, **headers):
"""creates a zero-length object"""
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
- for k, v in headers.items():
- if v == None:
+ for k, v in headers.items():
+ if v is None:
headers.pop(k)
-
+
l = ['etag', 'content_encoding', 'content_disposition', 'content_type']
l = [elem for elem in l if eval(elem)]
for elem in l:
- headers.update({elem:eval(elem)})
+ headers.update({elem: eval(elem)})
headers.setdefault('content-type', 'application/octet-stream')
-
- for k,v in meta.items():
- headers['x-object-meta-%s' %k.strip()] = v.strip()
+
+ for k, v in meta.items():
+ headers['x-object-meta-%s' % k.strip()] = v.strip()
data = f.read() if f else None
return self.put(path, data, format, headers=headers, params=params)
-
+
def create_zero_length_object(self, container, object, meta={}, etag=None,
content_type=None, content_encoding=None,
content_disposition=None, account=None,
args.pop(elem)
args.update(headers)
return self.create_object(container, account=account, f=None, **args)
-
+
def update_object(self, container, object, f=stdin,
offset=None, meta={}, params={}, content_length=None,
content_type=None, content_encoding=None,
- content_disposition=None, account=None, **headers):
+ content_disposition=None, account=None, **headers):
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
- for k, v in headers.items():
- if v == None:
+ for k, v in headers.items():
+ if v is None:
headers.pop(k)
-
+
l = ['content_encoding', 'content_disposition', 'content_type',
'content_length']
l = [elem for elem in l if eval(elem)]
for elem in l:
- headers.update({elem:eval(elem)})
-
+ headers.update({elem: eval(elem)})
+
if 'content_range' not in headers.keys():
- if offset != None:
+ if offset is not None:
headers['content_range'] = 'bytes %s-/*' % offset
else:
headers['content_range'] = 'bytes */*'
-
- for k,v in meta.items():
- headers['x-object-meta-%s' %k.strip()] = v.strip()
+
+ for k, v in meta.items():
+ headers['x-object-meta-%s' % k.strip()] = v.strip()
data = f.read() if f else None
return self.post(path, data, headers=headers, params=params)
-
+
def update_object_using_chunks(self, container, object, f=stdin,
blocksize=1024, offset=None, meta={},
params={}, content_type=None, content_encoding=None,
l = ['content_type', 'content_encoding', 'content_disposition']
l = [elem for elem in l if eval(elem)]
for elem in l:
- headers.update({elem:eval(elem)})
-
- if offset != None:
+ headers.update({elem: eval(elem)})
+
+ if offset is not None:
headers['content_range'] = 'bytes %s-/*' % offset
else:
headers['content_range'] = 'bytes */*'
-
- for k,v in meta.items():
+
+ for k, v in meta.items():
v = v.strip()
- headers['x-object-meta-%s' %k.strip()] = v
+ headers['x-object-meta-%s' % k.strip()] = v
return self._chunked_transfer(path, 'POST', f, headers=headers,
blocksize=blocksize, params=params)
-
+
def _change_obj_location(self, src_container, src_object, dst_container,
dst_object, remove=False, meta={}, account=None,
content_type=None, delimiter=None, **headers):
else:
params['ignore_content_type'] = ''
if delimiter:
- params['delimiter'] = delimiter
+ params['delimiter'] = delimiter
return self.put(path, headers=headers, params=params)
-
+
def copy_object(self, src_container, src_object, dst_container, dst_object,
- meta={}, account=None, content_type=None, delimiter=None, **headers):
+ meta={}, account=None, content_type=None, delimiter=None, **headers):
"""copies an object"""
account = account or self.account
return self._change_obj_location(src_container, src_object,
- dst_container, dst_object, account=account,
- remove=False, meta=meta,
- content_type=content_type, delimiter=delimiter, **headers)
-
+ dst_container, dst_object, account=account,
+ remove=False, meta=meta,
+ content_type=content_type, delimiter=delimiter, **headers)
+
def move_object(self, src_container, src_object, dst_container,
- dst_object, meta={}, account=None,
- content_type=None, **headers):
+ dst_object, meta={}, account=None,
+ content_type=None, **headers):
"""moves an object"""
account = account or self.account
return self._change_obj_location(src_container, src_object,
account=account, remove=True,
meta=meta, content_type=content_type,
**headers)
-
+
def delete_object(self, container, object, params={}, account=None):
"""deletes an object"""
account = account or self.account
return self.delete('/%s/%s/%s' % (account, container, object),
params=params)
-
+
def retrieve_object_metadata(self, container, object, restricted=False,
version=None, account=None):
"""
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
prefix = 'x-object-meta-' if restricted else None
- params = {'version':version} if version else {}
+ params = {'version': version} if version else {}
return self._get_metadata(path, prefix, params=params)
-
+
def update_object_metadata(self, container, object, account=None,
**meta):
"""
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
return self._update_metadata(path, 'object', **meta)
-
+
def delete_object_metadata(self, container, object, meta=[], account=None):
"""
deletes object's metadata
account = account or self.account
path = '/%s/%s' % (account, container, object)
return self._delete_metadata(path, 'object', meta)
-
+
+
class Pithos_Client(OOS_Client):
"""Pithos Storage Client. Extends OOS_Client"""
-
+
def _update_metadata(self, path, entity, **meta):
"""
adds new and updates the values of previously set metadata
"""
- params = {'update':None}
+ params = {'update': None}
headers = {}
prefix = 'x-%s-meta-' % entity
- for k,v in meta.items():
+ for k, v in meta.items():
k = '%s%s' % (prefix, k)
headers[k] = v
return self.post(path, headers=headers, params=params)
-
+
def _delete_metadata(self, path, entity, meta=[]):
"""
delete previously set metadata
"""
- params = {'update':None}
+ params = {'update': None}
headers = {}
prefix = 'x-%s-meta-' % entity
for m in meta:
headers['%s%s' % (prefix, m)] = ''
return self.post(path, headers=headers, params=params)
-
+
# Storage Account Services
-
+
def list_containers(self, format='text', if_modified_since=None,
if_unmodified_since=None, limit=None, marker=None,
shared=False, until=None, account=None, public=False):
"""returns a list with the account containers"""
account = account or self.account
- params = {'until':until} if until else {}
+ params = {'until': until} if until else {}
if shared:
params['shared'] = None
if public:
params['public'] = None
- headers = {'if-modified-since':if_modified_since,
- 'if-unmodified-since':if_unmodified_since}
+ headers = {'if-modified-since': if_modified_since,
+ 'if-unmodified-since': if_unmodified_since}
return OOS_Client.list_containers(self, account=account, format=format,
limit=limit, marker=marker,
params=params, **headers)
-
+
def retrieve_account_metadata(self, restricted=False, until=None,
account=None):
"""returns the account metadata"""
account = account or self.account
- params = {'until':until} if until else {}
+ params = {'until': until} if until else {}
return OOS_Client.retrieve_account_metadata(self, account=account,
restricted=restricted,
**params)
-
+
def set_account_groups(self, account=None, **groups):
"""create account groups"""
account = account or self.account
headers = {}
for k, v in groups.items():
headers['x-account-group-%s' % k] = v
- params = {'update':None}
+ params = {'update': None}
return self.post(path, headers=headers, params=params)
-
+
def retrieve_account_groups(self, account=None):
"""returns the account groups"""
account = account or self.account
key = key[prefixlen:]
groups[key] = val
return groups
-
+
def unset_account_groups(self, groups=[], account=None):
"""delete account groups"""
account = account or self.account
headers = {}
for elem in groups:
headers['x-account-group-%s' % elem] = ''
- params = {'update':None}
+ params = {'update': None}
return self.post(path, headers=headers, params=params)
-
+
def reset_account_groups(self, account=None, **groups):
"""overrides account groups"""
account = account or self.account
headers['x-account-group-%s' % k] = v
meta = self.retrieve_account_metadata(restricted=True)
prefix = 'x-account-meta-'
- for k,v in meta.items():
+ for k, v in meta.items():
k = '%s%s' % (prefix, k)
headers[k] = v
return self.post(path, headers=headers)
-
+
# Storage Container Services
def create_container(self, container, account=None, meta={}, policies={}):
"""creates a container"""
for k, v in policies.items():
args['X-Container-Policy-%s' % k.capitalize()] = v
return OOS_Client.create_container(self, container, account, meta, **args)
-
+
def list_objects(self, container, format='text',
limit=None, marker=None, prefix=None, delimiter=None,
path=None, shared=False, include_trashed=False, params={},
until=None, account=None, public=False):
"""returns a list with the container objects"""
account = account or self.account
- params = {'until':until, 'meta':meta}
+ params = {'until': until, 'meta': meta}
if shared:
params['shared'] = None
if public:
for elem in ['self', 'container', 'params', 'until', 'meta']:
args.pop(elem)
return OOS_Client.list_objects(self, container, params=params, **args)
-
+
def retrieve_container_metadata(self, container, restricted=False,
until=None, account=None):
"""returns container's metadata"""
account = account or self.account
- params = {'until':until} if until else {}
+ params = {'until': until} if until else {}
return OOS_Client.retrieve_container_metadata(self, container,
account=account,
restricted=restricted,
**params)
-
+
def set_container_policies(self, container, account=None,
**policies):
"""sets containers policies"""
for key, val in policies.items():
headers['x-container-policy-%s' % key] = val
return self.post(path, headers=headers)
-
+
def update_container_data(self, container, f=stdin):
"""adds blocks of data to the container"""
account = self.account
data = f.read() if f else None
headers['content_length'] = len(data)
return self.post(path, data, headers=headers, params=params)
-
+
def delete_container(self, container, until=None, account=None, delimiter=None):
"""deletes a container or the container history until the date provided"""
account = account or self.account
- params = {'until':until} if until else {}
+ params = {'until': until} if until else {}
if delimiter:
- params['delimiter'] = delimiter
+ params['delimiter'] = delimiter
return OOS_Client.delete_container(self, container, account=account,
params=params)
-
+
# Storage Object Services
-
+
def retrieve_object(self, container, object, params={}, format='text',
range=None, if_range=None,
if_match=None, if_none_match=None,
account=None, **headers):
"""returns an object"""
account = account or self.account
- headers={}
+ headers = {}
l = ['range', 'if_range', 'if_match', 'if_none_match',
'if_modified_since', 'if_unmodified_since']
l = [elem for elem in l if eval(elem)]
for elem in l:
- headers.update({elem:eval(elem)})
+ headers.update({elem: eval(elem)})
if format != 'text':
params['hashmap'] = None
return OOS_Client.retrieve_object(self, container, object,
account=account, format=format,
params=params, **headers)
-
+
def retrieve_object_version(self, container, object, version,
format='text', range=None, if_range=None,
if_match=None, if_none_match=None,
l = ['self', 'container', 'object']
for elem in l:
args.pop(elem)
- params = {'version':version}
+ params = {'version': version}
return self.retrieve_object(container, object, params=params, **args)
-
+
def retrieve_object_versionlist(self, container, object, range=None,
if_range=None, if_match=None,
if_none_match=None, if_modified_since=None,
l = ['self', 'container', 'object']
for elem in l:
args.pop(elem)
-
+
return self.retrieve_object_version(container, object, version='list',
format='json', **args)
-
+
def create_zero_length_object(self, container, object,
meta={}, etag=None, content_type=None,
content_encoding=None,
args.pop(elem)
return OOS_Client.create_zero_length_object(self, container, object,
**args)
-
+
def create_folder(self, container, name,
- meta={}, etag=None,
- content_encoding=None,
- content_disposition=None,
- x_object_manifest=None, x_object_sharing=None,
- x_object_public=None, account=None):
- args = locals().copy()
+ meta={}, etag=None,
+ content_encoding=None,
+ content_disposition=None,
+ x_object_manifest=None, x_object_sharing=None,
+ x_object_public=None, account=None):
+ args = locals().copy()
for elem in ['self', 'container', 'name']:
args.pop(elem)
args['content_type'] = 'application/directory'
return self.create_zero_length_object(container, name, **args)
-
+
def create_object(self, container, object, f=stdin, format='text',
meta={}, params={}, etag=None, content_type=None,
content_encoding=None, content_disposition=None,
for elem in ['self', 'container', 'object']:
args.pop(elem)
if format != 'text':
- params.update({'hashmap':None})
+ params.update({'hashmap': None})
return OOS_Client.create_object(self, container, object, **args)
-
+
def create_object_using_chunks(self, container, object,
f=stdin, blocksize=1024, meta={}, etag=None,
content_type=None, content_encoding=None,
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
headers = {}
- l = ['etag', 'content_type', 'content_encoding', 'content_disposition',
+ l = ['etag', 'content_type', 'content_encoding', 'content_disposition',
'x_object_sharing', 'x_object_manifest', 'x_object_public']
l = [elem for elem in l if eval(elem)]
for elem in l:
- headers.update({elem:eval(elem)})
+ headers.update({elem: eval(elem)})
headers.setdefault('content-type', 'application/octet-stream')
-
- for k,v in meta.items():
+
+ for k, v in meta.items():
v = v.strip()
- headers['x-object-meta-%s' %k.strip()] = v
-
+ headers['x-object-meta-%s' % k.strip()] = v
+
return self._chunked_transfer(path, 'PUT', f, headers=headers,
blocksize=blocksize)
-
+
def create_object_by_hashmap(self, container, object, hashmap={},
meta={}, etag=None, content_encoding=None,
content_disposition=None, content_type=None,
x_object_sharing=None, x_object_manifest=None,
- x_object_public = None, account=None):
+ x_object_public=None, account=None):
"""creates an object by uploading hashes representing data instead of data"""
account = account or self.account
args = locals().copy()
for elem in ['self', 'container', 'object', 'hashmap']:
args.pop(elem)
-
+
try:
data = json.dumps(hashmap)
except SyntaxError:
raise Fault('Invalid formatting')
- args['params'] = {'hashmap':None}
+ args['params'] = {'hashmap': None}
args['format'] = 'json'
-
+
return self.create_object(container, object, f=StringIO(data), **args)
-
+
def create_manifestation(self, container, object, manifest, account=None):
"""creates a manifestation"""
account = account or self.account
- headers={'x_object_manifest':manifest}
+ headers = {'x_object_manifest': manifest}
return self.create_object(container, object, f=None, account=account,
**headers)
-
+
def update_object(self, container, object, f=stdin,
offset=None, meta={}, replace=False, content_length=None,
content_type=None, content_range=None,
for elem in ['self', 'container', 'object', 'replace']:
args.pop(elem)
if not replace:
- args['params'] = {'update':None}
+ args['params'] = {'update': None}
return OOS_Client.update_object(self, container, object, **args)
-
+
def update_object_using_chunks(self, container, object, f=stdin,
blocksize=1024, offset=None, meta={},
replace=False, content_type=None, content_encoding=None,
for elem in ['self', 'container', 'object', 'replace']:
args.pop(elem)
if not replace:
- args['params'] = {'update':None}
+ args['params'] = {'update': None}
return OOS_Client.update_object_using_chunks(self, container, object, **args)
-
+
def update_from_other_source(self, container, object, source,
- offset=None, meta={}, content_range=None,
- content_encoding=None, content_disposition=None,
- x_object_bytes=None, x_object_manifest=None,
- x_object_sharing=None, x_object_public=None, account=None):
+ offset=None, meta={}, content_range=None,
+ content_encoding=None, content_disposition=None,
+ x_object_bytes=None, x_object_manifest=None,
+ x_object_sharing=None, x_object_public=None, account=None):
"""updates an object"""
account = account or self.account
args = locals().copy()
for elem in ['self', 'container', 'object', 'source']:
args.pop(elem)
-
+
args['x_source_object'] = source
return self.update_object(container, object, f=None, **args)
-
+
def delete_object(self, container, object, until=None, account=None, delimiter=None):
"""deletes an object or the object history until the date provided"""
account = account or self.account
- params = {'until':until} if until else {}
+ params = {'until': until} if until else {}
if delimiter:
- params['delimiter'] = delimiter
+ params['delimiter'] = delimiter
return OOS_Client.delete_object(self, container, object, params, account)
-
+
    def trash_object(self, container, object, account=None):
"""trashes an object"""
account = account or self.account
path = '/%s/%s' % (container, object)
- meta = {'trash':'true'}
+ meta = {'trash': 'true'}
return self._update_metadata(path, 'object', **meta)
-
+
def restore_object(self, container, object, account=None):
"""restores a trashed object"""
account = account or self.account
        return self.delete_object_metadata(container, object, ['trash'], account)
-
+
def publish_object(self, container, object, account=None):
"""sets a previously created object publicly accessible"""
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
headers = {}
headers['x_object_public'] = True
- params = {'update':None}
+ params = {'update': None}
return self.post(path, headers=headers, params=params)
-
+
def unpublish_object(self, container, object, account=None):
"""unpublish an object"""
account = account or self.account
path = '/%s/%s/%s' % (account, container, object)
headers = {}
headers['x_object_public'] = False
- params = {'update':None}
+ params = {'update': None}
return self.post(path, headers=headers, params=params)
-
+
def copy_object(self, src_container, src_object, dst_container, dst_object,
meta={}, public=False, version=None, account=None,
content_type=None, delimiter=None):
account=account, content_type=content_type,
delimiter=delimiter,
**headers)
-
+
def move_object(self, src_container, src_object, dst_container,
- dst_object, meta={}, public=False,
- account=None, content_type=None, delimiter=None):
+ dst_object, meta={}, public=False,
+ account=None, content_type=None, delimiter=None):
"""moves an object"""
headers = {}
headers['x_object_public'] = public
account=account, content_type=content_type,
delimiter=delimiter,
**headers)
-
+
def list_shared_by_others(self, limit=None, marker=None, format='text'):
"""lists other accounts that share objects to the user"""
l = ['limit', 'marker']
for elem in [elem for elem in l if eval(elem)]:
params[elem] = eval(elem)
return self._list('', format, params)
-
+
def share_object(self, container, object, l, read=True):
"""gives access(read by default) to an object to a user/group list"""
action = 'read' if read else 'write'
sharing = '%s=%s' % (action, ','.join(l))
self.update_object(container, object, f=None, x_object_sharing=sharing)
+
def _prepare_path(path, format='text', params={}):
full_path = '%s?format=%s' % (quote(path), format)
-
- for k,v in params.items():
+
+ for k, v in params.items():
value = quote(str(v)) if v else ''
- full_path = '%s&%s=%s' %(full_path, quote(k), value)
+ full_path = '%s&%s=%s' % (full_path, quote(k), value)
return full_path
+
def _prepare_headers(headers):
- for k,v in headers.items():
+ for k, v in headers.items():
headers.pop(k)
k = k.replace('_', '-')
- headers[quote(k)] = quote(v, safe='/=,:@ *"') if type(v) == types.StringType else v
+ headers[quote(k)] = quote(
+ v, safe='/=,:@ *"') if isinstance(v, types.StringType) else v
return headers
+
def _handle_response(response, verbose=False, debug=False):
headers = response.getheaders()
- headers = dict((unquote(h), unquote(v)) for h,v in headers)
-
+ headers = dict((unquote(h), unquote(v)) for h, v in headers)
+
if verbose:
print '%d %s' % (response.status, response.reason)
for key, val in headers.items():
print '%s: %s' % (key.capitalize(), val)
print
-
+
length = response.getheader('content-length', None)
data = response.read(length)
if debug:
print data
print
-
+
if int(response.status) in ERROR_CODES.keys():
raise Fault(data, int(response.status))
-
+
#print '**', response.status, headers, data, '\n'
return response.status, headers, data
# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
+
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
+
class c_stat(Structure):
pass # Platform dependent
_system = system()
if _system in ('Darwin', 'FreeBSD'):
- _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
+ _libiconv = CDLL(
+ find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
- c_size_t, c_int, c_uint32)
+ c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
- c_size_t, c_uint32)
+ c_size_t, c_uint32)
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
- setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
- getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
-
+ setxattr_t = CFUNCTYPE(
+ c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
+ getxattr_t = CFUNCTYPE(
+ c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
+
_machine = machine()
if _machine == 'x86_64':
c_stat._fields_ = [
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
- setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
- getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
+ setxattr_t = CFUNCTYPE(
+ c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
+ getxattr_t = CFUNCTYPE(
+ c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
+
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
+
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh', c_uint64),
('lock_owner', c_uint64)]
+
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('pid', c_pid_t),
('private_data', c_voidp)]
+
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
- POINTER(fuse_file_info))),
- ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
+ POINTER(fuse_file_info))),
+ (
+ 'write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
- ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
- c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
+ (
+ 'readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
+ c_char_p, POINTER(
+ c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
- ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
+ ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(
+ fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
- ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))),
- ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))),
+ ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(
+ fuse_file_info))),
+ ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(
+ fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
- POINTER(fuse_file_info))),
- ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
+ POINTER(fuse_file_info))),
+ ('lock', CFUNCTYPE(
+ c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
+
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime'):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
-
+
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
-
+
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(key if val == True else '%s=%s' % (key, val)
- for key, val in kwargs.items()))
+ for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
-
+
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper_, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
- sizeof(fuse_ops), None)
+ sizeof(fuse_ops), None)
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
-
+
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
except:
print_exc()
return -EFAULT
-
+
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
-
+
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
-
+
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
-
+
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
-
+
def unlink(self, path):
return self.operations('unlink', path)
-
+
def rmdir(self, path):
return self.operations('rmdir', path)
-
+
def symlink(self, source, target):
return self.operations('symlink', target, source)
-
+
def rename(self, old, new):
return self.operations('rename', old, new)
-
+
def link(self, source, target):
return self.operations('link', target, source)
-
+
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
-
+
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path, uid, gid)
-
+
def truncate(self, path, length):
return self.operations('truncate', path, length)
-
+
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
-
+
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
data = create_string_buffer(ret[:size], size)
memmove(buf, data, size)
return size
-
+
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
-
+
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
if hasattr(stv, key):
setattr(stv, key, val)
return 0
-
+
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
-
+
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
-
+
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
-
+
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
-
+
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
return -ERANGE
memmove(value, buf, retsize)
return retsize
-
+
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
buf = create_string_buffer('\x00'.join(ret)) if ret else ''
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
-
+
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
-
+
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
-
+
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
if filler(buf, name, st, offset) != 0:
break
return 0
-
+
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
-
+
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
-
+
def init(self, conn):
return self.operations('init', '/')
-
+
def destroy(self, private_data):
return self.operations('destroy', '/')
-
+
def access(self, path, amode):
return self.operations('access', path, amode)
-
+
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
else:
fi.fh = self.operations('create', path, mode)
return 0
-
+
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
-
+
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
-
+
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
-
+
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
else:
times = None
return self.operations('utimens', path, times)
-
+
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception
on error.
-
+
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
-
+
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
-
+
def access(self, path, amode):
return 0
-
+
bmap = None
-
+
def chmod(self, path, mode):
raise FuseOSError(EROFS)
-
+
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
-
+
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise FuseOSError(EROFS)
-
+
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
-
+
def flush(self, path, fh):
return 0
-
+
def fsync(self, path, datasync, fh):
return 0
-
+
def fsyncdir(self, path, datasync, fh):
return 0
-
+
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
NOTE: There is an incombatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
-
+
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
-
+
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
-
+
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
-
+
def link(self, target, source):
raise FuseOSError(EROFS)
-
+
def listxattr(self, path):
return []
-
+
lock = None
-
+
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
-
+
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
-
+
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
open(self, path, fi)
and the file handle should be set directly."""
return 0
-
+
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
-
+
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise FuseOSError(EIO)
-
+
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
-
+
def readlink(self, path):
raise FuseOSError(ENOENT)
-
+
def release(self, path, fh):
return 0
-
+
def releasedir(self, path, fh):
return 0
-
+
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
-
+
def rename(self, old, new):
raise FuseOSError(EROFS)
-
+
def rmdir(self, path):
raise FuseOSError(EROFS)
-
+
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
-
+
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
-
+
def symlink(self, target, source):
raise FuseOSError(EROFS)
-
+
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
-
+
def unlink(self, path):
raise FuseOSError(EROFS)
-
+
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
-
+
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from progress.bar import IncrementalBar
+
def file_read_iterator(fp, size=1024):
while True:
data = fp.read(size)
break
yield data
+
class HashMap(list):
-
+
def __init__(self, blocksize, blockhash):
super(HashMap, self).__init__()
self.blocksize = blocksize
self.blockhash = blockhash
-
+
def _hash_raw(self, v):
h = hashlib.new(self.blockhash)
h.update(v)
return h.digest()
-
+
def _hash_block(self, v):
return self._hash_raw(v.rstrip('\x00'))
-
+
def hash(self):
if len(self) == 0:
return self._hash_raw('')
if len(self) == 1:
return self.__getitem__(0)
-
+
h = list(self)
s = 2
while s < len(h):
while len(h) > 1:
h = [self._hash_raw(h[x] + h[x + 1]) for x in range(0, len(h), 2)]
return h[0]
-
+
def load(self, fp):
self.size = 0
file_size = os.fstat(fp.fileno()).st_size
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
from progress.bar import IncrementalBar
+
def upload(client, path, container, prefix, name=None, mimetype=None):
-
+
meta = client.retrieve_container_metadata(container)
blocksize = int(meta['x-container-block-size'])
blockhash = meta['x-container-block-hash']
-
+
size = os.path.getsize(path)
hashes = HashMap(blocksize, blockhash)
hashes.load(open(path))
map = {'bytes': size, 'hashes': [hexlify(x) for x in hashes]}
-
+
objectname = name if name else os.path.split(path)[-1]
object = prefix + objectname
- kwargs = {'mimetype':mimetype} if mimetype else {}
+ kwargs = {'mimetype': mimetype} if mimetype else {}
v = None
try:
v = client.create_object_by_hashmap(container, object, map, **kwargs)
raise
else:
return v
-
- if type(fault.data) == types.StringType:
+
+ if isinstance(fault.data, types.StringType):
missing = json.loads(fault.data)
- elif type(fault.data) == types.ListType:
+ elif isinstance(fault.data, types.ListType):
missing = fault.data
-
+
if '' in missing:
del missing[missing.index(''):]
-
+
bar = IncrementalBar('Uploading', max=len(missing))
bar.suffix = '%(percent).1f%% - %(eta)ds'
with open(path) as fp:
client.update_container_data(container, StringIO(block))
bar.next()
bar.finish()
-
+
return client.create_object_by_hashmap(container, object, map, **kwargs)
+
def download(client, container, object, path):
-
+
res = client.retrieve_object_hashmap(container, object)
blocksize = int(res['block_size'])
blockhash = res['block_hash']
bytes = res['bytes']
map = res['hashes']
-
+
if os.path.exists(path):
h = HashMap(blocksize, blockhash)
h.load(open(path))
else:
open(path, 'w').close() # Create an empty file
hashes = []
-
+
with open(path, 'a+') as fp:
if bytes != 0:
for i, h in enumerate(map):
continue
start = i * blocksize
end = '' if i == len(map) - 1 else ((i + 1) * blocksize) - 1
- data = client.retrieve_object(container, object, range='bytes=%s-%s' % (start, end))
+ data = client.retrieve_object(
+ container, object, range='bytes=%s-%s' % (start, end))
if i != len(map) - 1:
data += (blocksize - len(data)) * '\x00'
fp.seek(start)
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
DEFAULT_USER = 'test'
DEFAULT_TOKEN = '0000'
+
def get_user():
    """Return the Pithos user name from PITHOS_USER, or the default."""
    user = os.environ.get('PITHOS_USER')
    if user is None:
        return DEFAULT_USER
    return user
+
def get_auth():
    """Return the Pithos auth token from PITHOS_TOKEN, or the default."""
    token = os.environ.get('PITHOS_TOKEN')
    if token is None:
        return DEFAULT_TOKEN
    return token
+
def get_url():
try:
return os.environ['PITHOS_URL'].rstrip('/')
#!/usr/bin/env python
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
_cli_commands = {}
+
def cli_command(*args):
def decorator(cls):
cls.commands = args
return cls
return decorator
+
def class_for_cli_command(name):
    """Look up the Command subclass registered for CLI command *name*.

    Raises KeyError for unknown names; the caller treats that as a
    usage error.
    """
    cls = _cli_commands[name]
    return cls
+
class Command(object):
syntax = ''
-
+
def __init__(self, name, argv):
parser = OptionParser('%%prog %s [options] %s' % (name, self.syntax))
parser.add_option('--url', dest='url', metavar='URL',
default=False, help='debug output')
self.add_options(parser)
options, args = parser.parse_args(argv)
-
+
# Add options to self
for opt in parser.option_list:
key = opt.dest
if key:
val = getattr(options, key)
setattr(self, key, val)
-
- self.client = Pithos_Client(self.url, self.token, self.user, self.verbose,
- self.debug)
-
+
+ self.client = Pithos_Client(
+ self.url, self.token, self.user, self.verbose,
+ self.debug)
+
self.parser = parser
self.args = args
-
+
def _build_args(self, attrs):
args = {}
for a in [a for a in attrs if getattr(self, a)]:
def add_options(self, parser):
pass
-
+
def execute(self, *args):
pass
+
@cli_command('list', 'ls')
class List(Command):
syntax = '[<container>[/<object>]]'
description = 'list containers or objects'
-
+
def add_options(self, parser):
parser.add_option('-l', action='store_true', dest='detail',
default=False, help='show detailed output')
default=False, help='show only shared')
parser.add_option('--public', action='store_true', dest='public',
default=False, help='show only public')
-
-
+
def execute(self, container=None):
if container:
self.list_objects(container)
else:
self.list_containers()
-
+
def list_containers(self):
attrs = ['limit', 'marker', 'if_modified_since',
'if_unmodified_since', 'shared', 'public']
args = self._build_args(attrs)
args['format'] = 'json' if self.detail else 'text'
-
+
if getattr(self, 'until'):
t = _time.strptime(self.until, self.format)
args['until'] = int(_time.mktime(t))
-
+
l = self.client.list_containers(**args)
print_list(l)
-
+
def list_objects(self, container):
#prepate params
params = {}
'shared', 'public']
args = self._build_args(attrs)
args['format'] = 'json' if self.detail else 'text'
-
+
if self.until:
t = _time.strptime(self.until, self.format)
args['until'] = int(_time.mktime(t))
-
+
container, sep, object = container.partition('/')
if object:
return
-
+
detail = 'json'
#if request with meta quering disable trash filtering
show_trashed = True if self.meta else False
l = self.client.list_objects(container, **args)
print_list(l, detail=self.detail)
+
@cli_command('meta')
class Meta(Command):
syntax = '[<container>[/<object>]]'
description = 'get account/container/object metadata'
-
+
def add_options(self, parser):
parser.add_option('-r', action='store_true', dest='restricted',
default=False, help='show only user defined metadata')
parser.add_option('--version', action='store', dest='version',
default=None, help='show specific version \
(applies only for objects)')
-
+
def execute(self, path=''):
container, sep, object = path.partition('/')
args = {'restricted': self.restricted}
if getattr(self, 'until'):
t = _time.strptime(self.until, self.format)
args['until'] = int(_time.mktime(t))
-
+
if object:
meta = self.client.retrieve_object_metadata(container, object,
self.restricted,
meta = self.client.retrieve_container_metadata(container, **args)
else:
meta = self.client.retrieve_account_metadata(**args)
- if meta == None:
+ if meta is None:
print 'Entity does not exist'
else:
print_dict(meta, header=None)
+
@cli_command('create')
class CreateContainer(Command):
syntax = '<container> [key=val] [...]'
description = 'create a container'
-
+
def add_options(self, parser):
parser.add_option('--versioning', action='store', dest='versioning',
default=None, help='set container versioning (auto/none)')
parser.add_option('--quota', action='store', dest='quota',
default=None, help='set default container quota')
-
+
def execute(self, container, *args):
meta = {}
for arg in args:
policy['versioning'] = self.versioning
if getattr(self, 'quota'):
policy['quota'] = self.quota
- ret = self.client.create_container(container, meta=meta, policies=policy)
+ ret = self.client.create_container(
+ container, meta=meta, policies=policy)
if not ret:
print 'Container already exists'
+
@cli_command('delete', 'rm')
class Delete(Command):
syntax = '<container>[/<object>]'
description = 'delete a container or an object'
-
+
def add_options(self, parser):
parser.add_option('--until', action='store', dest='until',
default=None, help='remove history until that date')
parser.add_option('-r', action='store_true',
dest='recursive', default=False,
help='mass delimiter objects with delimiter /')
-
+
def execute(self, path):
container, sep, object = path.partition('/')
until = None
if getattr(self, 'until'):
t = _time.strptime(self.until, self.format)
until = int(_time.mktime(t))
-
+
kwargs = {}
if self.delimiter:
kwargs['delimiter'] = self.delimiter
elif self.recursive:
kwargs['delimiter'] = '/'
-
+
if object:
self.client.delete_object(container, object, until, **kwargs)
else:
self.client.delete_container(container, until, **kwargs)
+
@cli_command('get')
class GetObject(Command):
syntax = '<container>/<object>'
description = 'get the data of an object'
-
+
def add_options(self, parser):
parser.add_option('-l', action='store_true', dest='detail',
default=False, help='show detailed output')
parser.add_option('--hashmap', action='store_true',
dest='hashmap', default=False,
help='get the object hashmap instead')
-
+
def execute(self, path):
attrs = ['if_match', 'if_none_match', 'if_modified_since',
'if_unmodified_since', 'hashmap']
args['range'] = 'bytes=%s' % self.range
if getattr(self, 'if_range'):
args['if-range'] = 'If-Range:%s' % getattr(self, 'if_range')
-
+
container, sep, object = path.partition('/')
data = None
if self.versionlist:
args.pop('detail')
args.pop('format')
self.detail = True
- data = self.client.retrieve_object_versionlist(container, object, **args)
+ data = self.client.retrieve_object_versionlist(
+ container, object, **args)
elif self.version:
data = self.client.retrieve_object_version(container, object,
self.version, **args)
args.pop('detail')
args.pop('format')
self.detail = True
- data = self.client.retrieve_object_hashmap(container, object, **args)
+ data = self.client.retrieve_object_hashmap(
+ container, object, **args)
else:
- data = self.client.retrieve_object(container, object, **args)
-
+ data = self.client.retrieve_object(container, object, **args)
+
f = open(self.file, 'w') if self.file else stdout
- if self.detail or type(data) == types.DictionaryType:
+ if self.detail or isinstance(data, types.DictionaryType):
if self.versionlist:
print_versions(data, f=f)
else:
f.write(data)
f.close()
+
@cli_command('mkdir')
class PutMarker(Command):
    """Create a zero-length directory-marker object."""

    syntax = '<container>/<directory marker>'
    description = 'create a directory marker'

    def execute(self, path):
        # Split "<container>/<marker>" on the first slash only.
        container, _, marker = path.partition('/')
        self.client.create_directory_marker(container, marker)
+
@cli_command('put')
class PutObject(Command):
syntax = '<container>/<object> [key=val] [...]'
description = 'create/override object'
-
+
def add_options(self, parser):
- parser.add_option('--use_hashes', action='store_true', dest='use_hashes',
- default=False, help='provide hashmap instead of data')
+ parser.add_option(
+ '--use_hashes', action='store_true', dest='use_hashes',
+ default=False, help='provide hashmap instead of data')
parser.add_option('--chunked', action='store_true', dest='chunked',
default=False, help='set chunked transfer mode')
parser.add_option('--etag', action='store', dest='etag',
parser.add_option('--public', action='store_true',
dest='x_object_public', default=False,
help='make object publicly accessible')
-
+
def execute(self, path, *args):
if path.find('=') != -1:
raise Fault('Missing path argument')
-
+
#prepare user defined meta
meta = {}
for arg in args:
key, sep, val = arg.partition('=')
meta[key] = val
-
+
attrs = ['etag', 'content_encoding', 'content_disposition',
'content_type', 'x_object_sharing', 'x_object_public']
args = self._build_args(attrs)
-
+
container, sep, object = path.partition('/')
-
+
f = None
if self.srcpath:
f = open(self.srcpath) if self.srcpath != '-' else stdin
-
+
if self.use_hashes and not f:
raise Fault('Illegal option combination')
-
+
if self.chunked:
self.client.create_object_using_chunks(container, object, f,
- meta=meta, **args)
+ meta=meta, **args)
elif self.use_hashes:
data = f.read()
hashmap = json.loads(data)
self.client.create_object_by_hashmap(container, object, hashmap,
- meta=meta, **args)
+ meta=meta, **args)
elif self.x_object_manifest:
- self.client.create_manifestation(container, object, self.x_object_manifest)
+ self.client.create_manifestation(
+ container, object, self.x_object_manifest)
elif not f:
- self.client.create_zero_length_object(container, object, meta=meta, **args)
+ self.client.create_zero_length_object(
+ container, object, meta=meta, **args)
else:
self.client.create_object(container, object, f, meta=meta, **args)
if f:
f.close()
+
@cli_command('copy', 'cp')
class CopyObject(Command):
syntax = '<src container>/<src object> [<dst container>/]<dst object> [key=val] [...]'
description = 'copy an object to a different location'
-
+
def add_options(self, parser):
parser.add_option('--version', action='store',
dest='version', default=False,
parser.add_option('-r', action='store_true',
dest='recursive', default=False,
help='mass copy with delimiter /')
-
+
def execute(self, src, dst, *args):
src_container, sep, src_object = src.partition('/')
dst_container, sep, dst_object = dst.partition('/')
-
+
#prepare user defined meta
meta = {}
for arg in args:
key, sep, val = arg.partition('=')
meta[key] = val
-
+
if not sep:
dst_container = src_container
dst_object = dst
-
- args = {'content_type':self.content_type} if self.content_type else {}
+
+ args = {'content_type': self.content_type} if self.content_type else {}
if self.delimiter:
- args['delimiter'] = self.delimiter
+ args['delimiter'] = self.delimiter
elif self.recursive:
- args['delimiter'] = '/'
+ args['delimiter'] = '/'
self.client.copy_object(src_container, src_object, dst_container,
dst_object, meta, self.public, self.version,
**args)
+
@cli_command('set')
class SetMeta(Command):
syntax = '[<container>[/<object>]] key=val [key=val] [...]'
description = 'set account/container/object metadata'
-
+
def execute(self, path, *args):
#in case of account fix the args
if path.find('=') != -1:
else:
self.client.update_account_metadata(**meta)
+
@cli_command('update')
class UpdateObject(Command):
syntax = '<container>/<object> path [key=val] [...]'
description = 'update object metadata/data (default mode: append)'
-
+
def add_options(self, parser):
parser.add_option('-a', action='store_true', dest='append',
default=True, help='append data')
help='provide the presentation style of the object')
parser.add_option('--manifest', action='store', type='str',
dest='x_object_manifest', default=None,
- help='use for large file support')
+ help='use for large file support')
parser.add_option('--sharing', action='store',
dest='x_object_sharing', default=None,
help='define sharing object policy')
parser.add_option('--replace', action='store_true',
dest='replace', default=False,
help='override metadata')
-
+
def execute(self, path, *args):
if path.find('=') != -1:
raise Fault('Missing path argument')
-
+
#prepare user defined meta
meta = {}
for arg in args:
key, sep, val = arg.partition('=')
meta[key] = val
-
-
+
attrs = ['content_encoding', 'content_disposition', 'x_object_sharing',
'x_object_public', 'x_object_manifest', 'replace', 'offset',
'content_range']
args = self._build_args(attrs)
-
+
if self.no_sharing:
args['x_object_sharing'] = ''
-
+
container, sep, object = path.partition('/')
-
+
f = None
if self.srcpath:
f = open(self.srcpath) if self.srcpath != '-' else stdin
-
+
if self.chunked:
self.client.update_object_using_chunks(container, object, f,
- meta=meta, **args)
+ meta=meta, **args)
else:
self.client.update_object(container, object, f, meta=meta, **args)
if f:
f.close()
+
@cli_command('move', 'mv')
class MoveObject(Command):
syntax = '<src container>/<src object> [<dst container>/]<dst object>'
description = 'move an object to a different location'
-
+
def add_options(self, parser):
parser.add_option('--public', action='store_true',
dest='public', default=False,
parser.add_option('-r', action='store_true',
dest='recursive', default=False,
help='mass move objects with delimiter /')
-
+
def execute(self, src, dst, *args):
src_container, sep, src_object = src.partition('/')
dst_container, sep, dst_object = dst.partition('/')
if not sep:
dst_container = src_container
dst_object = dst
-
+
#prepare user defined meta
meta = {}
for arg in args:
key, sep, val = arg.partition('=')
meta[key] = val
-
- args = {'content_type':self.content_type} if self.content_type else {}
+
+ args = {'content_type': self.content_type} if self.content_type else {}
if self.delimiter:
- args['delimiter'] = self.delimiter
+ args['delimiter'] = self.delimiter
elif self.recursive:
- args['delimiter'] = '/'
+ args['delimiter'] = '/'
self.client.move_object(src_container, src_object, dst_container,
dst_object, meta, self.public, **args)
+
@cli_command('unset')
class UnsetObject(Command):
syntax = '<container>/[<object>] key [key] [...]'
description = 'delete metadata info'
-
+
def execute(self, path, *args):
#in case of account fix the args
if len(args) == 0:
else:
self.client.delete_account_metadata(meta)
+
@cli_command('group')
class CreateGroup(Command):
syntax = 'key=val [key=val] [...]'
description = 'create account groups'
-
+
def execute(self, *args):
groups = {}
for arg in args:
groups[key] = val
self.client.set_account_groups(**groups)
+
@cli_command('ungroup')
class DeleteGroup(Command):
    """Remove account group definitions by name."""

    syntax = 'key [key] [...]'
    description = 'delete account groups'

    def execute(self, *args):
        # Each positional argument is a group name to delete.
        group_names = list(args)
        self.client.unset_account_groups(group_names)
+
@cli_command('policy')
class SetPolicy(Command):
    """Set storage policies (e.g. quota, versioning) on a container."""

    syntax = 'container key=val [key=val] [...]'
    description = 'set container policies'

    def execute(self, path, *args):
        # A '=' in the first argument means the container name was omitted.
        if '=' in path:
            raise Fault('Missing container argument')

        container, _, obj = path.partition('/')
        if obj:
            raise Fault('Only containers have policies')

        # Parse each "key=val" argument; for duplicates the last one wins.
        policies = {}
        for pair in args:
            key, _, val = pair.partition('=')
            policies[key] = val

        self.client.set_container_policies(container, **policies)
+
@cli_command('publish')
class PublishObject(Command):
    """Expose an object through a public URL."""

    syntax = '<container>/<object>'
    description = 'publish an object'

    def execute(self, src):
        container, _, object_name = src.partition('/')
        self.client.publish_object(container, object_name)
+
@cli_command('unpublish')
class UnpublishObject(Command):
    """Revoke an object's public URL."""

    syntax = '<container>/<object>'
    description = 'unpublish an object'

    def execute(self, src):
        container, _, object_name = src.partition('/')
        self.client.unpublish_object(container, object_name)
+
@cli_command('sharing')
class SharingObject(Command):
syntax = 'list users sharing objects with the user'
description = 'list user accounts sharing objects with the user'
-
+
def add_options(self, parser):
parser.add_option('-l', action='store_true', dest='detail',
default=False, help='show detailed output')
parser.add_option('--marker', action='store', type='str',
dest='marker', default=None,
help='show output greater then marker')
-
-
+
def execute(self):
attrs = ['limit', 'marker']
args = self._build_args(attrs)
args['format'] = 'json' if self.detail else 'text'
-
+
print_list(self.client.list_shared_by_others(**args))
+
@cli_command('send')
class Send(Command):
    """Upload a local file into a container under an optional prefix."""

    syntax = '<file> <container>[/<prefix>]'
    description = 'upload file to container (using prefix)'

    def execute(self, file, path):
        container, _, prefix = path.partition('/')
        # Delegate the hashmap-based transfer to the module-level helper.
        upload(self.client, file, container, prefix)
+
@cli_command('receive')
class Receive(Command):
    """Download a remote object into a local file."""

    syntax = '<container>/<object> <file>'
    description = 'download object to file'

    def execute(self, path, file):
        container, _, object_name = path.partition('/')
        # Delegate the hashmap-based transfer to the module-level helper.
        download(self.client, container, object_name, file)
+
def print_usage():
cmd = Command('', [])
parser = cmd.parser
parser.usage = '%prog <command> [options]'
parser.print_help()
-
+
commands = []
for cls in set(_cli_commands.values()):
name = ', '.join(cls.commands)
commands.append(' %s %s' % (name.ljust(12), description))
print '\nCommands:\n' + '\n'.join(sorted(commands))
+
def print_dict(d, header='name', f=stdout, detail=True):
header = header if header in d else 'subdir'
if header and header in d:
- f.write('%s\n' %d.pop(header).encode('utf8'))
+ f.write('%s\n' % d.pop(header).encode('utf8'))
if detail:
patterns = ['^x_(account|container|object)_meta_(\w+)$']
patterns.append(patterns[0].replace('_', '-'))
for key, val in sorted(d.items()):
f.write('%s: %s\n' % (key.rjust(30), val))
+
def print_list(l, verbose=False, f=stdout, detail=True):
    """Write each element of *l* to *f*.

    Dicts go through print_dict; strings have server tracebacks stripped
    unless *verbose*; anything else is written with str formatting.
    """
    for item in l:
        # Skip empty strings / falsy placeholders.
        if not item:
            continue
        if isinstance(item, types.DictionaryType):
            print_dict(item, f=f, detail=detail)
        elif isinstance(item, types.StringType):
            if not verbose:
                # Hide server-side tracebacks from normal output.
                item = item.split('Traceback')[0]
            f.write('%s\n' % item)
        else:
            f.write('%s\n' % item)
+
def print_versions(data, f=stdout):
    """Print an object's version list as '<id> @ <datetime>' lines.

    Falls back to printing *data* itself when it carries no versions.
    """
    if 'versions' not in data:
        f.write('%s\n' % data)
        return
    f.write('versions:\n')
    for version_id, timestamp in data['versions']:
        when = datetime.fromtimestamp(float(timestamp))
        f.write('%s @ %s\n' % (str(version_id).rjust(30), when))
def main():
except (IndexError, KeyError):
print_usage()
exit(1)
-
+
cmd = cls(name, argv[2:])
-
+
try:
cmd.execute(*cmd.args)
except TypeError, e:
#!/usr/bin/env python
# Copyright 2011-2012 GRNET S.A. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
-#
+#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
-#
+#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
-#
+#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-#
+#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
self.deleted_dirs = set()
_makedirs(self.trashdir)
-
+
dbpath = join(SETTINGS_DIR, 'sync.db')
self.conn = sqlite3.connect(dbpath)
self.conn.execute(SQL_CREATE_FILES_TABLE)
self.conn.commit()
-
+
def current_hash(self, path):
    """Return the current filesystem hash of *path*.

    Sentinels: 'DEL' for paths queued for deletion, 'DIR' for
    directories; otherwise the Merkle hash of the file contents.
    """
    fullpath = join(self.syncdir, path)
    if fullpath in self.deleted_dirs:
        result = 'DEL'
    elif isdir(fullpath):
        result = 'DIR'
    else:
        result = merkle(fullpath)
    return result
-
+
def delete_inactive(self, timestamp):
    """Purge rows whose timestamp differs from *timestamp* (files that
    were not touched during the current sync pass)."""
    self.conn.execute('DELETE FROM files WHERE timestamp != ?',
                      (timestamp,))
    self.conn.commit()
-
+
def download(self, path, hash):
fullpath = join(self.syncdir, path)
if hash == 'DEL':
else:
print 'Downloading %s...' % path
download(client, self.container, path, fullpath)
-
+
current = self.current_hash(path)
assert current == hash, "Downloaded file does not match hash"
self.save(path, hash)
-
+
def empty_trash(self):
    """Delete every file currently held in the trash directory."""
    for entry in os.listdir(self.trashdir):
        os.remove(join(self.trashdir, entry))
-
+
def find_hash(self, hash):
    """Locate a file whose content hash is *hash*.

    Checks the sync database first, then the trash directory; returns
    the full path, or None when the hash is unknown.
    """
    row = self.conn.execute('SELECT path FROM files WHERE hash = ?',
                            (hash,)).fetchone()
    if row:
        return join(self.syncdir, row[0])

    # Trashed files are stored under their hash as the filename.
    if hash in os.listdir(self.trashdir):
        return join(self.trashdir, hash)

    return None
-
+
def previous_hash(self, path):
    """Return the hash recorded for *path* at the last sync with the
    server, or 'DEL' when no such entry exists."""
    row = self.conn.execute('SELECT hash FROM files WHERE path = ?',
                            (path,)).fetchone()
    if row is None:
        return 'DEL'
    return row[0]
-
+
def remote_hash(self, path):
"""Return the hash of the file according to the server"""
-
+
try:
meta = client.retrieve_object_metadata(self.container, path)
except Fault:
return 'DIR'
else:
return meta['x-object-hash']
-
+
def remove_deleted_dirs(self):
    """Remove all directories queued for deletion.

    Longest paths first, so children are removed before their parents.
    """
    pending = sorted(self.deleted_dirs, key=len, reverse=True)
    for path in pending:
        os.rmdir(path)
        self.deleted_dirs.remove(path)
-
+
def resolve_conflict(self, path, hash):
    """Resolve a sync conflict.

    The local copy is renamed to the first free '<name>.local[N]' path,
    then the remote version is downloaded in its place.
    """
    fullpath = join(self.syncdir, path)
    resolved = fullpath + '.local'
    suffix = 0
    while exists(resolved):
        suffix += 1
        resolved = '%s.local%d' % (fullpath, suffix)

    os.rename(fullpath, resolved)
    self.download(path, hash)
-
+
def rmdir(self, path):
    """Remove a directory, or mark it for later deletion if non-empty.

    An empty directory is removed immediately, followed by any marked
    ancestors that became removable; a non-empty one is only added to
    the pending-deletion set.
    """
    fullpath = join(self.syncdir, path)
    if not exists(fullpath):
        return

    if os.listdir(fullpath):
        # Still has children: postpone until they are gone.
        self.deleted_dirs.add(fullpath)
        return

    os.rmdir(fullpath)
    self.deleted_dirs.discard(fullpath)

    # Walk upwards, clearing ancestors that were waiting on this child.
    ancestor = dirname(fullpath)
    while ancestor in self.deleted_dirs:
        os.rmdir(ancestor)
        self.deleted_dirs.remove(ancestor)
        ancestor = dirname(ancestor)
-
+
def save(self, path, hash):
    """Record *hash* for *path*; later returned by previous_hash()."""
    self.conn.execute(
        'INSERT OR REPLACE INTO files (path, hash) VALUES (?, ?)',
        (path, hash))
    self.conn.commit()
-
+
def touch(self, path, now):
    """Stamp the row for *path* with sync timestamp *now*."""
    self.conn.execute('UPDATE files SET timestamp = ? WHERE path = ?',
                      (now, path))
    self.conn.commit()
-
+
def trash(self, path):
    """Move a regular file into the trash (named by its Merkle hash);
    directories are handed to rmdir() instead."""
    fullpath = join(self.syncdir, path)
    if not exists(fullpath):
        return

    if isfile(fullpath):
        content_hash = merkle(fullpath)
        os.rename(fullpath, join(self.trashdir, content_hash))
    else:
        self.rmdir(path)
-
+
def upload(self, path, hash):
fullpath = join(self.syncdir, path)
if hash == 'DEL':
prefix += '/'
print 'Uploading %s...' % path
upload(client, fullpath, self.container, prefix, name)
-
+
remote = self.remote_hash(path)
assert remote == hash, "Uploaded file does not match hash"
self.save(path, hash)
previous = state.previous_hash(path)
current = state.current_hash(path)
remote = state.remote_hash(path)
-
+
if current == previous:
# No local changes, download any remote changes
if remote != previous:
def walk(dir, container):
"""Iterates on the files of the hierarchy created by merging the files
in `dir` and the objects in `container`."""
-
+
pending = ['']
-
+
while pending:
dirs = set()
files = set()
continue
if root:
yield root
-
+
dirpath = join(dir, root)
if exists(dirpath):
for filename in os.listdir(dirpath):
dirs.add(path)
else:
files.add(path)
-
+
for object in client.list_objects(container, format='json',
- prefix=root, delimiter='/'):
+ prefix=root, delimiter='/'):
if 'subdir' in object:
continue
name = object['name']
dirs.add(name)
else:
files.add(name)
-
+
pending += sorted(dirs)
for path in files:
yield path
if len(sys.argv) != 2:
print 'syntax: %s <dir>' % sys.argv[0]
sys.exit(1)
-
+
syncdir = sys.argv[1]
-
+
_makedirs(SETTINGS_DIR)
container = os.environ.get('PITHOS_SYNC_CONTAINER', DEFAULT_CONTAINER)
client.create_container(container)
-
+
state = State(syncdir, container)
-
+
now = int(time())
for path in walk(syncdir, container):
print 'Syncing', path
sync(path, state)
state.touch(path, now)
-
+
state.delete_inactive(now)
state.empty_trash()
state.remove_deleted_dirs()
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
+
+
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
- show_ignored=False):
+ show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
- and not prefix):
+ and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
- stack.append((fn, prefix + name + "/", package, only_in_packages))
+ stack.append(
+ (fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
- or fn.lower() == pattern.lower()):
+ or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
break
if bad_name:
continue
- out.setdefault(package, []).append(prefix+name)
+ out.setdefault(package, []).append(prefix + name)
return out
setup(
- name = 'snf-pithos-tools',
- version = VERSION,
- license = 'BSD',
- url = 'http://code.grnet.gr/',
- description = SHORT_DESCRIPTION,
- long_description=README + '\n\n' + CHANGES,
- classifiers = CLASSIFIERS,
-
- author = 'Package author',
- author_email = 'author@grnet.gr',
- maintainer = 'Package maintainer',
- maintainer_email = 'maintainer@grnet.gr',
-
- namespace_packages = ['pithos'],
- packages = PACKAGES,
- package_dir= {'': PACKAGES_ROOT},
- include_package_data = True,
- package_data = find_package_data('.'),
- zip_safe = False,
-
- dependency_links = [
+ name='snf-pithos-tools',
+ version=VERSION,
+ license='BSD',
+ url='http://code.grnet.gr/',
+ description=SHORT_DESCRIPTION,
+ long_description=README + '\n\n' + CHANGES,
+ classifiers=CLASSIFIERS,
+
+ author='Package author',
+ author_email='author@grnet.gr',
+ maintainer='Package maintainer',
+ maintainer_email='maintainer@grnet.gr',
+
+ namespace_packages=['pithos'],
+ packages=PACKAGES,
+ package_dir={'': PACKAGES_ROOT},
+ include_package_data=True,
+ package_data=find_package_data('.'),
+ zip_safe=False,
+
+ dependency_links=[
'http://docs.dev.grnet.gr/pypi/'],
- install_requires = INSTALL_REQUIRES,
- extras_require = EXTRAS_REQUIRES,
- tests_require = TESTS_REQUIRES,
-
- entry_points = {
- 'console_scripts': [
- 'pithos-sh = pithos.tools.sh:main',
- 'pithos-sync = pithos.tools.sync:main',
- 'pithos-test = pithos.tools.test:main',
- 'pithos-fs = pithos.tools.fs:main',
- 'pithos-dispatcher = pithos.tools.dispatcher:main',
- ],
- },
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRAS_REQUIRES,
+ tests_require=TESTS_REQUIRES,
+
+ entry_points={
+ 'console_scripts': [
+ 'pithos-sh = pithos.tools.sh:main',
+ 'pithos-sync = pithos.tools.sync:main',
+ 'pithos-test = pithos.tools.test:main',
+ 'pithos-fs = pithos.tools.fs:main',
+ 'pithos-dispatcher = pithos.tools.dispatcher:main',
+ ],
+ },
)