Revision 29148653

b/snf-pithos-app/pithos/api/functions.py
691 691

  
692 692
    try:
693 693
        objects = request.backend.list_object_meta(
694
            request.user_uniq, v_account,
695
            v_container, prefix, delimiter, marker,
696
            limit, virtual, 'pithos', keys, shared, until, None, public_granted)
694
            request.user_uniq, v_account, v_container, prefix, delimiter,
695
            marker, limit, virtual, 'pithos', keys, shared, until, None,
696
            public_granted)
697 697
        object_permissions = {}
698 698
        object_public = {}
699 699
        if until is None:
......
920 920
        try:
921 921
            for x in objects:
922 922
                s, h = \
923
                    request.backend.get_object_hashmap(request.user_uniq,
924
                                                       v_account, src_container,
925
                                                       x[0], x[1])
923
                    request.backend.get_object_hashmap(
924
                        request.user_uniq, v_account, src_container, x[0],
925
                        x[1])
926 926
                sizes.append(s)
927 927
                hashmaps.append(h)
928 928
        except NotAllowedError:
......
1009 1009
        else:
1010 1010
            if TRANSLATE_UUIDS:
1011 1011
                try:
1012
                    src_account = retrieve_uuid(getattr(request, 'token', None),
1013
                                                src_account)
1012
                    src_account = retrieve_uuid(
1013
                        getattr(request, 'token', None), src_account)
1014 1014
                except ItemNotExists:
1015 1015
                    faults.ItemNotFound('Invalid source account')
1016 1016

  
......
1139 1139
    return response
1140 1140

  
1141 1141

  
1142
@api_method('POST', user_required=True, logger=logger, lock_container_path=True)
1142
@api_method('POST', user_required=True, logger=logger,
1143
            lock_container_path=True)
1143 1144
def object_write_form(request, v_account, v_container, v_object):
1144 1145
    # Normal Response Codes: 201
1145 1146
    # Error Response Codes: internalServerError (500),
......
1301 1302
        # Do permissions first, as it may fail easier.
1302 1303
        if permissions is not None:
1303 1304
            try:
1304
                request.backend.update_object_permissions(request.user_uniq,
1305
                                                          v_account,
1306
                                                          v_container, v_object,
1307
                                                          permissions)
1305
                request.backend.update_object_permissions(
1306
                    request.user_uniq, v_account, v_container, v_object,
1307
                    permissions)
1308 1308
            except NotAllowedError:
1309 1309
                raise faults.Forbidden('Not allowed')
1310 1310
            except ItemNotExists:
......
1345 1345

  
1346 1346
    try:
1347 1347
        size, hashmap = \
1348
            request.backend.get_object_hashmap(request.user_uniq,
1349
                                               v_account, v_container, v_object)
1348
            request.backend.get_object_hashmap(
1349
                request.user_uniq, v_account, v_container, v_object)
1350 1350
    except NotAllowedError:
1351 1351
        raise faults.Forbidden('Not allowed')
1352 1352
    except ItemNotExists:
......
1417 1417
                        hashmap[bi] = src_hashmap[sbi]
1418 1418
                    else:
1419 1419
                        data = request.backend.get_block(src_hashmap[sbi])
1420
                        hashmap[bi] = request.backend.update_block(hashmap[bi],
1421
                                                                   data[:bl], 0)
1420
                        hashmap[bi] = request.backend.update_block(
1421
                            hashmap[bi], data[:bl], 0)
1422 1422
                else:
1423 1423
                    hashmap.append(src_hashmap[sbi])
1424 1424
                offset += bl
b/snf-pithos-app/pithos/api/manage_accounts/tests.py
344 344
                   'application/octet-stream', data, meta, permissions))
345 345
            data = get_random_data(int(random.random()))
346 346
            self.utils.create_update_object(
347
                   'account1', container, object,
348
                   'application/octet-stream', data, meta, permissions)
347
                'account1', container, object, 'application/octet-stream',
348
                data, meta, permissions)
349 349

  
350 350
        self.utils.merge_account('Account1', 'account1', only_stats=False,
351 351
                                 dry=False, silent=True)
b/snf-pithos-app/pithos/api/settings.py
27 27

  
28 28
astakos_services = deepcopy(vanilla_astakos_services)
29 29
fill_endpoints(astakos_services, ASTAKOS_BASE_URL)
30
CUSTOMIZE_ASTAKOS_SERVICES = \
31
        getattr(settings, 'PITHOS_CUSTOMIZE_ASTAKOS_SERVICES', ())
30
CUSTOMIZE_ASTAKOS_SERVICES = getattr(settings,
31
                                     'PITHOS_CUSTOMIZE_ASTAKOS_SERVICES', ())
32 32
for path, value in CUSTOMIZE_ASTAKOS_SERVICES:
33 33
    set_path(astakos_services, path, value, createpath=True)
34 34

  
......
42 42
BASE_ASTAKOS_PROXY_PATH = BASE_ASTAKOS_PROXY_PATH.strip('/')
43 43

  
44 44

  
45
ASTAKOSCLIENT_POOLSIZE = getattr(settings, 'PITHOS_ASTAKOSCLIENT_POOLSIZE', 200)
45
ASTAKOSCLIENT_POOLSIZE = getattr(settings, 'PITHOS_ASTAKOSCLIENT_POOLSIZE',
46
                                 200)
46 47

  
47 48
COOKIE_NAME = getattr(settings, 'PITHOS_ASTAKOS_COOKIE_NAME', '_pithos2_a')
48 49

  
......
60 61
BACKEND_BLOCK_UMASK = getattr(settings, 'PITHOS_BACKEND_BLOCK_UMASK', 0o022)
61 62

  
62 63
# Queue for billing.
63
BACKEND_QUEUE_MODULE = getattr(settings, 'PITHOS_BACKEND_QUEUE_MODULE',
64
                               None)  # Example: 'pithos.backends.lib.rabbitmq'
65
BACKEND_QUEUE_HOSTS = getattr(settings, 'PITHOS_BACKEND_QUEUE_HOSTS', None) # Example: "['amqp://guest:guest@localhost:5672']"
66
BACKEND_QUEUE_EXCHANGE = getattr(settings, 'PITHOS_BACKEND_QUEUE_EXCHANGE', 'pithos')
64
BACKEND_QUEUE_MODULE = getattr(settings, 'PITHOS_BACKEND_QUEUE_MODULE', None)
65
# Example: 'pithos.backends.lib.rabbitmq'
66

  
67
BACKEND_QUEUE_HOSTS = getattr(settings, 'PITHOS_BACKEND_QUEUE_HOSTS', None)
68
# Example: "['amqp://guest:guest@localhost:5672']"
69

  
70
BACKEND_QUEUE_EXCHANGE = getattr(settings, 'PITHOS_BACKEND_QUEUE_EXCHANGE',
71
                                 'pithos')
67 72

  
68 73
# Default setting for new accounts.
69 74
BACKEND_ACCOUNT_QUOTA = getattr(
......
71 76
BACKEND_CONTAINER_QUOTA = getattr(
72 77
    settings, 'PITHOS_BACKEND_CONTAINER_QUOTA', 0)
73 78
BACKEND_VERSIONING = getattr(settings, 'PITHOS_BACKEND_VERSIONING', 'auto')
74
BACKEND_FREE_VERSIONING = getattr(settings, 'PITHOS_BACKEND_FREE_VERSIONING', True)
79
BACKEND_FREE_VERSIONING = getattr(settings, 'PITHOS_BACKEND_FREE_VERSIONING',
80
                                  True)
75 81

  
76 82
# Enable backend pooling
77 83
BACKEND_POOL_ENABLED = getattr(settings, 'PITHOS_BACKEND_POOL_ENABLED', True)
......
86 92
SERVICE_TOKEN = getattr(settings, 'PITHOS_SERVICE_TOKEN', '')
87 93

  
88 94
RADOS_STORAGE = getattr(settings, 'PITHOS_RADOS_STORAGE', False)
89
RADOS_POOL_BLOCKS= getattr(settings, 'PITHOS_RADOS_POOL_BLOCKS', 'blocks')
95
RADOS_POOL_BLOCKS = getattr(settings, 'PITHOS_RADOS_POOL_BLOCKS', 'blocks')
90 96
RADOS_POOL_MAPS = getattr(settings, 'PITHOS_RADOS_POOL_MAPS', 'maps')
91 97

  
92 98
# This enables a ui compatibility layer for the introduction of UUIDs in
......
100 106
# the requests on its own.
101 107
PROXY_USER_SERVICES = getattr(settings, 'PITHOS_PROXY_USER_SERVICES', True)
102 108

  
103
# Set how many random bytes to use for constructing the URL of Pithos public files
104
PUBLIC_URL_SECURITY =  getattr(settings, 'PITHOS_PUBLIC_URL_SECURITY', 16)
109
# Set how many random bytes to use for constructing the URL
110
# of Pithos public files
111
PUBLIC_URL_SECURITY = getattr(settings, 'PITHOS_PUBLIC_URL_SECURITY', 16)
105 112
# Set the alphabet to use for constructing the URL of Pithos public files
106
PUBLIC_URL_ALPHABET =  getattr(
113
PUBLIC_URL_ALPHABET = getattr(
107 114
    settings,
108 115
    'PITHOS_PUBLIC_URL_ALPHABET',
109
    '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
110
)
116
    '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
111 117

  
112 118
# The maximum number or items returned by the listing api methods
113 119
API_LIST_LIMIT = getattr(settings, 'PITHOS_API_LIST_LIMIT', 10000)
b/snf-pithos-app/pithos/api/short_url.py
73 73

  
74 74

  
75 75
class UrlEncoder(object):
76
    def __init__(self, alphabet=DEFAULT_ALPHABET, block_size=DEFAULT_BLOCK_SIZE):
76
    def __init__(self, alphabet=DEFAULT_ALPHABET,
77
                 block_size=DEFAULT_BLOCK_SIZE):
77 78
        self.alphabet = alphabet
78 79
        self.block_size = block_size
79 80
        self.mask = (1 << block_size) - 1
b/snf-pithos-app/pithos/api/util.py
56 56
                                 ASTAKOSCLIENT_POOLSIZE,
57 57
                                 SERVICE_TOKEN,
58 58
                                 ASTAKOS_BASE_URL,
59
                                 BACKEND_ACCOUNT_QUOTA, BACKEND_CONTAINER_QUOTA,
60
                                 BACKEND_VERSIONING,
61
                                 BACKEND_FREE_VERSIONING,
59
                                 BACKEND_ACCOUNT_QUOTA,
60
                                 BACKEND_CONTAINER_QUOTA,
61
                                 BACKEND_VERSIONING, BACKEND_FREE_VERSIONING,
62 62
                                 BACKEND_POOL_ENABLED, BACKEND_POOL_SIZE,
63 63
                                 BACKEND_BLOCK_SIZE, BACKEND_HASH_ALGORITHM,
64 64
                                 RADOS_STORAGE, RADOS_POOL_BLOCKS,
......
200 200
            int(meta['until_timestamp']))
201 201
    for k, v in policy.iteritems():
202 202
        response[smart_str(format_header_key('X-Container-Policy-' + k),
203
                           strings_only=True)] = smart_str(v, strings_only=True)
203
                           strings_only=True)] = smart_str(v,
204
                                                           strings_only=True)
204 205

  
205 206

  
206 207
def get_object_headers(request):
......
402 403

  
403 404

  
404 405
def validate_modification_preconditions(request, meta):
405
    """Check that the modified timestamp conforms with the preconditions set."""
406
    """Check the modified timestamp conforms with the preconditions set."""
406 407

  
407 408
    if 'modified' not in meta:
408 409
        return  # TODO: Always return?
......
441 442
    if if_none_match is not None:
442 443
        # TODO: If this passes, must ignore If-Modified-Since header.
443 444
        if etag is not None:
444
            if (if_none_match == '*'
445
                    or etag in [x.lower() for x in parse_etags(if_none_match)]):
445
            if (if_none_match == '*' or etag in [x.lower() for x in
446
                                                 parse_etags(if_none_match)]):
446 447
                # TODO: Continue if an If-Modified-Since header is present.
447 448
                if request.method in ('HEAD', 'GET'):
448 449
                    raise faults.NotModified('Resource ETag matches')
......
697 698

  
698 699

  
699 700
def socket_read_iterator(request, length=0, blocksize=4096):
700
    """Return a maximum of blocksize data read from the socket in each iteration
701
    """Return maximum of blocksize data read from the socket in each iteration
701 702

  
702 703
    Read up to 'length'. If 'length' is negative, will attempt a chunked read.
703 704
    The maximum ammount of data read is controlled by MAX_UPLOAD_SIZE.
b/snf-pithos-backend/pithos/backends/base.py
37 37
DEFAULT_CONTAINER_VERSIONING = 'auto'
38 38

  
39 39

  
40

  
41 40
class NotAllowedError(Exception):
42 41
    pass
43 42

  
......
71 70

  
72 71

  
73 72
class BaseBackend(object):
74
    """Abstract backend class that serves as a reference for actual implementations.
73
    """Abstract backend class.
74

  
75
    This class serves as a reference for actual implementations.
75 76

  
76
    The purpose of the backend is to provide the necessary functions for handling data
77
    and metadata. It is responsible for the actual storage and retrieval of information.
77
    The purpose of the backend is to provide the necessary functions
78
    for handling data and metadata.
78 79

  
79
    Note that the account level is always valid as it is checked from another subsystem.
80
    It is responsible for the actual storage and retrieval of information.
80 81

  
81
    When not replacing metadata/groups/policy, keys with empty values should be deleted.
82
    Note that the account level is always valid as it is checked
83
    from another subsystem.
84

  
85
    When not replacing metadata/groups/policy, keys with empty values
86
    should be deleted.
82 87

  
83 88
    The following variables should be available:
84 89
        'hash_algorithm': Suggested is 'sha256'
85 90

  
86 91
        'block_size': Suggested is 4MB
87 92

  
88
        'default_account_policy': A dictionary with default account policy settings
89
        'default_container_policy': A dictionary with default container policy settings
93
        'default_account_policy': A dictionary with default account policy
94
                                  settings
95
        'default_container_policy': A dictionary with default container policy
96
                                    settings
90 97
    """
91 98

  
92 99
    def close(self):
......
96 103
    def list_accounts(self, user, marker=None, limit=10000):
97 104
        """Return a list of accounts the user can access.
98 105

  
99
        Parameters:
106
        Keyword arguments:
100 107
            'marker': Start list from the next item after 'marker'
101 108

  
102 109
            'limit': Number of containers to return
103 110
        """
104 111
        return []
105 112

  
106
    def get_account_meta(self, user, account, domain, until=None, include_user_defined=True):
113
    def get_account_meta(self, user, account, domain, until=None,
114
                         include_user_defined=True, external_quota=None):
107 115
        """Return a dictionary with the account metadata for the domain.
108 116

  
109 117
        The keys returned are all user-defined, except:
......
117 125

  
118 126
            'until_timestamp': Last modification until the timestamp provided
119 127

  
128
            'external_quota': The quota computed from external quota holder
129
                              mechanism
130

  
120 131
        Raises:
121 132
            NotAllowedError: Operation not permitted
122 133
        """
......
128 139
        Parameters:
129 140
            'domain': Metadata domain
130 141

  
142
        Keyword arguments:
131 143
            'meta': Dictionary with metadata to update
132 144

  
133 145
            'replace': Replace instead of update
......
138 150
        return
139 151

  
140 152
    def get_account_groups(self, user, account):
141
        """Return a dictionary with the user groups defined for this account.
153
        """Return a dictionary with the user groups defined for the account.
142 154

  
143 155
        Raises:
144 156
            NotAllowedError: Operation not permitted
......
161 173
        The keys returned are:
162 174
            'quota': The maximum bytes allowed (default is 0 - unlimited)
163 175

  
164
            'versioning': Can be 'auto', 'manual' or 'none' (default is 'manual')
176
            'versioning': Can be 'auto', 'manual' or 'none' (default: 'manual')
165 177

  
166 178
        Raises:
167 179
            NotAllowedError: Operation not permitted
......
198 210
        """
199 211
        return
200 212

  
201
    def list_containers(self, user, account, marker=None, limit=10000, shared=False, until=None, public=False):
213
    def list_containers(self, user, account, marker=None, limit=10000,
214
                        shared=False, until=None, public=False):
202 215
        """Return a list of container names existing under an account.
203 216

  
204
        Parameters:
217
        Keyword arguments:
205 218
            'marker': Start list from the next item after 'marker'
206 219

  
207 220
            'limit': Number of containers to return
......
216 229
        """
217 230
        return []
218 231

  
219
    def list_container_meta(self, user, account, container, domain, until=None):
220
        """Return a list with all the container's object meta keys for the domain.
232
    def list_container_meta(self, user, account, container, domain,
233
                            until=None):
234
        """Return a list of the container's object meta keys for a domain.
221 235

  
222 236
        Raises:
223 237
            NotAllowedError: Operation not permitted
......
226 240
        """
227 241
        return []
228 242

  
229
    def get_container_meta(self, user, account, container, domain, until=None, include_user_defined=True):
243
    def get_container_meta(self, user, account, container, domain, until=None,
244
                           include_user_defined=True):
230 245
        """Return a dictionary with the container metadata for the domain.
231 246

  
232 247
        The keys returned are all user-defined, except:
......
247 262
        """
248 263
        return {}
249 264

  
250
    def update_container_meta(self, user, account, container, domain, meta, replace=False):
265
    def update_container_meta(self, user, account, container, domain, meta,
266
                              replace=False):
251 267
        """Update the metadata associated with the container for the domain.
252 268

  
253 269
        Parameters:
254 270
            'domain': Metadata domain
255 271

  
272
        Keyword arguments:
256 273
            'meta': Dictionary with metadata to update
257 274

  
258 275
            'replace': Replace instead of update
......
270 287
        The keys returned are:
271 288
            'quota': The maximum bytes allowed (default is 0 - unlimited)
272 289

  
273
            'versioning': Can be 'auto', 'manual' or 'none' (default is 'manual')
290
            'versioning': Can be 'auto', 'manual' or 'none' (default: 'manual')
274 291

  
275 292
        Raises:
276 293
            NotAllowedError: Operation not permitted
......
279 296
        """
280 297
        return {}
281 298

  
282
    def update_container_policy(self, user, account, container, policy, replace=False):
299
    def update_container_policy(self, user, account, container, policy,
300
                                replace=False):
283 301
        """Update the policy associated with the container.
284 302

  
285 303
        Raises:
......
291 309
        """
292 310
        return
293 311

  
294
    def put_container(self, user, account, container, policy=None, delimiter=None):
312
    def put_container(self, user, account, container, policy=None):
295 313
        """Create a new container with the given name.
296 314

  
297
        Parameters:
298
            'delimiter': If present deletes container contents instead of the container
299

  
300 315
        Raises:
301 316
            NotAllowedError: Operation not permitted
302 317

  
......
306 321
        """
307 322
        return
308 323

  
309
    def delete_container(self, user, account, container, until=None):
324
    def delete_container(self, user, account, container, until=None,
325
                         delimiter=None):
310 326
        """Delete/purge the container with the given name.
311 327

  
328
        Keyword arguments:
329
            'delimiter': If not None, deletes the container contents starting
330
                         with the delimiter
331

  
312 332
        Raises:
313 333
            NotAllowedError: Operation not permitted
314 334

  
......
318 338
        """
319 339
        return
320 340

  
321
    def list_objects(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=None, shared=False, until=None, size_range=None, public=False):
322
        """Return a list of object (name, version_id) tuples existing under a container.
341
    def list_objects(self, user, account, container, prefix='', delimiter=None,
342
                     marker=None, limit=10000, virtual=True, domain=None,
343
                     keys=None, shared=False, until=None, size_range=None,
344
                     public=False):
345
        """List (object name, object version_id) under a container.
323 346

  
324
        Parameters:
347
        Keyword arguments:
325 348
            'prefix': List objects starting with 'prefix'
326 349

  
327
            'delimiter': Return unique names before 'delimiter' and after 'prefix'
350
            'delimiter': Return unique names before 'delimiter' and
351
                         after 'prefix'
328 352

  
329 353
            'marker': Start list from the next item after 'marker'
330 354

  
......
333 357
            'virtual': If not set, the result will only include names starting
334 358
                       with 'prefix' and ending without a 'delimiter' or with
335 359
                       the first occurance of the 'delimiter' after 'prefix'.
336
                       If set, the result will include all names after 'prefix',
360
                       If set, the result will include all names after 'prefix'
337 361
                       up to and including the 'delimiter' if it is found
338 362

  
339 363
            'domain': Metadata domain for keys
......
357 381
        """
358 382
        return []
359 383

  
360
    def list_object_meta(self, user, account, container, prefix='', delimiter=None, marker=None, limit=10000, virtual=True, domain=None, keys=None, shared=False, until=None, size_range=None):
361
        """Return a list of object metadata dicts existing under a container.
384
    def list_object_meta(self, user, account, container, prefix='',
385
                         delimiter=None, marker=None, limit=10000,
386
                         virtual=True, domain=None, keys=None, shared=False,
387
                         until=None, size_range=None, public=False):
388
        """Return a list of metadata dicts of objects under a container.
362 389

  
363 390
        Same parameters with list_objects. Returned dicts have no user-defined
364 391
        metadata and, if until is not None, a None 'modified' timestamp.
......
371 398
        return []
372 399

  
373 400
    def list_object_permissions(self, user, account, container, prefix=''):
374
        """Return a list of paths that enforce permissions under a container.
401
        """Return a list of paths enforce permissions under a container.
375 402

  
376 403
        Raises:
377 404
            NotAllowedError: Operation not permitted
......
379 406
        return []
380 407

  
381 408
    def list_object_public(self, user, account, container, prefix=''):
382
        """Return a dict mapping paths to public ids for objects that are public under a container."""
409
        """Return a mapping of object paths to public ids under a container."""
383 410
        return {}
384 411

  
385
    def get_object_meta(self, user, account, container, name, domain, version=None, include_user_defined=True):
412
    def get_object_meta(self, user, account, container, name, domain,
413
                        version=None, include_user_defined=True):
386 414
        """Return a dictionary with the object metadata for the domain.
387 415

  
388 416
        The keys returned are all user-defined, except:
......
396 424

  
397 425
            'modified': Last modification timestamp (overall)
398 426

  
399
            'modified_by': The user that committed the object (version requested)
427
            'modified_by': The user that committed the object
428
                           (version requested)
400 429

  
401 430
            'version': The version identifier
402 431

  
403 432
            'version_timestamp': The version's modification timestamp
404 433

  
405
            'uuid': A unique identifier that persists data or metadata updates and renames
434
            'uuid': A unique identifier that persists data or metadata updates
435
                    and renames
406 436

  
407 437
            'checksum': The MD5 sum of the object (may be empty)
408 438

  
......
415 445
        """
416 446
        return {}
417 447

  
418
    def update_object_meta(self, user, account, container, name, domain, meta, replace=False):
419
        """Update the metadata associated with the object for the domain and return the new version.
448
    def update_object_meta(self, user, account, container, name, domain, meta,
449
                           replace=False):
450
        """Update object metadata for a domain and return the new version.
420 451

  
421 452
        Parameters:
422 453
            'domain': Metadata domain
......
449 480
        """
450 481
        return {}
451 482

  
452
    def update_object_permissions(self, user, account, container, name, permissions):
483
    def update_object_permissions(self, user, account, container, name,
484
                                  permissions):
453 485
        """Update (set) the permissions associated with the object.
454 486

  
455 487
        Parameters:
......
499 531
        """
500 532
        return 0, []
501 533

  
502
    def update_object_hashmap(self, user, account, container, name, size, type, hashmap, checksum, domain, meta=None, replace_meta=False, permissions=None):
503
        """Create/update an object with the specified size and partial hashes and return the new version.
534
    def update_object_hashmap(self, user, account, container, name, size, type,
535
                              hashmap, checksum, domain, meta=None,
536
                              replace_meta=False, permissions=None):
537
        """Create/update an object's hashmap and return the new version.
504 538

  
505 539
        Parameters:
506 540
            'domain': Metadata domain
......
522 556
        """
523 557
        return ''
524 558

  
525
    def update_object_checksum(self, user, account, container, name, version, checksum):
559
    def update_object_checksum(self, user, account, container, name, version,
560
                               checksum):
526 561
        """Update an object's checksum."""
527 562
        return
528 563

  
529
    def copy_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta=None, replace_meta=False, permissions=None, src_version=None, delimiter=None):
564
    def copy_object(self, user, src_account, src_container, src_name,
565
                    dest_account, dest_container, dest_name, type, domain,
566
                    meta=None, replace_meta=False, permissions=None,
567
                    src_version=None, delimiter=None):
530 568
        """Copy an object's data and metadata and return the new version.
531 569

  
532 570
        Parameters:
533 571
            'domain': Metadata domain
534 572

  
535
            'meta': Dictionary with metadata to change from source to destination
573
            'meta': Dictionary with metadata to change from source
574
                    to destination
536 575

  
537 576
            'replace_meta': Replace metadata instead of update
538 577

  
......
540 579

  
541 580
            'src_version': Copy from the version provided
542 581

  
543
            'delimiter': Copy objects whose path starts with src_name + delimiter
582
            'delimiter': Copy objects whose path starts with
583
                         src_name + delimiter
544 584

  
545 585
        Raises:
546 586
            NotAllowedError: Operation not permitted
......
555 595
        """
556 596
        return ''
557 597

  
558
    def move_object(self, user, src_account, src_container, src_name, dest_account, dest_container, dest_name, type, domain, meta=None, replace_meta=False, permissions=None, delimiter=None):
598
    def move_object(self, user, src_account, src_container, src_name,
599
                    dest_account, dest_container, dest_name, type, domain,
600
                    meta=None, replace_meta=False, permissions=None,
601
                    delimiter=None):
559 602
        """Move an object's data and metadata and return the new version.
560 603

  
561 604
        Parameters:
562 605
            'domain': Metadata domain
563 606

  
564
            'meta': Dictionary with metadata to change from source to destination
607
            'meta': Dictionary with metadata to change from source
608
                    to destination
565 609

  
566 610
            'replace_meta': Replace metadata instead of update
567 611

  
568 612
            'permissions': New object permissions
569 613

  
570
            'delimiter': Move objects whose path starts with src_name + delimiter
614
            'delimiter': Move objects whose path starts with
615
                         src_name + delimiter
571 616

  
572 617
        Raises:
573 618
            NotAllowedError: Operation not permitted
......
580 625
        """
581 626
        return ''
582 627

  
583
    def delete_object(self, user, account, container, name, until=None, delimiter=None):
628
    def delete_object(self, user, account, container, name, until=None,
629
                      delimiter=None):
584 630
        """Delete/purge an object.
585 631

  
586 632
        Parameters:
587
            'delimiter': Delete objects whose path starting with name + delimiter
633
            'delimiter': Delete objects whose path starts with
634
                         name + delimiter
588 635

  
589 636
        Raises:
590 637
            NotAllowedError: Operation not permitted
......
594 641
        return
595 642

  
596 643
    def list_versions(self, user, account, container, name):
597
        """Return a list of all (version, version_timestamp) tuples for an object.
644
        """Return a list of all object (version, version_timestamp) tuples.
598 645

  
599 646
        Raises:
600 647
            NotAllowedError: Operation not permitted
......
640 687
            IndexError: Offset or data outside block limits
641 688
        """
642 689
        return 0
690

  
691
    def get_domain_objects(self, domain, user=None):
692
        """Return a list of tuples for objects under the domain.
693

  
694
        Parameters:
695
            'user': return only objects accessible to the user.
696
        """
b/snf-pithos-backend/pithos/backends/lib/hashfiler/blocker.py
31 31
# interpreted as representing official policies, either expressed
32 32
# or implied, of GRNET S.A.
33 33

  
34
from hashlib import new as newhasher
35
from binascii import hexlify
36

  
37 34
from fileblocker import FileBlocker
38 35

  
36

  
39 37
def intersect(a, b):
40 38
    """ return the intersection of two lists """
41 39
    return list(set(a) & set(b))
42 40

  
41

  
43 42
def union(a, b):
44 43
    """ return the union of two lists """
45 44
    return list(set(a) | set(b))
......
57 56
            if params['blockpool']:
58 57
                from radosblocker import RadosBlocker
59 58
                self.rblocker = RadosBlocker(**params)
60
	except KeyError:
59
        except KeyError:
61 60
            pass
62 61

  
63 62
        self.fblocker = FileBlocker(**params)
......
93 92
            (_, r_missing) = self.rblocker.block_stor(blocklist)
94 93
        return (hashes, union(r_missing, f_missing))
95 94

  
96

  
97 95
    def block_delta(self, blkhash, offset, data):
98 96
        """Construct and store a new block from a given block
99 97
           and a data 'patch' applied at offset. Return:
......
103 101
        r_existed = True
104 102
        (f_hash, f_existed) = self.fblocker.block_delta(blkhash, offset, data)
105 103
        if self.rblocker:
106
            (r_hash, r_existed) = self.rblocker.block_delta(blkhash, offset, data)
104
            (r_hash, r_existed) = self.rblocker.block_delta(blkhash, offset,
105
                                                            data)
107 106
        if not r_hash and not f_hash:
108 107
            return None, None
109 108
        if self.rblocker and not r_hash:
b/snf-pithos-backend/pithos/backends/lib/hashfiler/context_file.py
31 31
# interpreted as representing official policies, either expressed
32 32
# or implied, of GRNET S.A.
33 33

  
34
from os import SEEK_CUR, SEEK_SET, fsync
34
from os import SEEK_CUR, SEEK_SET
35 35
from errno import ENOENT, EROFS
36 36

  
37 37

  
......
64 64

  
65 65
    try:
66 66
        seek(offset * chunksize)
67
    except IOError, e:
67
    except IOError:
68 68
        seek = None
69 69
        for x in xrange(offset):
70 70
            fwrite(zeros(chunksize))
......
105 105
    seek = openfile.seek
106 106
    try:
107 107
        seek(remains)
108
    except IOError, e:
108
    except IOError:
109 109
        seek = None
110 110
        while 1:
111 111
            s = fread(remains)
......
177 177

  
178 178
    def sync_write_chunks(self, chunksize, offset, chunks, size=None):
179 179
        #self.dirty = 1
180
        return file_sync_write_chunks(self.fdesc, chunksize, offset, chunks, size)
180
        return file_sync_write_chunks(self.fdesc, chunksize, offset, chunks,
181
                                      size)
181 182

  
182 183
    def sync_read(self, size):
183 184
        read = self.fdesc.read
b/snf-pithos-backend/pithos/backends/lib/hashfiler/context_object.py
32 32
# or implied, of GRNET S.A.
33 33

  
34 34
from os import SEEK_CUR, SEEK_SET
35
from rados import Ioctx, ObjectNotFound
35
from rados import ObjectNotFound
36 36

  
37 37
_zeros = ''
38 38

  
......
80 80
        radosobject.sunc_write(zeros(chunksize))
81 81
    radosobject.sync_write(buffer(zeros(chunksize), 0, r))
82 82

  
83

  
83 84
def file_sync_read_chunks(radosobject, chunksize, nr, offset=0):
84 85
    """Read and yield groups of chunks from a buffered file object at offset.
85 86
       Reads never span accros chunksize boundaries.
......
101 102
        yield chunk
102 103
        nr -= 1
103 104

  
105

  
104 106
class RadosObject(object):
105 107
    __slots__ = ("name", "ioctx", "create", "offset")
106 108

  
......
144 146
        datalen = 0
145 147
        while 1:
146 148
            try:
147
                s = read(self.name, size-datalen, self.offset)
149
                s = read(self.name, size - datalen, self.offset)
148 150
            except ObjectNotFound:
149 151
                s = None
150 152
            if not s:
......
158 160

  
159 161
    def sync_read_chunks(self, chunksize, nr, offset=0):
160 162
        return file_sync_read_chunks(self, chunksize, nr, offset)
161

  
b/snf-pithos-backend/pithos/backends/lib/hashfiler/fileblocker.py
1 1
# Copyright 2011-2012 GRNET S.A. All rights reserved.
2
# 
2
#
3 3
# Redistribution and use in source and binary forms, with or
4 4
# without modification, are permitted provided that the following
5 5
# conditions are met:
6
# 
6
#
7 7
#   1. Redistributions of source code must retain the above
8 8
#      copyright notice, this list of conditions and the following
9 9
#      disclaimer.
10
# 
10
#
11 11
#   2. Redistributions in binary form must reproduce the above
12 12
#      copyright notice, this list of conditions and the following
13 13
#      disclaimer in the documentation and/or other materials
14 14
#      provided with the distribution.
15
# 
15
#
16 16
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
17 17
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 18
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
......
25 25
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 26
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 27
# POSSIBILITY OF SUCH DAMAGE.
28
# 
28
#
29 29
# The views and conclusions contained in the software and
30 30
# documentation are those of the authors and should not be
31 31
# interpreted as representing official policies, either expressed
......
56 56
            if not exists(blockpath):
57 57
                makedirs(blockpath)
58 58
            else:
59
                raise ValueError("Variable blockpath '%s' is not a directory" % (blockpath,))
59
                raise ValueError("Variable blockpath '%s' is not a directory" %
60
                                 (blockpath,))
60 61

  
61 62
        hashtype = params['hashtype']
62 63
        try:
......
125 126
                if not rbl:
126 127
                    break
127 128
                for block in rbl.sync_read_chunks(blocksize, 1, 0):
128
                    break # there should be just one block there
129
                    break  # there should be just one block there
129 130
            if not block:
130 131
                break
131 132
            append(self._pad(block))
......
140 141
        """
141 142
        block_hash = self.block_hash
142 143
        hashlist = [block_hash(b) for b in blocklist]
143
        mf = None
144
        missing = [i for i, h in enumerate(hashlist) if not self._check_rear_block(h)]
144
        missing = [i for i, h in enumerate(hashlist) if not
145
                   self._check_rear_block(h)]
145 146
        for i in missing:
146 147
            with self._get_rear_block(hashlist[i], 1) as rbl:
147
                 rbl.sync_write(blocklist[i]) #XXX: verify?
148
                rbl.sync_write(blocklist[i])  # XXX: verify?
148 149

  
149 150
        return hashlist, missing
150 151

  
......
161 162
        block = self.block_retr((blkhash,))
162 163
        if not block:
163 164
            return None, None
164
        
165

  
165 166
        block = block[0]
166 167
        newblock = block[:offset] + data
167 168
        if len(newblock) > blocksize:
......
204 205
            sextend(sl)
205 206
            lastsize = len(block)
206 207

  
207
        size = (len(hashlist) -1) * blocksize + lastsize if hashlist else 0
208
        size = (len(hashlist) - 1) * blocksize + lastsize if hashlist else 0
208 209
        return size, hashlist, storedlist
209

  
b/snf-pithos-backend/pithos/backends/lib/hashfiler/filemapper.py
1 1
# Copyright 2011-2012 GRNET S.A. All rights reserved.
2
# 
2
#
3 3
# Redistribution and use in source and binary forms, with or
4 4
# without modification, are permitted provided that the following
5 5
# conditions are met:
6
# 
6
#
7 7
#   1. Redistributions of source code must retain the above
8 8
#      copyright notice, this list of conditions and the following
9 9
#      disclaimer.
10
# 
10
#
11 11
#   2. Redistributions in binary form must reproduce the above
12 12
#      copyright notice, this list of conditions and the following
13 13
#      disclaimer in the documentation and/or other materials
14 14
#      provided with the distribution.
15
# 
15
#
16 16
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
17 17
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 18
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
......
25 25
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 26
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 27
# POSSIBILITY OF SUCH DAMAGE.
28
# 
28
#
29 29
# The views and conclusions contained in the software and
30 30
# documentation are those of the authors and should not be
31 31
# interpreted as representing official policies, either expressed
32 32
# or implied, of GRNET S.A.
33 33

  
34
from os import makedirs, unlink
34
from os import makedirs
35 35
from os.path import isdir, realpath, exists, join
36 36
from binascii import hexlify
37 37

  
......
42 42
    """Mapper.
43 43
       Required constructor parameters: mappath, namelen.
44 44
    """
45
    
45

  
46 46
    mappath = None
47 47
    namelen = None
48 48

  
......
54 54
            if not exists(mappath):
55 55
                makedirs(mappath)
56 56
            else:
57
                raise ValueError("Variable mappath '%s' is not a directory" % (mappath,))
57
                raise ValueError("Variable mappath '%s' is not a directory" %
58
                                 (mappath,))
58 59
        self.mappath = mappath
59 60

  
60 61
    def _get_rear_map(self, maphash, create=0):
......
91 92
            return
92 93
        with self._get_rear_map(maphash, 1) as rmap:
93 94
            rmap.sync_write_chunks(namelen, blkoff, hashes, None)
94

  
b/snf-pithos-backend/pithos/backends/lib/hashfiler/mapper.py
31 31
# interpreted as representing official policies, either expressed
32 32
# or implied, of GRNET S.A.
33 33

  
34
from binascii import hexlify
35

  
36 34
from filemapper import FileMapper
37 35

  
36

  
38 37
class Mapper(object):
39 38
    """Mapper.
40 39
       Required constructor parameters: mappath, namelen.
......
47 46
            if params['mappool']:
48 47
                from radosmapper import RadosMapper
49 48
                self.rmap = RadosMapper(**params)
50
	except KeyError:
49
        except KeyError:
51 50
            pass
52 51

  
53 52
        self.fmap = FileMapper(**params)
b/snf-pithos-backend/pithos/backends/lib/hashfiler/radosblocker.py
37 37

  
38 38
from context_object import RadosObject, file_sync_read_chunks
39 39

  
40
CEPH_CONF_FILE="/etc/ceph/ceph.conf"
40
CEPH_CONF_FILE = "/etc/ceph/ceph.conf"
41

  
41 42

  
42 43
class RadosBlocker(object):
43 44
    """Blocker.
......
123 124
                if not rbl:
124 125
                    break
125 126
                for block in rbl.sync_read_chunks(blocksize, 1, 0):
126
                    break # there should be just one block there
127
                    break  # there should be just one block there
127 128
            if not block:
128 129
                break
129 130
            append(self._pad(block))
......
138 139
        """
139 140
        block_hash = self.block_hash
140 141
        hashlist = [block_hash(b) for b in blocklist]
141
        mf = None
142
        missing = [i for i, h in enumerate(hashlist) if not self._check_rear_block(h)]
142
        missing = [i for i, h in enumerate(hashlist) if not
143
                   self._check_rear_block(h)]
143 144
        for i in missing:
144 145
            with self._get_rear_block(hashlist[i], 1) as rbl:
145
                 rbl.sync_write(blocklist[i]) #XXX: verify?
146
                rbl.sync_write(blocklist[i])  # XXX: verify?
146 147

  
147 148
        return hashlist, missing
148 149

  
......
202 203
            sextend(sl)
203 204
            lastsize = len(block)
204 205

  
205
        size = (len(hashlist) -1) * blocksize + lastsize if hashlist else 0
206
        size = (len(hashlist) - 1) * blocksize + lastsize if hashlist else 0
206 207
        return size, hashlist, storedlist
207

  
b/snf-pithos-backend/pithos/backends/lib/hashfiler/radosmapper.py
33 33

  
34 34
from binascii import hexlify
35 35

  
36
from context_object import RadosObject, file_sync_read_chunks
36
from context_object import RadosObject
37 37
from rados import *
38 38

  
39
CEPH_CONF_FILE="/etc/ceph/ceph.conf"
39
CEPH_CONF_FILE = "/etc/ceph/ceph.conf"
40

  
40 41

  
41 42
class RadosMapper(object):
42 43
    """Mapper.
......
91 92
            return
92 93
        with self._get_rear_map(maphash, 1) as rmap:
93 94
            rmap.sync_write_chunks(namelen, blkoff, hashes, None)
94

  
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/__init__.py
32 32
# or implied, of GRNET S.A.
33 33

  
34 34
from dbwrapper import DBWrapper
35
from node import Node, ROOTNODE, SERIAL, HASH, SIZE, TYPE, MTIME, MUSER, UUID, CHECKSUM, CLUSTER, MATCH_PREFIX, MATCH_EXACT
35
from node import (Node, ROOTNODE, SERIAL, HASH, SIZE, TYPE, MTIME, MUSER, UUID,
36
                  CHECKSUM, CLUSTER, MATCH_PREFIX, MATCH_EXACT)
36 37
from permissions import Permissions, READ, WRITE
37 38
from config import Config
38 39
from quotaholder_serials import QuotaholderSerial
39 40

  
40 41
__all__ = ["DBWrapper",
41
           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME", "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX", "MATCH_EXACT",
42
           "Permissions", "READ", "WRITE", "Config", "QuotaholderSerial"]
42
           "Node", "ROOTNODE", "SERIAL", "HASH", "SIZE", "TYPE", "MTIME",
43
           "MUSER", "UUID", "CHECKSUM", "CLUSTER", "MATCH_PREFIX",
44
           "MATCH_EXACT", "Permissions", "READ", "WRITE", "Config",
45
           "QuotaholderSerial"]
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/versions/165ba3fbfe53_update_path_account.py
11 11
down_revision = '3dd56e750a3'
12 12

  
13 13
from alembic import op
14
from sqlalchemy.sql import table, column, literal, and_
14
from sqlalchemy.sql import table, column, and_
15 15

  
16 16
from pithos.api.settings import (SERVICE_TOKEN, ASTAKOS_BASE_URL)
17 17

  
......
35 35
import sqlalchemy as sa
36 36

  
37 37
catalog = {}
38

  
39

  
38 40
def get_uuid(account):
39 41
    global catalog
40 42
    if account in catalog:
......
52 54
        return catalog[account]
53 55

  
54 56
inverse_catalog = {}
57

  
58

  
55 59
def get_displayname(account):
56 60
    global inverse_catalog
57 61
    if account in inverse_catalog:
......
92 96
    column('path', sa.String(2048))
93 97
)
94 98

  
95
xvals =  table(
99
xvals = table(
96 100
    'xfeaturevals',
97 101
    column('feature_id', sa.Integer),
98 102
    column('key', sa.Integer),
99 103
    column('value', sa.String(256))
100 104
)
101 105

  
102
g =  table(
106
g = table(
103 107
    'groups',
104 108
    column('owner', sa.String(256)),
105 109
    column('name', sa.String(256)),
106 110
    column('member', sa.String(256))
107 111
)
108 112

  
113

  
109 114
def migrate(callback):
110 115
    connection = op.get_bind()
111 116

  
......
119 124
            bar.next()
120 125
            continue
121 126
        path = sep.join([match, rest])
122
        u = n.update().where(n.c.node == node).values({'path':path})
127
        u = n.update().where(n.c.node == node).values({'path': path})
123 128
        connection.execute(u)
124 129
        bar.next()
125 130
    bar.finish()
......
127 132
    s = sa.select([v.c.muser]).distinct()
128 133
    musers = connection.execute(s).fetchall()
129 134
    bar = IncrementalBar('Migrating version modification users...',
130
                         max=len(musers)
131
    )
135
                         max=len(musers))
132 136
    for muser, in musers:
133 137
        match = callback(muser)
134 138
        if not match:
......
149 153
            bar.next()
150 154
            continue
151 155
        path = sep.join([match, rest])
152
        u = p.update().where(p.c.public_id == id).values({'path':path})
156
        u = p.update().where(p.c.public_id == id).values({'path': path})
153 157
        connection.execute(u)
154 158
        bar.next()
155 159
    bar.finish()
......
164 168
            bar.next()
165 169
            continue
166 170
        path = sep.join([match, rest])
167
        u = x.update().where(x.c.feature_id == id).values({'path':path})
171
        u = x.update().where(x.c.feature_id == id).values({'path': path})
168 172
        connection.execute(u)
169 173
        bar.next()
170 174
    bar.finish()
......
182 186
            continue
183 187
        new_value = sep.join([match, group])
184 188
        u = xvals.update()
185
        u = u.where(and_(
186
                xvals.c.feature_id == feature_id,
187
                xvals.c.key == key,
188
                xvals.c.value == value))
189
        u = u.values({'value':new_value})
189
        u = u.where(and_(xvals.c.feature_id == feature_id,
190
                         xvals.c.key == key,
191
                         xvals.c.value == value))
192
        u = u.values({'value': new_value})
190 193
        connection.execute(u)
191 194
        bar.next()
192 195
    bar.finish()
......
214 217
            bar.next()
215 218
    bar.finish()
216 219

  
220

  
217 221
def upgrade():
218 222
    migrate(get_uuid)
219 223

  
224

  
220 225
def downgrade():
221 226
    migrate(get_displayname)
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/versions/27381099d477_alter_public_add_col.py
16 16
from pithos.backends.modular import ULTIMATE_ANSWER
17 17
from pithos.api.short_url import encode_url
18 18

  
19

  
19 20
def upgrade():
20 21
    op.add_column('public', sa.Column('url', sa.String(2048)))
21 22
    op.create_unique_constraint('idx_public_url', 'public', ['url'])
......
31 32
    s = sa.select([p.c.public_id])
32 33
    rows = conn.execute(s).fetchall()
33 34
    for r in rows:
34
        s = p.update().values(url=get_url(r[0])).where(p.c.public_id==r[0])
35
        s = p.update().values(url=get_url(r[0])).where(p.c.public_id == r[0])
35 36
        op.execute(s)
36 37

  
38

  
37 39
def downgrade():
38 40
    op.drop_constraint('idx_public_url', 'public')
39 41
    op.drop_column('public', 'url')
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/versions/2a309a9a3438_fix_statistics_negat.py
24 24
    u = st.update().where(st.c.population < 0).values({'population': 0})
25 25
    op.execute(u)
26 26

  
27

  
27 28
def downgrade():
28 29
    pass
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/config.py
37 37

  
38 38
from dbworker import DBWorker
39 39

  
40

  
40 41
def create_tables(engine):
41 42
    metadata = MetaData()
42 43
    columns = []
43 44
    columns.append(Column('key', String(256), primary_key=True))
44 45
    columns.append(Column('value', String(256)))
45
    config = Table('config', metadata, *columns, mysql_engine='InnoDB')
46
    
46
    Table('config', metadata, *columns, mysql_engine='InnoDB')
47

  
47 48
    metadata.create_all(engine)
48 49
    return metadata.sorted_tables
49 50

  
51

  
50 52
class Config(DBWorker):
51 53
    """Config are properties holding persistent information about system state.
52 54
    """
......
71 73
        if row:
72 74
            return row[0]
73 75
        return None
74
    
76

  
75 77
    def set_value(self, key, value):
76 78
        """Set a configuration entry.
77 79
        """
......
80 82
        r = self.conn.execute(s, key=key, value=value)
81 83
        inserted_primary_key = r.inserted_primary_key[0]
82 84
        r.close()
83
        return inserted_primary_key
85
        return inserted_primary_key
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/dbworker.py
33 33

  
34 34
ESCAPE_CHAR = '@'
35 35

  
36

  
36 37
class DBWorker(object):
37 38
    """Database connection handler."""
38 39

  
......
44 45
        self.engine = wrapper.engine
45 46

  
46 47
    def escape_like(self, s, escape_char=ESCAPE_CHAR):
47
        return (
48
                s
49
                .replace(escape_char, escape_char * 2)
50
                .replace('%', escape_char + '%')
51
                .replace('_', escape_char + '_')
52
        )
48
        return (s.replace(escape_char, escape_char * 2).
49
                replace('%', escape_char + '%').
50
                replace('_', escape_char + '_'))
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/dbwrapper.py
56 56
        else:
57 57
            #self.engine = create_engine(db, pool_size=0, max_overflow=-1)
58 58
            self.engine = create_engine(
59
                db, poolclass=NullPool, isolation_level='READ COMMITTED'
60
        )
59
                db, poolclass=NullPool, isolation_level='READ COMMITTED')
61 60
        self.engine.echo = False
62 61
        self.engine.echo_pool = False
63 62
        self.conn = self.engine.connect()
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/groups.py
148 148
    def group_check(self, owner, group, member):
149 149
        """Check if a member is in a group."""
150 150

  
151
        s = select([self.groups.c.member], and_(self.groups.c.owner == owner,
152
                                                self.groups.c.name == group,
153
                                                self.groups.c.member == member))
151
        s = select([self.groups.c.member],
152
                   and_(self.groups.c.owner == owner,
153
                        self.groups.c.name == group,
154
                        self.groups.c.member == member))
154 155
        r = self.conn.execute(s)
155 156
        l = r.fetchone()
156 157
        r.close()
b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py
37 37

  
38 38
from sqlalchemy import (Table, Integer, BigInteger, DECIMAL, Boolean,
39 39
                        Column, String, MetaData, ForeignKey)
40
from sqlalchemy.types import Text
41
from sqlalchemy.schema import Index, Sequence
42
from sqlalchemy.sql import func, and_, or_, not_, null, select, bindparam, text, exists
43
from sqlalchemy.ext.compiler import compiles
44
from sqlalchemy.engine.reflection import Inspector
40
from sqlalchemy.schema import Index
41
from sqlalchemy.sql import func, and_, or_, not_, select, bindparam, exists
45 42
from sqlalchemy.exc import NoSuchTableError
46 43

  
47 44
from dbworker import DBWorker, ESCAPE_CHAR
......
135 132
                          primary_key=True))
136 133
    columns.append(Column('key', String(128), primary_key=True))
137 134
    columns.append(Column('value', String(256)))
138
    policy = Table('policy', metadata, *columns, mysql_engine='InnoDB')
135
    Table('policy', metadata, *columns, mysql_engine='InnoDB')
139 136

  
140 137
    #create statistics table
141 138
    columns = []
......
149 146
    columns.append(Column('mtime', DECIMAL(precision=16, scale=6)))
150 147
    columns.append(Column('cluster', Integer, nullable=False, default=0,
151 148
                          primary_key=True, autoincrement=False))
152
    statistics = Table('statistics', metadata, *columns, mysql_engine='InnoDB')
149
    Table('statistics', metadata, *columns, mysql_engine='InnoDB')
153 150

  
154 151
    #create versions table
155 152
    columns = []
......
282 279
    def node_get_versions(self, node, keys=(), propnames=_propnames):
283 280
        """Return the properties of all versions at node.
284 281
           If keys is empty, return all properties in the order
285
           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
282
           (serial, node, hash, size, type, source, mtime, muser, uuid,
283
            checksum, cluster).
286 284
        """
287 285

  
288 286
        s = select([self.versions.c.serial,
......
306 304
        if not keys:
307 305
            return rows
308 306

  
309
        return [[p[propnames[k]] for k in keys if k in propnames] for p in rows]
307
        return [[p[propnames[k]] for k in keys if k in propnames] for
308
                p in rows]
310 309

  
311 310
    def node_count_children(self, node):
312 311
        """Return node's child count."""
......
367 366
        s = select([self.nodes.c.node],
368 367
                   and_(self.nodes.c.parent == parent,
369 368
                        select([func.count(self.versions.c.serial)],
370
                               self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
369
                               self.versions.c.node == self.nodes.c.node).
370
                        as_scalar() == 0))
371 371
        rp = self.conn.execute(s)
372
        nodes = [r[0] for r in rp.fetchall()]
372
        nodes = [row[0] for row in rp.fetchall()]
373 373
        rp.close()
374 374
        if nodes:
375 375
            s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
......
423 423
        s = select([self.nodes.c.node],
424 424
                   and_(self.nodes.c.node == node,
425 425
                        select([func.count(self.versions.c.serial)],
426
                               self.versions.c.node == self.nodes.c.node).as_scalar() == 0))
427
        rp= self.conn.execute(s)
428
        nodes = [r[0] for r in rp.fetchall()]
426
                               self.versions.c.node == self.nodes.c.node).
427
                        as_scalar() == 0))
428
        rp = self.conn.execute(s)
429
        nodes = [row[0] for row in rp.fetchall()]
429 430
        rp.close()
430 431
        if nodes:
431 432
            s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
......
554 555

  
555 556
        #insert or replace
556 557
        #TODO better upsert
557
        u = self.statistics.update().where(and_(self.statistics.c.node == node,
558
                                           self.statistics.c.cluster == cluster))
558
        u = self.statistics.update().where(and_(
559
            self.statistics.c.node == node,
560
            self.statistics.c.cluster == cluster))
559 561
        u = u.values(population=population, size=size, mtime=mtime)
560 562
        rp = self.conn.execute(u)
561 563
        rp.close()
......
693 695
        """
694 696

  
695 697
        mtime = time()
696
        s = self.versions.insert(
697
        ).values(node=node, hash=hash, size=size, type=type, source=source,
698
                 mtime=mtime, muser=muser, uuid=uuid, checksum=checksum, cluster=cluster)
698
        s = self.versions.insert().values(
699
            node=node, hash=hash, size=size, type=type, source=source,
700
            mtime=mtime, muser=muser, uuid=uuid, checksum=checksum,
701
            cluster=cluster)
699 702
        serial = self.conn.execute(s).inserted_primary_key[0]
700 703
        self.statistics_update_ancestors(node, 1, size, mtime, cluster,
701 704
                                         update_statistics_ancestors_depth)
......
736 739
            return props
737 740
        return None
738 741

  
739
    def version_lookup_bulk(self, nodes, before=inf, cluster=0, all_props=True):
742
    def version_lookup_bulk(self, nodes, before=inf, cluster=0,
743
                            all_props=True):
740 744
        """Lookup the current versions of the given nodes.
741 745
           Return a list with their properties:
742
           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
746
           (serial, node, hash, size, type, source, mtime, muser, uuid,
747
            checksum, cluster).
743 748
        """
744 749
        if not nodes:
745 750
            return ()
......
771 776
        """Return a sequence of values for the properties of
772 777
           the version specified by serial and the keys, in the order given.
773 778
           If keys is empty, return all properties in the order
774
           (serial, node, hash, size, type, source, mtime, muser, uuid, checksum, cluster).
779
           (serial, node, hash, size, type, source, mtime, muser, uuid,
780
            checksum, cluster).
775 781
        """
776 782

  
777 783
        v = self.versions.alias()
......
848 854
        return hash, size
849 855

  
850 856
    def attribute_get(self, serial, domain, keys=()):
851
        """Return a list of (key, value) pairs of the version specified by serial.
852
           If keys is empty, return all attributes.
853
           Othwerise, return only those specified.
857
        """Return a list of (key, value) pairs of the specific version.
858

  
859
        If keys is empty, return all attributes.
860
        Othwerise, return only those specified.
854 861
        """
855 862

  
856 863
        if keys:
......
938 945
    def attribute_unset_is_latest(self, node, exclude):
939 946
        u = self.attributes.update().where(and_(
940 947
            self.attributes.c.node == node,
941
                     self.attributes.c.serial != exclude)).values(
942
                             {'is_latest': False})
948
            self.attributes.c.serial != exclude)).values({'is_latest': False})
943 949
        self.conn.execute(u)
944 950

  
945
    def latest_attribute_keys(self, parent, domain, before=inf, except_cluster=0, pathq=None):
951
    def latest_attribute_keys(self, parent, domain, before=inf,
... This diff was truncated because it exceeds the maximum size that can be displayed.

Also available in: Unified diff