# Copyright 2011-2012 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

import gevent
import gevent.monkey
# Monkey-patch everything for gevent early on
gevent.monkey.patch_all()
import gevent.pool

from os import fstat, path
from hashlib import new as newhashlib
from time import time, sleep
from datetime import datetime
import sys
from StringIO import StringIO

from binascii import hexlify
from .pithos_sh_lib.hashmap import HashMap

from .pithos_rest_api import PithosRestAPI
from .storage import ClientError
from .utils import path4url, filter_in

def pithos_hash(block, blockhash):
    h = newhashlib(blockhash)
    h.update(block.rstrip('\x00'))
    return h.hexdigest()

class PithosClient(PithosRestAPI):
    """GRNET Pithos API client"""

    def __init__(self, base_url, token, account=None, container=None):
        super(PithosClient, self).__init__(base_url, token, account=account,
            container=container)
        self.async_pool = None

    def purge_container(self):
        self.container_delete(until=unicode(time()))

    def upload_object_unchunked(self, obj, f, withHashFile=False, size=None, etag=None,
        content_encoding=None, content_disposition=None, content_type=None, sharing=None,
        public=None):
        # This is a naive implementation: it loads the whole file in memory.
        # Look in pithos for a nicer implementation.
        self.assert_container()

        if withHashFile:
            data = f.read()
            try:
                import json
                data = json.dumps(json.loads(data))
            except ValueError:
                raise ClientError(message='"%s" is not json-formatted' % f.name, status=1)
            except SyntaxError:
                raise ClientError(message='"%s" is not a valid hashmap file' % f.name, status=1)
            f = StringIO(data)
        data = f.read(size) if size is not None else f.read()
        self.object_put(obj, data=data, etag=etag, content_encoding=content_encoding,
            content_disposition=content_disposition, content_type=content_type, permitions=sharing,
            public=public, success=201)

    def put_block_async(self, data, hash):
        class SilentGreenlet(gevent.Greenlet):
            def _report_error(self, exc_info):
                _stderr = None
                try:
                    _stderr = sys.stderr
                    sys.stderr = StringIO()
                    gevent.Greenlet._report_error(self, exc_info)
                finally:
                    sys.stderr = _stderr
        POOL_SIZE = 5
        if self.async_pool is None:
            self.async_pool = gevent.pool.Pool(size=POOL_SIZE)
        g = SilentGreenlet(self.put_block, data, hash)
        self.async_pool.start(g)
        return g

    def put_block(self, data, hash):
        r = self.container_post(update=True, content_type='application/octet-stream',
            content_length=len(data), data=data, format='json')
        assert r.json[0] == hash, 'Local hash does not match server'

    def create_object_by_manifestation(self, obj, etag=None, content_encoding=None,
        content_disposition=None, content_type=None, sharing=None, public=None):
        self.assert_container()
        obj_content_type = 'application/octet-stream' if content_type is None else content_type
        self.object_put(obj, content_length=0, etag=etag, content_encoding=content_encoding,
            content_disposition=content_disposition, content_type=obj_content_type, permitions=sharing,
            public=public, manifest='%s/%s' % (self.container, obj))

    #upload_* auxiliary methods
    def _get_file_block_info(self, fileobj, size=None):
        meta = self.get_container_info()
        blocksize = int(meta['x-container-block-size'])
        blockhash = meta['x-container-block-hash']
        size = size if size is not None else fstat(fileobj.fileno()).st_size
        nblocks = 1 + (size - 1) // blocksize
        return (blocksize, blockhash, size, nblocks)

    def _get_missing_hashes(self, obj, json, size=None, format='json', hashmap=True,
        content_type=None, etag=None, content_encoding=None, content_disposition=None,
        permitions=None, public=None, success=(201, 409)):
        r = self.object_put(obj, format='json', hashmap=True, content_type=content_type,
            json=json, etag=etag, content_encoding=content_encoding,
            content_disposition=content_disposition, permitions=permitions, public=public,
            success=success)
        if r.status_code == 201:
            return None
        return r.json

    def _calculate_uploaded_blocks(self, blocksize, blockhash, size, nblocks, hashes, hmap, fileobj,
        hash_cb=None):
        offset = 0
        if hash_cb:
            hash_gen = hash_cb(nblocks)
            hash_gen.next()

        for i in range(nblocks):
            block = fileobj.read(min(blocksize, size - offset))
            bytes = len(block)
            hash = pithos_hash(block, blockhash)
            hashes.append(hash)
            hmap[hash] = (offset, bytes)
            offset += bytes
            if hash_cb:
                hash_gen.next()
        assert offset == size

    def _upload_missing_blocks(self, missing, hmap, fileobj, upload_cb=None):
        """Upload missing blocks asynchronously in a pseudo-parallel fashion (greenlets)"""
        if upload_cb:
            upload_gen = upload_cb(len(missing))
            upload_gen.next()

        flying = []
        for hash in missing:
            offset, bytes = hmap[hash]
            fileobj.seek(offset)
            data = fileobj.read(bytes)
            r = self.put_block_async(data, hash)
            flying.append(r)
            for r in flying:
                if r.ready():
                    if r.exception:
                        raise r.exception
                    if upload_cb:
                        upload_gen.next()
            flying = [r for r in flying if not r.ready()]
        while upload_cb:
            try:
                upload_gen.next()
            except StopIteration:
                break
        gevent.joinall(flying)
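
    # Overview of the chunked upload below (descriptive comment added for
    # clarity; it restates what upload_object and its helpers already do):
    #   1. read the container's block size and hash algorithm, split the file
    #      into blocks and hash each block locally,
    #   2. PUT the full hashmap ({'bytes': size, 'hashes': [...]}) with
    #      format=json and hashmap=True; a 201 means the server already has
    #      every block, a 409 returns the hashes it is missing,
    #   3. upload only the missing blocks (via greenlets), then PUT the
    #      hashmap again to create the object.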

    def upload_object(self, obj, f, size=None, hash_cb=None, upload_cb=None, etag=None,
        content_encoding=None, content_disposition=None, content_type=None, sharing=None,
        public=None):
        self.assert_container()

        #init
        block_info = (blocksize, blockhash, size, nblocks) = self._get_file_block_info(f, size)
        (hashes, hmap, offset) = ([], {}, 0)
        content_type = 'application/octet-stream' if content_type is None else content_type

        self._calculate_uploaded_blocks(*block_info, hashes=hashes, hmap=hmap, fileobj=f,
            hash_cb=hash_cb)

        hashmap = dict(bytes=size, hashes=hashes)
        missing = self._get_missing_hashes(obj, hashmap, content_type=content_type, size=size,
            etag=etag, content_encoding=content_encoding, content_disposition=content_disposition,
            permitions=sharing, public=public)

        if missing is None:
            return
        self._upload_missing_blocks(missing, hmap, f, upload_cb=upload_cb)

        self.object_put(obj, format='json', hashmap=True, content_type=content_type,
            json=hashmap, success=201)

    #download_* auxiliary methods
    def _get_object_block_info(self, obj, **kwargs):
        #retrieve object hashmap
        hashmap = self.get_object_hashmap(obj, **kwargs)
        blocksize = int(hashmap['block_size'])
        blockhash = hashmap['block_hash']
        total_size = hashmap['bytes']
        hmap = hashmap['hashes']
        map_dict = {}
        for h in hmap:
            map_dict[h] = True
        return (blocksize, blockhash, total_size, hmap, map_dict)

    def _get_range_limits(self, range, total_size):
        try:
            (custom_start, custom_end) = range.split('-')
            (custom_start, custom_end) = (int(custom_start), int(custom_end))
        except ValueError:
            raise ClientError(message='Invalid range string', status=601)
        if custom_start > custom_end or custom_start < 0:
            raise ClientError(message='Negative range', status=601)
        elif custom_start == custom_end:
            return
        elif custom_end > total_size:
            raise ClientError(message='Range exceeds file size', status=601)
        return (custom_start, custom_end)

    def _get_downloaded_blocks(self, hmap, fileobj, blocksize, blockhash, map_dict,
        overide=False, download_gen=None):
        if fileobj.isatty() or not path.exists(fileobj.name):
            return {}
        h = HashMap(blocksize, blockhash)
        with_progress_bar = False if download_gen is None else True
        h.load(fileobj, with_progress_bar)
        resumed = {}
        for i, x in enumerate(h):
            existing_hash = hexlify(x)
            if existing_hash in map_dict:
                #resume if some blocks have already been downloaded
                resumed[existing_hash] = i
                if with_progress_bar:
                    download_gen.next()
            elif not overide:
                raise ClientError(message='Local file is substantially different',
                    status=600)
        return resumed

    def _get_block_range(self, blockid, blocksize, total_size, custom_start, custom_end):
        start = blockid * blocksize
        if custom_start is not None:
            if start < custom_start:
                start = custom_start
            elif start > custom_end:
                return (None, None)
        end = start + blocksize - 1 if start + blocksize < total_size else total_size - 1
        if custom_end is not None and end > custom_end:
            end = custom_end
        return (start, end)

    def _manage_finished_downloading_greenlets(self, flying, objfile, sleeptime=0):
        newflying = []
        for v in flying:
            h = v['handler']
            if h.ready():
                if h.exception:
                    h.release()
                    raise h.exception
                objfile.seek(v['start'])
                objfile.write(h.value.content)
                objfile.flush()
            else:
                #if there are unfinished greenlets, sleep for some time - be careful with that
                sleep(sleeptime)
                newflying.append(v)
        return newflying

    def _get_block(self, obj, **kwargs):
        return self.object_get(obj, success=(200, 206), binary=True, **kwargs)

    def _get_block_async(self, obj, **kwargs):
        class SilentGreenlet(gevent.Greenlet):
            def _report_error(self, exc_info):
                _stderr = sys.stderr
                try:
                    sys.stderr = StringIO()
                    gevent.Greenlet._report_error(self, exc_info)
                finally:
                    sys.stderr = _stderr
        POOL_SIZE = 5
        if self.async_pool is None:
            self.async_pool = gevent.pool.Pool(size=POOL_SIZE)
        g = SilentGreenlet(self._get_block, obj, **kwargs)
        self.async_pool.start(g)
        return g

    def _async_download_missing_blocks(self, obj, objfile, hmap, resumed, blocksize, total_size,
        download_gen=None, custom_start=None, custom_end=None, **restargs):
        """Attempt a pseudo-multithreaded (greenlet-based) download of blocks; if that
        is not possible, fall back to sequential block download
        """

        flying = []
        for i, h in enumerate(hmap):
            if h in resumed:
                continue
            if download_gen:
                try:
                    download_gen.next()
                except StopIteration:
                    pass
            (start, end) = self._get_block_range(i, blocksize, total_size, custom_start, custom_end)
            if start is None:
                continue
            data_range = 'bytes=%s-%s' % (start, end)
            handler = self._get_block_async(obj, data_range=data_range, **restargs)
            flying.append({'handler': handler, 'start': start, 'data_range': data_range})
            flying = self._manage_finished_downloading_greenlets(flying, objfile)

        #write the last results and exit
        while len(flying) > 0:
            flying = self._manage_finished_downloading_greenlets(flying, objfile, sleeptime=0.1)
        objfile.truncate(total_size)

        gevent.joinall(flying)

    def _append_missing_blocks(self, obj, objfile, hmap, resumed, blocksize, total_size,
        download_gen=None, custom_start=None, custom_end=None, **restargs):
        for i, h in enumerate(hmap):
            if h in resumed:
                continue
            if download_gen:
                try:
                    download_gen.next()
                except StopIteration:
                    pass
            (start, end) = self._get_block_range(i, blocksize, total_size, custom_start, custom_end)
            data_range = 'bytes=%s-%s' % (start, end)
            r = self._get_block(obj, data_range=data_range, **restargs)
            objfile.write(r.content)
            objfile.flush()
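
    # Overview of download_object below (descriptive comment added for
    # clarity; it restates what the code does): fetch the object hashmap,
    # hash any existing local file to detect blocks that are already present
    # (resume), then fetch only the missing blocks with ranged GETs:
    # greenlet-parallel when writing to a regular file, sequential appends
    # when writing to a non-seekable target such as a tty.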

    def download_object(self, obj, objfile, download_cb=None, version=None, overide=False, range=None,
        if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
        """overide forces the local file to become exactly like the remote object, even if
        the two are substantially different
        """

        self.assert_container()

        (blocksize, blockhash, total_size, hmap, map_dict) = self._get_object_block_info(obj,
            version=version, if_match=if_match, if_none_match=if_none_match,
            if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since)

        if total_size <= 0:
            return

        (custom_start, custom_end) = (None, None) if range is None \
            else self._get_range_limits(range, total_size)

        #load progress bar
        download_gen = None
        if download_cb is not None:
            download_gen = download_cb(total_size / blocksize + 1)
            download_gen.next()

        resumed = self._get_downloaded_blocks(hmap, objfile, blocksize, blockhash, map_dict,
            overide=overide, download_gen=download_gen)
        restargs = dict(version=version, if_etag_match=if_match, if_etag_not_match=if_none_match,
            if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since)

        if objfile.isatty():
            self._append_missing_blocks(obj, objfile, hmap, resumed, blocksize, total_size,
                download_gen, custom_start=custom_start, custom_end=custom_end, **restargs)
        else:
            self._async_download_missing_blocks(obj, objfile, hmap, resumed, blocksize, total_size,
                download_gen, custom_start=custom_start, custom_end=custom_end, **restargs)

    def get_object_hashmap(self, obj, version=None, if_match=None, if_none_match=None,
        if_modified_since=None, if_unmodified_since=None):
        try:
            r = self.object_get(obj, hashmap=True, version=version, if_etag_match=if_match,
                if_etag_not_match=if_none_match, if_modified_since=if_modified_since,
                if_unmodified_since=if_unmodified_since)
        except ClientError as err:
            if err.status == 304 or err.status == 412:
                return {}
            raise
        return r.json
402

    
403
    def set_account_group(self, group, usernames):
404
        self.account_post(update=True, groups = {group:usernames})
405

    
406
    def del_account_group(self, group):
407
        self.account_post(update=True, groups={group:[]})
408

    
409
    def get_account_info(self, until=None):
410
        r = self.account_head(until=until)
411
        if r.status_code == 401:
412
            raise ClientError("No authorization")
413
        return r.headers
414

    
415
    def get_account_quota(self):
416
        return filter_in(self.get_account_info(), 'X-Account-Policy-Quota', exactMatch = True)
417

    
418
    def get_account_versioning(self):
419
        return filter_in(self.get_account_info(), 'X-Account-Policy-Versioning', exactMatch = True)
420

    
421
    def get_account_meta(self, until=None):
422
        return filter_in(self.get_account_info(until = until), 'X-Account-Meta-')
423

    
424
    def get_account_group(self):
425
        return filter_in(self.get_account_info(), 'X-Account-Group-')
426

    
427
    def set_account_meta(self, metapairs):
428
        assert(type(metapairs) is dict)
429
        self.account_post(update=True, metadata=metapairs)
430

    
431
    def del_account_meta(self, metakey):
432
        self.account_post(update=True, metadata={metakey:''})
433

    
434
    def set_account_quota(self, quota):
435
        self.account_post(update=True, quota=quota)
436

    
437
    def set_account_versioning(self, versioning):
438
        self.account_post(update=True, versioning = versioning)
439

    
440
    def list_containers(self):
441
        r = self.account_get()
442
        return r.json
443

    
444
    def del_container(self, until=None, delimiter=None):
445
        self.assert_container()
446
        r = self.container_delete(until=until, delimiter=delimiter, success=(204, 404, 409))
447
        if r.status_code == 404:
448
            raise ClientError('Container "%s" does not exist'%self.container, r.status_code)
449
        elif r.status_code == 409:
450
            raise ClientError('Container "%s" is not empty'%self.container, r.status_code)
451

    
452
    def get_container_versioning(self, container):
453
        self.container = container
454
        return filter_in(self.get_container_info(), 'X-Container-Policy-Versioning')
455

    
456
    def get_container_quota(self, container):
457
        self.container = container
458
        return filter_in(self.get_container_info(), 'X-Container-Policy-Quota')
459

    
460
    def get_container_info(self, until = None):
461
        r = self.container_head(until=until)
462
        return r.headers
463

    
464
    def get_container_meta(self, until = None):
465
        return filter_in(self.get_container_info(until=until), 'X-Container-Meta')
466

    
467
    def get_container_object_meta(self, until = None):
468
        return filter_in(self.get_container_info(until=until), 'X-Container-Object-Meta')
469

    
470
    def set_container_meta(self, metapairs):
471
        assert(type(metapairs) is dict)
472
        self.container_post(update=True, metadata=metapairs)
473
        
474
    def del_container_meta(self, metakey):
475
        self.container_post(update=True, metadata={metakey:''})
476

    
477
    def set_container_quota(self, quota):
478
        self.container_post(update=True, quota=quota)
479

    
480
    def set_container_versioning(self, versioning):
481
        self.container_post(update=True, versioning=versioning)
482

    
483
    def del_object(self, obj, until=None, delimiter=None):
484
        self.assert_container()
485
        self.object_delete(obj, until=until, delimiter=delimiter)
486

    
487
    def set_object_meta(self, object, metapairs):
488
        assert(type(metapairs) is dict)
489
        self.object_post(object, update=True, metadata=metapairs)
490

    
491
    def del_object_meta(self, metakey, object):
492
        self.object_post(object, update=True, metadata={metakey:''})
493

    
494
    def publish_object(self, object):
495
        self.object_post(object, update=True, public=True)
496

    
497
    def unpublish_object(self, object):
498
        self.object_post(object, update=True, public=False)
499

    
500
    def get_object_info(self, obj, version=None):
501
        r = self.object_head(obj, version=version)
502
        return r.headers
503

    
504
    def get_object_meta(self, obj, version=None):
505
        return filter_in(self.get_object_info(obj, version=version), 'X-Object-Meta')
506

    
507
    def get_object_sharing(self, object):
508
        r = filter_in(self.get_object_info(object), 'X-Object-Sharing', exactMatch = True)
509
        reply = {}
510
        if len(r) > 0:
511
            perms = r['x-object-sharing'].split(';')
512
            for perm in perms:
513
                try:
514
                    perm.index('=')
515
                except ValueError:
516
                    raise ClientError('Incorrect reply format')
517
                (key, val) = perm.strip().split('=')
518
                reply[key] = val
519
        return reply
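
    # The X-Object-Sharing header parsed above and produced below is a
    # semicolon-separated list of key=value pairs, e.g. (illustrative; the
    # user and group names are placeholders):
    #     read=user1@example.com,group1;write=user2@example.com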
520

    
521
    def set_object_sharing(self, object, read_permition = False, write_permition = False):
522
        """Give read/write permisions to an object.
523
           @param object is the object to change sharing permitions onto
524
           @param read_permition is a list of users and user groups that get read permition for this object
525
                False means all previous read permitions will be removed
526
           @param write_perimition is a list of users and user groups to get write permition for this object
527
                False means all previous read permitions will be removed
528
        """
529
        perms = {}
530
        perms['read'] = read_permition if isinstance(read_permition, list) else ''
531
        perms['write'] = write_permition if isinstance(write_permition, list) else ''
532
        self.object_post(object, update=True, permitions=perms)
533

    
534
    def del_object_sharing(self, object):
535
        self.set_object_sharing(object)
536

    
537
    def append_object(self, object, source_file, upload_cb = None):
538
        """@param upload_db is a generator for showing progress of upload
539
            to caller application, e.g. a progress bar. Its next is called
540
            whenever a block is uploaded
541
        """
542
        self.assert_container()
543
        meta = self.get_container_info()
544
        blocksize = int(meta['x-container-block-size'])
545
        filesize = fstat(source_file.fileno()).st_size
546
        nblocks = 1 + (filesize - 1)//blocksize
547
        offset = 0
548
        if upload_cb is not None:
549
            upload_gen = upload_cb(nblocks)
550
        for i in range(nblocks):
551
            block = source_file.read(min(blocksize, filesize - offset))
552
            offset += len(block)
553
            self.object_post(object, update=True, content_range='bytes */*',
554
                content_type='application/octet-stream', content_length=len(block), data=block)
555
            
556
            if upload_cb is not None:
557
                upload_gen.next()
558

    
559
    def truncate_object(self, object, upto_bytes):
560
        self.object_post(object, update=True, content_range='bytes 0-%s/*'%upto_bytes,
561
            content_type='application/octet-stream', object_bytes=upto_bytes,
562
            source_object=path4url(self.container, object))
563

    
564
    def overwrite_object(self, object, start, end, source_file, upload_cb=None):
565
        """Overwrite a part of an object with given source file
566
           @start the part of the remote object to start overwriting from, in bytes
567
           @end the part of the remote object to stop overwriting to, in bytes
568
        """
569
        self.assert_container()
570
        meta = self.get_container_info()
571
        blocksize = int(meta['x-container-block-size'])
572
        filesize = fstat(source_file.fileno()).st_size
573
        datasize = int(end) - int(start) + 1
574
        nblocks = 1 + (datasize - 1)//blocksize
575
        offset = 0
576
        if upload_cb is not None:
577
            upload_gen = upload_cb(nblocks)
578
        for i in range(nblocks):
579
            block = source_file.read(min(blocksize, filesize - offset, datasize - offset))
580
            offset += len(block)
581
            self.object_post(object, update=True, content_type='application/octet-stream', 
582
                content_length=len(block), content_range='bytes %s-%s/*'%(start,end), data=block)
583
            
584
            if upload_cb is not None:
585
                upload_gen.next()
586

    
587
    def copy_object(self, src_container, src_object, dst_container, dst_object=False,
588
        source_version = None, public=False, content_type=None, delimiter=None):
589
        self.assert_account()
590
        self.container = dst_container
591
        dst_object = dst_object or src_object
592
        src_path = path4url(src_container, src_object)
593
        self.object_put(dst_object, success=201, copy_from=src_path, content_length=0,
594
            source_version=source_version, public=public, content_type=content_type,
595
            delimiter=delimiter)
596

    
597
    def move_object(self, src_container, src_object, dst_container, dst_object=False,
598
        source_version = None, public=False, content_type=None, delimiter=None):
599
        self.assert_account()
600
        self.container = dst_container
601
        dst_object = dst_object or src_object
602
        src_path = path4url(src_container, src_object)
603
        self.object_put(dst_object, success=201, move_from=src_path, content_length=0,
604
            source_version=source_version, public=public, content_type=content_type,
605
            delimiter=delimiter)