Revision f6c0005f

b/tools/lib/migrate.py
 # or implied, of GRNET S.A.

 from sqlalchemy import create_engine
-from sqlalchemy import Table, MetaData
+from sqlalchemy import Table, Column, String, MetaData
+from sqlalchemy.sql import select

 from django.conf import settings

......
         self.backend = ModularBackend(*options)

     def execute(self):
-        pass
+        pass
+
+class Cache():
+    def __init__(self, db):
+        self.engine = create_engine(db)
+        metadata = MetaData(self.engine)
+
+        columns = []
+        columns.append(Column('path', String(2048), primary_key=True))
+        columns.append(Column('hash', String(255)))
+        self.files = Table('files', metadata, *columns)
+        self.conn = self.engine.connect()
+        self.engine.echo = True
+        metadata.create_all(self.engine)
+
+    def put(self, path, hash):
+        # Insert or replace.
+        s = self.files.delete().where(self.files.c.path == path)
+        r = self.conn.execute(s)
+        r.close()
+        s = self.files.insert()
+        r = self.conn.execute(s, {'path': path, 'hash': hash})
+        r.close()
+
+    def get(self, path):
+        s = select([self.files.c.hash], self.files.c.path == path)
+        r = self.conn.execute(s)
+        l = r.fetchone()
+        r.close()
+        if not l:
+            return l
+        return l[0]
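
The new Cache class gives the migration scripts a shared path-to-hash store: put emulates an upsert by deleting any existing row before inserting, and get returns the stored hash, or None for an unknown path. A minimal usage sketch (the sqlite URL and paths are illustrative assumptions, not part of the revision):

    from lib.migrate import Cache

    cache = Cache('sqlite:///cache.db')        # any SQLAlchemy database URL
    cache.put('/srv/files/0001', 'deadbeef')   # insert
    cache.put('/srv/files/0001', 'cafebabe')   # delete-then-insert replaces the hash
    print cache.get('/srv/files/0001')         # 'cafebabe'
    print cache.get('/srv/files/missing')      # None

Note that the constructor sets self.engine.echo = True, so every statement the cache issues is also logged to stdout.
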
b/tools/migrate-data

 from binascii import hexlify

-from sqlalchemy import create_engine
-from sqlalchemy import Table, Column, String, MetaData
+from sqlalchemy import Table
 from sqlalchemy.sql import select

 from pithos import settings
 from pithos.backends.modular import ModularBackend

 from lib.hashmap import HashMap
-from lib.migrate import Migration
+from lib.migrate import Migration, Cache

+import os
+
 class DataMigration(Migration):
     def __init__(self, pithosdb, db):
         Migration.__init__(self, pithosdb)
-        # XXX Need more columns for primary key - last modified timestamp...
-        engine = create_engine(db)
-        metadata = MetaData(engine)
-
-        columns = []
-        columns.append(Column('path', String(2048), primary_key=True))
-        columns.append(Column('hash', String(255)))
-        self.files = Table('files', metadata, *columns)
-        metadata.create_all(engine)
-
-    def cache_put(self, path, hash):
-        # Insert or replace.
-        s = self.files.delete().where(self.files.c.path == path)
-        r = self.conn.execute(s)
-        r.close()
-        s = self.files.insert()
-        r = self.conn.execute(s, {'path': path, 'hash': hash})
-        r.close()
+        self.cache = Cache(db)

-    def cache_get(self, path):
-        s = select([self.files.c.hash], self.files.c.path == path)
-        r = self.conn.execute(s)
-        l = r.fetchone()
-        r.close()
-        if not l:
-            return l
-        return l[0]
-
-    def execute(self):
-        blocksize = self.backend.block_size
-        blockhash = self.backend.hash_algorithm
-
+    def retrieve_files(self):
         # Loop for all available files.
         filebody = Table('filebody', self.metadata, autoload=True)
         s = select([filebody.c.storedfilepath])
         rp = self.conn.execute(s)
-        paths = rp.fetchall()
+        path = rp.fetchone()
+        while path:
+            yield path
+            path = rp.fetchone()
         rp.close()
+
+    def execute(self):
+        blocksize = self.backend.block_size
+        blockhash = self.backend.hash_algorithm

-        for path in paths:
+        for (path,) in self.retrieve_files():
             map = HashMap(blocksize, blockhash)
-            map.load(path)
+            try:
+                map.load(open(path))
+            except Exception, e:
+                print e
+                continue
             hash = hexlify(map.hash())

-            if hash != self.cache_get(path):
+            if hash != self.cache.get(path):
                 missing = self.backend.blocker.block_ping(map)  # XXX Backend hack...
                 status = '[>] ' + path
                 if missing:
......
                             self.backend.put_block(block)
                 else:
                     status += ' - no blocks missing'
-                self.cache_put(path, hash)
+                self.cache.put(path, hash)
             else:
                 status = '[-] ' + path
             print status
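
The substantive change to migrate-data is twofold: the file list is no longer materialized with fetchall() but streamed row by row through the retrieve_files generator, so memory stays flat however large the filebody table is, and files that fail to load are now reported and skipped rather than aborting the whole run. The generator is the standard fetchone loop; a standalone sketch of the same pattern (the iter_rows helper name is hypothetical, not in the revision):

    def iter_rows(conn, stmt):
        # Stream result rows one at a time instead of buffering them all.
        rp = conn.execute(stmt)
        row = rp.fetchone()
        while row:
            yield row            # hand one row to the caller at a time
            row = rp.fetchone()
        rp.close()               # reached once the caller exhausts the generator
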
b/tools/migrate-db
+#!/usr/bin/env python
+
+# Copyright 2011 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+#   1. Redistributions of source code must retain the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer.
+#
+#   2. Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+from sqlalchemy import Table
+from sqlalchemy.sql import select
+
+from binascii import hexlify
+
+from pithos.backends.lib.hashfiler import Blocker
+from pithos.backends.lib.sqlalchemy import Node
+from pithos.aai.models import PithosUser
+
+from django.conf import settings
+
+from pithos.backends.modular import CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED
+from pithos.backends.lib.sqlalchemy.node import Node
+
+from lib.transfer import upload
+from lib.hashmap import HashMap, file_read_iterator
+from lib.client import Fault
+from lib.migrate import Migration, Cache
+from calendar import timegm
+
+import json
+import os
+import sys
+import hashlib
+import mimetypes
+
+class ObjectMigration(Migration):
+    def __init__(self, old_db, db, f):
+        Migration.__init__(self, old_db)
+        self.cache = Cache(db)
+        self.f = f
+
+    def create_node(self, username, container, object, filepath, mimetype):
+        obj = ''
+        path = '/'.join(object.split('/')[:-1])
+        name = object.split('/')[-1]
+        # Create directory markers.
+        for f in path.split('/'):
+            obj = '%s/%s' % (obj, f) if obj else f
+            try:
+                md5 = hashlib.md5()
+                meta = {'Content-Type': 'application/directory',
+                        'hash': md5.hexdigest().lower()}
+                self.backend.update_object_hashmap(username, username, container, obj, 0, [], meta)
+            except NameError, e:
+                pass
+
+        parent_path = '%s/%s' % (username, container)
+        parent_node = self.backend.node.node_lookup(parent_path)
+        path = '%s/%s' % (parent_path, object)
+        nodeid = self.backend.node.node_create(parent_node, path)
+        return nodeid
+
+    def create_history(self, header_id, node_id, deleted=False):
+        i = 0
+        map = HashMap(self.backend.block_size, self.backend.hash_algorithm)
+        for t, rowcount in self.retrieve_node_versions(header_id):
+            size, modified_by, filepath, mimetype, modificationdate = t
+            cluster = CLUSTER_HISTORY if i < rowcount - 1 else CLUSTER_NORMAL
+            cluster = cluster if not deleted else CLUSTER_DELETED
+            hash = self.cache.get(filepath)
+            if hash is None:
+                raise Exception("Missing hash")
+            args = (node_id, hash, size, None, modified_by, cluster)
+            serial = self.backend.node.version_create(*args)[0]
+            meta = {'hash': hash,
+                    'content-type': mimetype}
+            self.backend.node.attribute_set(serial, ((k, v) for k, v in meta.iteritems()))
+            timestamp = timegm(modificationdate.timetuple())
+            microseconds = modificationdate.time().microsecond
+            self.f.write('update versions set mtime=\'%10d.%06d\' where serial=%s;\n' % (timestamp, microseconds, serial))
+            i += 1
+
+    def create_metadata(self, header_id, node_id):
+        for t in self.retrieve_metadata(header_id):
+            pass
+
+    def create_objects(self):
+        for username, headerid, folderid, filename, deleted, filepath, mimetype in self.retrieve_current_nodes():
+            path = self.retrieve_path(folderid)[1:]
+            container = 'pithos' if not deleted else 'trash'
+
+            # Create container if it does not exist.
+            try:
+                self.backend._lookup_container(username, container)
+            except NameError:
+                self.backend.put_container(username, username, container)
+
+            # Create node.
+            object = '%s/%s' % (path, filename)
+            nodeid = self.create_node(username, container, object, filepath, mimetype)
+
+            # Create node history.
+            self.create_history(headerid, nodeid, deleted)
+
+            self.create_metadata(headerid, nodeid)
+            #self.set_public()
+            #self.statistics()
+            #self.set_permissions()
+
+    def retrieve_path(self, child_id):
+        folderTable = Table('folder', self.metadata, autoload=True)
+        s = select([folderTable.c.parent_id, folderTable.c.name])
+        s = s.where(folderTable.c.id == child_id)
+        rp = self.conn.execute(s)
+        parent_id, foldername = rp.fetchone()
+        if not parent_id:
+            return ''
+        else:
+            return '%s/%s' % (self.retrieve_path(parent_id), foldername)
+
+    def retrieve_current_nodes(self):
+        fileheader = Table('fileheader', self.metadata, autoload=True)
+        filebody = Table('filebody', self.metadata, autoload=True)
+        folder = Table('folder', self.metadata, autoload=True)
+        gss_user = Table('gss_user', self.metadata, autoload=True)
+        j = filebody.join(fileheader, filebody.c.id == fileheader.c.currentbody_id)
+        j = j.join(folder, fileheader.c.folder_id == folder.c.id)
+        j = j.join(gss_user, fileheader.c.owner_id == gss_user.c.id)
+        s = select([gss_user.c.username, fileheader.c.id, fileheader.c.folder_id,
+                    fileheader.c.name, fileheader.c.deleted, filebody.c.storedfilepath,
+                    filebody.c.mimetype], from_obj=j)
+        s = s.limit(1)
+        rp = self.conn.execute(s)
+        object = rp.fetchone()
+        while object:
+            yield object
+            object = rp.fetchone()
+        rp.close()
+
+    def retrieve_node_versions(self, header_id):
+        filebody = Table('filebody', self.metadata, autoload=True)
+        gss_user = Table('gss_user', self.metadata, autoload=True)
+        j = filebody.join(gss_user, filebody.c.modifiedby_id == gss_user.c.id)
+        s = select([filebody.c.filesize, gss_user.c.username,
+                    filebody.c.storedfilepath, filebody.c.mimetype,
+                    filebody.c.modificationdate], from_obj=j)
+        s = s.where(filebody.c.header_id == header_id)
+        s = s.order_by(filebody.c.version)
+        rp = self.conn.execute(s)
+        version = rp.fetchone()
+        while version:
+            yield version, rp.rowcount
+            version = rp.fetchone()
+        rp.close()
+
+    def retrieve_metadata(self, header_id):
+        filetag = Table('filetag', self.metadata, autoload=True)
+        s = filetag.select(filetag.c.fileid == header_id)
+        rp = self.conn.execute(s)
+        tag = rp.fetchone()
+        while tag:
+            yield tag
+            tag = rp.fetchone()
+        rp.close()
+
+    def handle_deleted(self):
+        pass
+
+if __name__ == "__main__":
+    old_db = ''
+    db = ''
+
+    f = open('fixdates.sql', 'w')
+    ot = ObjectMigration(old_db, db, f)
+    ot.create_objects()
+    f.close()
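
Rather than setting modification times through the backend, create_history appends update versions set mtime=... statements to the fixdates.sql file handed to the constructor, to be run against the new database after the migration. The value is epoch seconds from calendar.timegm plus the microsecond component, zero-padded to six digits. A sketch of that conversion (the datetime is a made-up example value):

    from calendar import timegm
    from datetime import datetime

    d = datetime(2011, 9, 1, 10, 30, 15, 123456)  # illustrative value only
    print 'mtime=\'%10d.%06d\'' % (timegm(d.timetuple()), d.time().microsecond)
    # -> mtime='1314873015.123456'
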
b/tools/migrate-users
+#!/usr/bin/env python
+
+# Copyright 2011 GRNET S.A. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the following
+# conditions are met:
+#
+#   1. Redistributions of source code must retain the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer.
+#
+#   2. Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# The views and conclusions contained in the software and
+# documentation are those of the authors and should not be
+# interpreted as representing official policies, either expressed
+# or implied, of GRNET S.A.
+
+from lib.migrate import Migration
+
+from sqlalchemy import Table
+from pithos.aai.models import PithosUser
+
+import base64
+
+class UserMigration(Migration):
+    def __init__(self, db):
+        Migration.__init__(self, db)
+        self.gss_users = Table('gss_user', self.metadata, autoload=True)
+
+    def execute(self):
+        s = self.gss_users.select()
+        users = self.conn.execute(s).fetchall()
+        for u in users:
+            user = PithosUser()
+            user.pk = u['id']
+            user.uniq = u['username']
+            user.realname = u['name']
+            user.is_admin = False
+            user.affiliation = u['homeorganization'] if u['homeorganization'] else ''
+            user.auth_token = base64.b64encode(u['authtoken'])
+            user.auth_token_created = u['creationdate']
+            user.auth_token_expires = u['authtokenexpirydate']
+            user.created = u['creationdate']
+            user.updated = u['modificationdate']
+            print '#', user
+            user.save(update_timestamps=False)
+
+if __name__ == "__main__":
+    db = 'postgresql://gss@localhost/pithos'
+    m = UserMigration(db)
+    m.execute()
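
UserMigration copies each gss_user row field-for-field onto a PithosUser; the only real transformation is base64-encoding the binary authtoken column before it is stored as auth_token. A round-trip sketch (the byte string is a stand-in, not real data):

    import base64

    raw = '\x8f\x00\x17token-bytes'    # stand-in for a binary gss_user.authtoken
    encoded = base64.b64encode(raw)    # value stored on PithosUser.auth_token
    assert base64.b64decode(encoded) == raw
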
/dev/null
-#!/usr/bin/env python
-
-# Copyright 2011 GRNET S.A. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or
-# without modification, are permitted provided that the following
-# conditions are met:
-#
-#   1. Redistributions of source code must retain the above
-#      copyright notice, this list of conditions and the following
-#      disclaimer.
-#
-#   2. Redistributions in binary form must reproduce the above
-#      copyright notice, this list of conditions and the following
-#      disclaimer in the documentation and/or other materials
-#      provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
-# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# The views and conclusions contained in the software and
-# documentation are those of the authors and should not be
-# interpreted as representing official policies, either expressed
-# or implied, of GRNET S.A.
-
-from sqlalchemy import Table
-from sqlalchemy.sql import select
-
-from binascii import hexlify
-
-from pithos.backends.lib.hashfiler import Blocker
-from pithos.backends.lib.sqlalchemy import Node
-from pithos.aai.models import PithosUser
-
-from django.conf import settings
-
-from pithos.backends.modular import CLUSTER_NORMAL, CLUSTER_HISTORY, CLUSTER_DELETED
-from pithos.backends.lib.sqlalchemy.node import Node
-from pithos.backends.lib.sqlalchemy.dbwrapper import DBWrapper
-
-from lib.transfer import upload
-from lib.hashmap import HashMap, file_read_iterator
-from lib.client import Fault
-from lib.migrate import Migration
-
-import json
-import os
-import sys
-import hashlib
-import mimetypes
-
-class ObjectMigration(Migration):
-    def __init__(self, old_db):
-        Migration.__init__(self, old_db)
-        self.wrapper = ClientWrapper(self.backend)
-        params = {'wrapper': DBWrapper(self.backend.db)}
-        self.node = Node(**params)
-
-    def create_default_containers(self):
-        users = PithosUser.objects.all()
-        for u in users:
-            print '#', u.uniq
-            try:
-                self.wrapper.create_container('pithos', u.uniq)
-                self.wrapper.create_container('trash', u.uniq)
-            except NameError, e:
-                pass
-
-    def get_path(self, child_id):
-        folderTable = Table('folder', self.metadata, autoload=True)
-        s = select([folderTable.c.parent_id, folderTable.c.name])
-        s = s.where(folderTable.c.id == child_id)
-        rp = self.conn.execute(s)
-        parent_id, foldername = rp.fetchone()
-        if not parent_id:
-            return ''
-        else:
-            return '%s/%s' %(self.get_path(parent_id), foldername)
-
-    def create_object(self, username, container, object, filepath, mimetype):
-        obj = ''
-        path = '/'.join(object.split('/')[:-1])
-        name = object.split('/')[-1]
-        #create directory markers
-        for f in path.split('/'):
-            obj = '%s/%s' %(obj, f) if obj else f
-            try:
-                self.wrapper.create_directory_marker(container, obj, username)
-            except NameError, e:
-                pass
-        self.wrapper.set_account(username)
-
-        prefix = '%s/' %path if path else ''
-        print '#', filepath, container, prefix, name, mimetype
-        return upload(self.wrapper, filepath, container, prefix, name, mimetype)
-
-    def create_history(self, user, header_id, node_id, deleted=False):
-        filebody = Table('filebody', self.metadata, autoload=True)
-        gss_user = Table('gss_user', self.metadata, autoload=True)
-        j = filebody.join(gss_user, filebody.c.modifiedby_id == gss_user.c.id)
-        s = select([filebody.c.filesize, gss_user.c.username], from_obj=j)
-        s = s.where(filebody.c.header_id == header_id)
-        s = s.order_by(filebody.c.version)
-        rp = self.conn.execute(s)
-        versions = rp.fetchall()
-        print '#', len(versions)
-        rp.close()
-        i = 0
-        for size, modyfied_by in versions:
-            cluster = CLUSTER_HISTORY if i < len(versions) - 1 else CLUSTER_NORMAL
-            cluster = cluster if not deleted else CLUSTER_DELETED
-            args = (node_id, size, None, modyfied_by, cluster)
-            self.node.version_create(*args)
-            i += 1
-
-    def create_objects(self):
-        fileheader = Table('fileheader', self.metadata, autoload=True)
-        filebody = Table('filebody', self.metadata, autoload=True)
-        folder = Table('folder', self.metadata, autoload=True)
-        gss_user = Table('gss_user', self.metadata, autoload=True)
-        j = filebody.join(fileheader, filebody.c.id == fileheader.c.currentbody_id)
-        j = j.join(folder, fileheader.c.folder_id == folder.c.id)
-        j = j.join(gss_user, fileheader.c.owner_id == gss_user.c.id)
-        s = select([gss_user.c.username, fileheader.c.id, fileheader.c.folder_id,
-                    fileheader.c.name, fileheader.c.deleted, filebody.c.storedfilepath,
-                    filebody.c.mimetype], from_obj=j)
-        rp = self.conn.execute(s)
-        objects = rp.fetchall()
-        for username, headerid, folderid, filename, deleted, filepath, mimetype in objects:
-            path = self.get_path(folderid)[1:]
-            container = 'pithos' if not deleted else 'trash'
-            object = '%s/%s' %(path, filename)
-            #filepath = '/Users/butters/Downloads/torvalds-linux-0f86267'
-            vserial = self.create_object(username, container, object, filepath, mimetype)
-            nodeid = self.node.version_get_properties(vserial, keys=('node',))[0]
-            self.create_history(username, headerid, nodeid, deleted)
-            self.node.version_remove(vserial)
-            #self.set_metadata()
-            #self.set_public()
-            #self.statistics()
-            #self.set_permissions()
-
-    def handle_deleted(self):
-        pass
-
-    def upload_dir(self, dir, prefix, user, container):
-        for f in os.listdir(dir):
-            fullpath = '%s/%s' %(dir, f)
-            if os.path.isfile(fullpath):
-                type = mimetypes.guess_type(fullpath)[0]
-                name = '/'.join(fullpath.split(prefix)[1:])
-                print '@', user, container, name, fullpath, type
-                self.create_object(user, container, name, fullpath, type)
-            else: self.upload_dir(fullpath, prefix, user, container)
-
-class ClientWrapper(object):
-    """Wraps client methods used by transfer.upload()
-    to ModularBackend methods"""
-
-    def __init__(self, backend):
-        self.backend = backend
-        self.block_size = self.backend.block_size
-        self.block_hash = self.backend.hash_algorithm
-
-    def set_account(self, account):
-        self.account = account
-
-    def create_container(self, container, account=None, **meta):
-        self.backend.put_container(account, account, container, meta)
-
-    def create_directory_marker(self, container, object, account=None):
-        md5 = hashlib.md5()
-        meta = {'Content-Type':'application/directory',
-                'hash': md5.hexdigest().lower()}
-        self.backend.update_object_hashmap(account, account, container, object, 0, [], meta)
-
-    def create_object_by_hashmap(self, container, object, map, mimetype=None):
-        hashmap = HashMap(self.block_size, self.block_hash)
-        for h in map['hashes']:
-            hashmap.append(h)
-        meta = {'hash':hexlify(hashmap.hash())}
-        if mimetype:
-            meta['content-type'] = mimetype
-        size = map['bytes']
-        try:
-            args = [self.account, self.account, container, object, size, map['hashes'], meta]
-            return self.backend.update_object_hashmap(*args)
-        except IndexError, ie:
-            fault = Fault(ie.data, 409)
-            raise fault
-
-    def update_container_data(self, container, f):
-        #just put the blocks
-        for block in file_read_iterator(f, self.block_size):
-            self.backend.put_block(block)
-
-    def retrieve_container_metadata(self, container):
-        return {'x-container-block-size':self.block_size,
-                'x-container-block-hash':self.block_hash}
-
-if __name__ == "__main__":
-    old_db = ''
-
-    ot = ObjectMigration(old_db)
-    #ot.create_default_containers()
-    #ot.create_objects()
-
-    p = ''
-    ot.upload_dir(p, p, 'chstath', 'linux')
/dev/null
-#!/usr/bin/env python
-
-# Copyright 2011 GRNET S.A. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or
-# without modification, are permitted provided that the following
-# conditions are met:
-#
-#   1. Redistributions of source code must retain the above
-#      copyright notice, this list of conditions and the following
-#      disclaimer.
-#
-#   2. Redistributions in binary form must reproduce the above
-#      copyright notice, this list of conditions and the following
-#      disclaimer in the documentation and/or other materials
-#      provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
-# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# The views and conclusions contained in the software and
-# documentation are those of the authors and should not be
-# interpreted as representing official policies, either expressed
-# or implied, of GRNET S.A.
-
-from lib.migrate import Migration
-
-from sqlalchemy import Table
-from pithos.aai.models import PithosUser
-
-import base64
-
-class UserMigration(Migration):
-    def __init__(self, db):
-        Migration.__init__(self, db)
-        self.gss_users = Table('gss_user', self.metadata, autoload=True)
-
-    def execute(self):
-        s = self.gss_users.select()
-        users = self.conn.execute(s).fetchall()
-        l = []
-        for u in users:
-            user = PithosUser()
-            user.pk = u['id']
-            user.uniq = u['username']
-            user.realname = u['name']
-            user.is_admin = False
-            user.affiliation = u['homeorganization'] if u['homeorganization'] else ''
-            user.auth_token = base64.b64encode(u['authtoken'])
-            user.auth_token_created = u['creationdate']
-            user.auth_token_expires = u['authtokenexpirydate']
-            user.created = u['creationdate']
-            user.updated = u['modificationdate']
-            print '#', user
-            user.save(update_timestamps=False)
-
-if __name__ == "__main__":
-    db = 'postgresql://gss:m0ust@rda@62.217.112.56/pithos'
-    m = UserMigration(db)
-    m.execute()
