
Source Code for Module web2py.gluon.dal

   1  #!/usr/bin/env python 
   2  # -*- coding: utf-8 -*- 
   3   
   4  """ 
   5  This file is part of the web2py Web Framework 
   6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
   7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
   8   
   9  Thanks to 
  10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
  11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
  12      * Denes 
  13      * Chris Clark 
  14      * clach05 
  15      * Denes Lengyel 
  16      * and many others who have contributed to current and previous versions 
  17   
  18  This file contains the DAL support for many relational databases, 
  19  including: 
  20  - SQLite 
  21  - MySQL 
  22  - Postgres 
  23  - Oracle 
  24  - MS SQL 
  25  - DB2 
  26  - Interbase 
  27  - Ingres 
  28  - SapDB (experimental) 
  29  - Cubrid (experimental) 
  30  - CouchDB (experimental) 
  31  - MongoDB (in progress) 
  32  - Google:nosql 
  33  - Google:sql 
  34   
  35  Example of usage: 
  36   
  37  >>> # from dal import DAL, Field 
  38   
  39  ### create DAL connection (and create DB if not exists) 
  40  >>> db=DAL(('mysql://a:b@localhost/x','sqlite://storage.sqlite'),folder=None) 
  41   
  42  ### define a table 'person' (create/alter as necessary) 
  43  >>> person = db.define_table('person',Field('name','string')) 
  44   
  45  ### insert a record 
  46  >>> id = person.insert(name='James') 
  47   
  48  ### retrieve it by id 
  49  >>> james = person(id) 
  50   
  51  ### retrieve it by name 
  52  >>> james = person(name='James') 
  53   
  54  ### retrieve it by arbitrary query 
  55  >>> query = (person.name=='James')&(person.name.startswith('J')) 
  56  >>> james = db(query).select(person.ALL)[0] 
  57   
  58  ### update one record 
  59  >>> james.update_record(name='Jim') 
  60   
  61  ### update multiple records by query 
  62  >>> db(person.name.like('J%')).update(name='James') 
  63  1 
  64   
  65  ### delete records by query 
  66  >>> db(person.name.lower()=='jim').delete() 
  67  0 
  68   
  69  ### retrieve multiple records (rows) 
  70  >>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100)) 
  71   
  72  ### further filter them 
  73  >>> james = people.find(lambda row: row.name=='James').first() 
  74  >>> print james.id, james.name 
  75  1 James 
  76   
  77  ### check aggregates 
  78  >>> counter = person.id.count() 
  79  >>> print db(person).select(counter).first()(counter) 
  80  1 
  81   
  82  ### delete one record 
  83  >>> james.delete_record() 
  84  1 
  85   
  86  ### delete (drop) entire database table 
  87  >>> person.drop() 
  88   
  89  Supported field types: 
  90  id string text boolean integer double decimal password upload blob time date datetime 
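 
### illustrative example (a sketch, assuming the same db connection as above): 
### a table mixing several of the supported field types 
>>> product = db.define_table('product', 
...     Field('name','string'), 
...     Field('quantity','integer'), 
...     Field('price','decimal(10,2)'), 
...     Field('available','boolean'), 
...     Field('added_on','datetime')) 
>>> product.drop() 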
  91   
  92  Supported DAL URI strings: 
  93  'sqlite://test.db' 
  94  'sqlite:memory' 
  95  'jdbc:sqlite://test.db' 
  96  'mysql://root:none@localhost/test' 
  97  'postgres://mdipierro:none@localhost/test' 
  98  'jdbc:postgres://mdipierro:none@localhost/test' 
  99  'mssql://web2py:none@A64X2/web2py_test' 
 100  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
 101  'oracle://username:password@database' 
 102  'firebird://user:password@server:3050/database' 
 103  'db2://DSN=dsn;UID=user;PWD=pass' 
 104  'firebird://username:password@hostname/database' 
 105  'firebird_embedded://username:password@c://path' 
 106  'informix://user:password@server:3050/database' 
 107  'informixu://user:password@server:3050/database' # unicode informix 
 108  'google:datastore' # for google app engine datastore 
 109  'google:sql' # for google app engine with sql (mysql compatible) 
 110  'teradata://DSN=dsn;UID=user;PWD=pass' # experimental 
 111   
 112  For more info: 
 113  help(DAL) 
 114  help(Field) 
 115  """ 
 116   
 117  ################################################################################### 
 118  # this file only exposes DAL and Field 
 119  ################################################################################### 
 120   
 121  __all__ = ['DAL', 'Field'] 
 122   
 123  MAXCHARLENGTH = 2**15 # not the true limit, but a reasonable default max char length 
 124  DEFAULTLENGTH = {'string':512, 
 125                   'password':512, 
 126                   'upload':512, 
 127                   'text':2**15, 
 128                   'blob':2**31} 
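 
# a minimal illustration (an assumption about usage, not part of the tests): 
# a Field declared without an explicit length falls back to these defaults, 
# e.g. Field('name','string') gets length DEFAULTLENGTH['string'] == 512 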
 129   
 130  import re 
 131  import sys 
 132  import locale 
 133  import os 
 134  import types 
 135  import cPickle 
 136  import datetime 
 137  import threading 
 138  import time 
 139  import cStringIO 
 140  import csv 
 141  import copy 
 142  import socket 
 143  import logging 
 144  import copy_reg 
 145  import base64 
 146  import shutil 
 147  import marshal 
 148  import decimal 
 149  import struct 
 150  import urllib 
 151  import hashlib 
 152  import uuid 
 153  import glob 
 154   
 155  CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, 
 156                   types.MethodType, types.BuiltinMethodType) 
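 
# a minimal illustration (a sketch of intended usage): BaseAdapter.represent() 
# checks values against CALLABLETYPES so that a callable default is invoked at 
# insert time, e.g. 
#   Field('created_on','datetime',default=datetime.datetime.now) 
# is resolved to datetime.datetime.now() when the row is inserted 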
 157   
 158   
 159  ################################################################################### 
 160  # the following checks allow the dal to run standalone, without web2py 
 161  ################################################################################### 
 162  try: 
 163      from utils import web2py_uuid 
 164  except ImportError: 
 165      import uuid 
166 - def web2py_uuid(): return str(uuid.uuid4())
  167   
  168  try: 
  169      import portalocker 
  170      have_portalocker = True 
  171  except ImportError: 
  172      have_portalocker = False 
  173   
  174  try: 
  175      import serializers 
  176      have_serializers = True 
  177  except ImportError: 
  178      have_serializers = False 
  179   
  180  try: 
  181      import validators 
  182      have_validators = True 
  183  except ImportError: 
  184      have_validators = False 
  185   
  186  logger = logging.getLogger("web2py.dal") 
  187  DEFAULT = lambda:0 
  188   
  189  sql_locker = threading.RLock() 
  190  thread = threading.local() 
  191   
  192  # internal representation of tables with field 
  193  # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 
  194   
  195  regex_dbname = re.compile('^(\w+)(\:\w+)*') 
  196  table_field = re.compile('^([\w_]+)\.([\w_]+)$') 
  197  regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 
  198  regex_cleanup_fn = re.compile('[\'"\s;]+') 
  199  string_unpack=re.compile('(?<!\|)\|(?!\|)') 
  200  regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 
  201   
  202   
  203   
  204  # the list of drivers is built on the fly 
  205  # and lists only what is available 
  206  drivers = [] 
  207   
  208  try: 
  209      from new import classobj 
  210      from google.appengine.ext import db as gae 
  211      from google.appengine.api import namespace_manager, rdbms 
  212      from google.appengine.api.datastore_types import Key  ### needed for belongs on ID 
  213      from google.appengine.ext.db.polymodel import PolyModel 
  214      drivers.append('google') 
  215  except ImportError: 
  216      pass 
  217   
  218  if not 'google' in drivers: 
  219   
  220      try: 
  221          from pysqlite2 import dbapi2 as sqlite3 
  222          drivers.append('pysqlite2') 
  223      except ImportError: 
  224          try: 
  225              from sqlite3 import dbapi2 as sqlite3 
  226              drivers.append('SQLite3') 
  227          except ImportError: 
  228              logger.debug('no sqlite3 or pysqlite2.dbapi2 driver') 
  229   
  230      try: 
  231          import contrib.pymysql as pymysql 
  232          drivers.append('pymysql') 
  233      except ImportError: 
  234          logger.debug('no pymysql driver') 
  235   
  236      try: 
  237          import psycopg2 
  238          from psycopg2.extensions import adapt as psycopg2_adapt 
  239          drivers.append('PostgreSQL') 
  240      except ImportError: 
  241          logger.debug('no psycopg2 driver') 
  242   
  243      try: 
  244          import cx_Oracle 
  245          drivers.append('Oracle') 
  246      except ImportError: 
  247          logger.debug('no cx_Oracle driver') 
  248   
  249      try: 
  250          import pyodbc 
  251          drivers.append('MSSQL/DB2') 
  252      except ImportError: 
  253          logger.debug('no MSSQL/DB2 driver') 
  254   
  255      try: 
  256          import kinterbasdb 
  257          drivers.append('Interbase') 
  258      except ImportError: 
  259          logger.debug('no kinterbasdb driver') 
  260   
  261      try: 
  262          import firebirdsql 
  263          drivers.append('Firebird') 
  264      except ImportError: 
  265          logger.debug('no Firebird driver') 
  266   
  267      try: 
  268          import informixdb 
  269          drivers.append('Informix') 
  270          logger.warning('Informix support is experimental') 
  271      except ImportError: 
  272          logger.debug('no informixdb driver') 
  273   
  274      try: 
  275          import sapdb 
  276          drivers.append('SAPDB') 
  277          logger.warning('SAPDB support is experimental') 
  278      except ImportError: 
  279          logger.debug('no sapdb driver') 
  280   
  281      try: 
  282          import cubriddb 
  283          drivers.append('Cubrid') 
  284          logger.warning('Cubrid support is experimental') 
  285      except ImportError: 
  286          logger.debug('no cubriddb driver') 
  287   
  288      try: 
  289          from com.ziclix.python.sql import zxJDBC 
  290          import java.sql 
  291          # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 
  292          from org.sqlite import JDBC # required by java.sql; ensure we have it 
  293          drivers.append('zxJDBC') 
  294          logger.warning('zxJDBC support is experimental') 
  295          is_jdbc = True 
  296      except ImportError: 
  297          logger.debug('no zxJDBC driver') 
  298          is_jdbc = False 
  299   
  300      try: 
  301          import ingresdbi 
  302          drivers.append('Ingres') 
  303      except ImportError: 
  304          logger.debug('no Ingres driver') 
  305      # NOTE could try JDBC....... 
  306   
  307      try: 
  308          import couchdb 
  309          drivers.append('CouchDB') 
  310      except ImportError: 
  311          logger.debug('no couchdb driver') 
  312   
  313      try: 
  314          import pymongo 
  315          drivers.append('mongoDB') 
  316      except: 
  317          logger.debug('no mongoDB driver') 
  318   
319 -def OR(a,b):
320 return a|b
321
322 -def AND(a,b):
323 return a&b
324 325 if 'google' in drivers: 326 327 is_jdbc = False 328
329 - class GAEDecimalProperty(gae.Property):
330 """ 331 GAE decimal implementation 332 """ 333 data_type = decimal.Decimal 334
335 - def __init__(self, precision, scale, **kwargs):
  336          super(GAEDecimalProperty, self).__init__(**kwargs) 
  337          d = '1.' 
  338          for x in range(scale): 
  339              d += '0' 
  340          self.round = decimal.Decimal(d) 
341
342 - def get_value_for_datastore(self, model_instance):
343 value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance) 344 if value: 345 return str(value) 346 else: 347 return None
348
349 - def make_value_from_datastore(self, value):
350 if value: 351 return decimal.Decimal(value).quantize(self.round) 352 else: 353 return None
354
355 - def validate(self, value):
356 value = super(GAEDecimalProperty, self).validate(value) 357 if value is None or isinstance(value, decimal.Decimal): 358 return value 359 elif isinstance(value, basestring): 360 return decimal.Decimal(value) 361 raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
  362   
  363  ################################################################################### 
  364  # class that handles connection pooling (all adapters are derived from this one) 
  365  ################################################################################### 
  366   
367 -class ConnectionPool(object):
368 369 pools = {} 370 check_active_connection = True 371 372 @staticmethod
373 - def set_folder(folder):
374 thread.folder = folder
375 376 # ## this allows gluon to commit/rollback all dbs in this thread 377 378 @staticmethod
379 - def close_all_instances(action):
380 """ to close cleanly databases in a multithreaded environment """ 381 if not hasattr(thread,'instances'): 382 return 383 while thread.instances: 384 instance = thread.instances.pop() 385 getattr(instance,action)() 386 # ## if you want pools, recycle this connection 387 really = True 388 if instance.pool_size: 389 sql_locker.acquire() 390 pool = ConnectionPool.pools[instance.uri] 391 if len(pool) < instance.pool_size: 392 pool.append(instance.connection) 393 really = False 394 sql_locker.release() 395 if really: 396 getattr(instance,'close')() 397 return
398
399 - def find_or_make_work_folder(self):
400 """ this actually does not make the folder. it has to be there """ 401 if hasattr(thread,'folder'): 402 self.folder = thread.folder 403 else: 404 self.folder = thread.folder = '' 405 406 # Creating the folder if it does not exist 407 if False and self.folder and not os.path.exists(self.folder): 408 os.mkdir(self.folder)
409
410 - def pool_connection(self, f, cursor=True):
  411          """ 
  412          this function defines: self.connection and self.cursor (iff cursor is True) 
  413          if self.pool_size>0 it will try to pull the connection from the pool 
  414          if the connection is not active (closed by db server) it will loop 
  415          if not self.pool_size, or no active connection is in the pool, it makes a new one 
  416          """ 
  417          if not self.pool_size: 
  418              self.connection = f() 
  419              self.cursor = cursor and self.connection.cursor() 
  420          else: 
  421              uri = self.uri 
  422              while True: 
  423                  sql_locker.acquire() 
  424                  if not uri in ConnectionPool.pools: 
  425                      ConnectionPool.pools[uri] = [] 
  426                  if ConnectionPool.pools[uri]: 
  427                      self.connection = ConnectionPool.pools[uri].pop() 
  428                      sql_locker.release() 
  429                      self.cursor = cursor and self.connection.cursor() 
  430                      try: 
  431                          if self.cursor and self.check_active_connection: 
  432                              self.execute('SELECT 1;') 
  433                          break 
  434                      except: 
  435                          pass 
  436                  else: 
  437                      sql_locker.release() 
  438                      self.connection = f() 
  439                      self.cursor = cursor and self.connection.cursor() 
  440                      break 
  441          if not hasattr(thread,'instances'): 
  442              thread.instances = [] 
  443          thread.instances.append(self) 
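 
# usage sketch (the URI below is an assumption; any supported backend works): 
# 
# >>> db = DAL('postgres://user:pass@localhost/test', pool_size=10) 
# 
# with pool_size>0, close_all_instances() returns each connection to 
# ConnectionPool.pools[uri] on commit/rollback (up to pool_size) instead of 
# closing it, and pool_connection() recycles those connections, re-checking 
# each with 'SELECT 1;' when check_active_connection is set 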
  444   
  445   
  446  ################################################################################### 
  447  # this is a generic adapter that does nothing; all others are derived from this one 
  448  ################################################################################### 
  449   
450 -class BaseAdapter(ConnectionPool):
451 452 driver = None 453 maxcharlength = MAXCHARLENGTH 454 commit_on_alter_table = False 455 support_distributed_transaction = False 456 uploads_in_blob = False 457 types = { 458 'boolean': 'CHAR(1)', 459 'string': 'CHAR(%(length)s)', 460 'text': 'TEXT', 461 'password': 'CHAR(%(length)s)', 462 'blob': 'BLOB', 463 'upload': 'CHAR(%(length)s)', 464 'integer': 'INTEGER', 465 'double': 'DOUBLE', 466 'decimal': 'DOUBLE', 467 'date': 'DATE', 468 'time': 'TIME', 469 'datetime': 'TIMESTAMP', 470 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 471 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 472 'list:integer': 'TEXT', 473 'list:string': 'TEXT', 474 'list:reference': 'TEXT', 475 } 476
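 
# sketch of how a backend customizes this mapping (MyAdapter is hypothetical): 
# 
# class MyAdapter(BaseAdapter): 
#     # e.g. store 'text' fields in a CLOB column instead of TEXT 
#     types = dict(BaseAdapter.types, text='CLOB') 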
477 - def adapt(self,obj):
478 return "'%s'" % obj.replace("'", "''")
479
480 - def integrity_error(self):
481 return self.driver.IntegrityError
482
483 - def operational_error(self):
484 return self.driver.OperationalError
485
486 - def file_exists(self, filename):
487 """ 488 to be used ONLY for files that on GAE may not be on filesystem 489 """ 490 return os.path.exists(filename)
491
492 - def file_open(self, filename, mode='rb', lock=True):
493 """ 494 to be used ONLY for files that on GAE may not be on filesystem 495 """ 496 fileobj = open(filename,mode) 497 if have_portalocker and lock: 498 if mode in ('r','rb'): 499 portalocker.lock(fileobj,portalocker.LOCK_SH) 500 elif mode in ('w','wb','a'): 501 portalocker.lock(fileobj,portalocker.LOCK_EX) 502 else: 503 fileobj.close() 504 raise RuntimeError, "Unsupported file_open mode" 505 return fileobj
506
507 - def file_close(self, fileobj, unlock=True):
508 """ 509 to be used ONLY for files that on GAE may not be on filesystem 510 """ 511 if fileobj: 512 if have_portalocker and unlock: 513 portalocker.unlock(fileobj) 514 fileobj.close()
515
516 - def file_delete(self, filename):
517 os.unlink(filename)
518
519 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 520 credential_decoder=lambda x:x, driver_args={}, 521 adapter_args={}):
522 self.db = db 523 self.dbengine = "None" 524 self.uri = uri 525 self.pool_size = pool_size 526 self.folder = folder 527 self.db_codec = db_codec 528 class Dummy(object): 529 lastrowid = 1 530 def __getattr__(self, value): 531 return lambda *a, **b: []
532 self.connection = Dummy() 533 self.cursor = Dummy() 534
535 - def sequence_name(self,tablename):
536 return '%s_sequence' % tablename
537
538 - def trigger_name(self,tablename):
539 return '%s_sequence' % tablename
540 541
542 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
543 fields = [] 544 sql_fields = {} 545 sql_fields_aux = {} 546 TFK = {} 547 tablename = table._tablename 548 sortable = 0 549 for field in table: 550 sortable += 1 551 k = field.name 552 if isinstance(field.type,SQLCustomType): 553 ftype = field.type.native or field.type.type 554 elif field.type.startswith('reference'): 555 referenced = field.type[10:].strip() 556 constraint_name = self.constraint_name(tablename, field.name) 557 if hasattr(table,'_primarykey'): 558 rtablename,rfieldname = referenced.split('.') 559 rtable = table._db[rtablename] 560 rfield = rtable[rfieldname] 561 # must be PK reference or unique 562 if rfieldname in rtable._primarykey or rfield.unique: 563 ftype = self.types[rfield.type[:9]] % dict(length=rfield.length) 564 # multicolumn primary key reference? 565 if not rfield.unique and len(rtable._primarykey)>1 : 566 # then it has to be a table level FK 567 if rtablename not in TFK: 568 TFK[rtablename] = {} 569 TFK[rtablename][rfieldname] = field.name 570 else: 571 ftype = ftype + \ 572 self.types['reference FK'] %dict(\ 573 constraint_name=constraint_name, 574 table_name=tablename, 575 field_name=field.name, 576 foreign_key='%s (%s)'%(rtablename, rfieldname), 577 on_delete_action=field.ondelete) 578 else: 579 # make a guess here for circular references 580 id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id' 581 ftype = self.types[field.type[:9]]\ 582 % dict(table_name=tablename, 583 field_name=field.name, 584 constraint_name=constraint_name, 585 foreign_key=referenced + ('(%s)' % id_fieldname), 586 on_delete_action=field.ondelete) 587 elif field.type.startswith('list:reference'): 588 ftype = self.types[field.type[:14]] 589 elif field.type.startswith('decimal'): 590 precision, scale = map(int,field.type[8:-1].split(',')) 591 ftype = self.types[field.type[:7]] % \ 592 dict(precision=precision,scale=scale) 593 elif not field.type in self.types: 594 raise SyntaxError, 'Field: unknown field type: %s for %s' % \ 595 (field.type, field.name) 596 else: 597 ftype = self.types[field.type]\ 598 % dict(length=field.length) 599 if not field.type.startswith('id') and not field.type.startswith('reference'): 600 if field.notnull: 601 ftype += ' NOT NULL' 602 else: 603 ftype += self.ALLOW_NULL() 604 if field.unique: 605 ftype += ' UNIQUE' 606 607 # add to list of fields 608 sql_fields[field.name] = dict(sortable=sortable, 609 type=str(field.type), 610 sql=ftype) 611 612 if isinstance(field.default,(str,int,float)): 613 # caveat: sql_fields and sql_fields_aux differ for default values 614 # sql_fields is used to trigger migrations and sql_fields_aux 615 # are used for create table 616 # the reason is that we do not want to trigger a migration simply 617 # because a default value changes 618 not_null = self.NOT_NULL(field.default,field.type) 619 ftype = ftype.replace('NOT NULL',not_null) 620 sql_fields_aux[field.name] = dict(sql=ftype) 621 622 fields.append('%s %s' % (field.name, ftype)) 623 other = ';' 624 625 # backend-specific extensions to fields 626 if self.dbengine == 'mysql': 627 if not hasattr(table, "_primarykey"): 628 fields.append('PRIMARY KEY(%s)' % table._id.name) 629 other = ' ENGINE=InnoDB CHARACTER SET utf8;' 630 631 fields = ',\n '.join(fields) 632 for rtablename in TFK: 633 rfields = TFK[rtablename] 634 pkeys = table._db[rtablename]._primarykey 635 fkeys = [ rfields[k] for k in pkeys ] 636 fields = fields + ',\n ' + \ 637 self.types['reference TFK'] %\ 638 dict(table_name=tablename, 639 field_name=', '.join(fkeys), 640 
foreign_table=rtablename, 641 foreign_key=', '.join(pkeys), 642 on_delete_action=field.ondelete) 643 644 if hasattr(table,'_primarykey'): 645 query = '''CREATE TABLE %s(\n %s,\n %s) %s''' % \ 646 (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other) 647 else: 648 query = '''CREATE TABLE %s(\n %s\n)%s''' % \ 649 (tablename, fields, other) 650 651 if self.uri.startswith('sqlite:///'): 652 path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' 653 dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding) 654 else: 655 dbpath = self.folder 656 657 if not migrate: 658 return query 659 elif self.uri.startswith('sqlite:memory'): 660 table._dbt = None 661 elif isinstance(migrate, str): 662 table._dbt = os.path.join(dbpath, migrate) 663 else: 664 table._dbt = os.path.join(dbpath, '%s_%s.table' \ 665 % (table._db._uri_hash, tablename)) 666 if table._dbt: 667 table._loggername = os.path.join(dbpath, 'sql.log') 668 logfile = self.file_open(table._loggername, 'a') 669 else: 670 logfile = None 671 if not table._dbt or not self.file_exists(table._dbt): 672 if table._dbt: 673 logfile.write('timestamp: %s\n' 674 % datetime.datetime.today().isoformat()) 675 logfile.write(query + '\n') 676 if not fake_migrate: 677 self.create_sequence_and_triggers(query,table) 678 table._db.commit() 679 if table._dbt: 680 tfile = self.file_open(table._dbt, 'w') 681 cPickle.dump(sql_fields, tfile) 682 self.file_close(tfile) 683 if fake_migrate: 684 logfile.write('faked!\n') 685 else: 686 logfile.write('success!\n') 687 else: 688 tfile = self.file_open(table._dbt, 'r') 689 try: 690 sql_fields_old = cPickle.load(tfile) 691 except EOFError: 692 self.file_close(tfile) 693 self.file_close(logfile) 694 raise RuntimeError, 'File %s appears corrupted' % table._dbt 695 self.file_close(tfile) 696 if sql_fields != sql_fields_old: 697 self.migrate_table(table, 698 sql_fields, sql_fields_old, 699 sql_fields_aux, logfile, 700 fake_migrate=fake_migrate) 701 self.file_close(logfile) 702 return query
703
704 - def migrate_table( 705 self, 706 table, 707 sql_fields, 708 sql_fields_old, 709 sql_fields_aux, 710 logfile, 711 fake_migrate=False, 712 ):
  713          tablename = table._tablename 
  714          def fix(item): 
  715              k,v=item 
  716              if not isinstance(v,dict): 
  717                  v=dict(type='unknown',sql=v) 
  718              return k.lower(),v 
719 ### make sure all field names are lower case to avoid conflicts 720 sql_fields = dict(map(fix,sql_fields.items())) 721 sql_fields_old = dict(map(fix,sql_fields_old.items())) 722 sql_fields_aux = dict(map(fix,sql_fields_aux.items())) 723 724 keys = sql_fields.keys() 725 for key in sql_fields_old: 726 if not key in keys: 727 keys.append(key) 728 if self.dbengine == 'mssql': 729 new_add = '; ALTER TABLE %s ADD ' % tablename 730 else: 731 new_add = ', ADD ' 732 733 metadata_change = False 734 sql_fields_current = copy.copy(sql_fields_old) 735 for key in keys: 736 query = None 737 if not key in sql_fields_old: 738 sql_fields_current[key] = sql_fields[key] 739 query = ['ALTER TABLE %s ADD %s %s;' % \ 740 (tablename, key, 741 sql_fields_aux[key]['sql'].replace(', ', new_add))] 742 metadata_change = True 743 elif self.dbengine == 'sqlite': 744 if key in sql_fields: 745 sql_fields_current[key] = sql_fields[key] 746 metadata_change = True 747 elif not key in sql_fields: 748 del sql_fields_current[key] 749 if not self.dbengine in ('firebird',): 750 query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)] 751 else: 752 query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] 753 metadata_change = True 754 elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ 755 and not isinstance(table[key].type, SQLCustomType) \ 756 and not (table[key].type.startswith('reference') and \ 757 sql_fields[key]['sql'].startswith('INT,') and \ 758 sql_fields_old[key]['sql'].startswith('INT NOT NULL,')): 759 sql_fields_current[key] = sql_fields[key] 760 t = tablename 761 tt = sql_fields_aux[key]['sql'].replace(', ', new_add) 762 if not self.dbengine in ('firebird',): 763 query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), 764 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), 765 'ALTER TABLE %s DROP COLUMN %s;' % (t, key), 766 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 767 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), 768 'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)] 769 else: 770 query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), 771 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), 772 'ALTER TABLE %s DROP %s;' % (t, key), 773 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 774 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), 775 'ALTER TABLE %s DROP %s__tmp;' % (t, key)] 776 metadata_change = True 777 elif sql_fields[key]['type'] != sql_fields_old[key]['type']: 778 sql_fields_current[key] = sql_fields[key] 779 metadata_change = True 780 781 if query: 782 logfile.write('timestamp: %s\n' 783 % datetime.datetime.today().isoformat()) 784 table._db['_lastsql'] = '\n'.join(query) 785 for sub_query in query: 786 logfile.write(sub_query + '\n') 787 if not fake_migrate: 788 self.execute(sub_query) 789 # caveat. mysql, oracle and firebird do not allow multiple alter table 790 # in one transaction so we must commit partial transactions and 791 # update table._dbt after alter table. 792 if table._db._adapter.commit_on_alter_table: 793 table._db.commit() 794 tfile = self.file_open(table._dbt, 'w') 795 cPickle.dump(sql_fields_current, tfile) 796 self.file_close(tfile) 797 logfile.write('success!\n') 798 else: 799 logfile.write('faked!\n') 800 elif metadata_change: 801 tfile = self.file_open(table._dbt, 'w') 802 cPickle.dump(sql_fields_current, tfile) 803 self.file_close(tfile) 804 805 if metadata_change and \ 806 not (query and self.dbengine in ('mysql','oracle','firebird')): 807 table._db.commit() 808 tfile = self.file_open(table._dbt, 'w') 809 cPickle.dump(sql_fields_current, tfile) 810 self.file_close(tfile) 811
812 - def LOWER(self,first):
813 return 'LOWER(%s)' % self.expand(first)
814
815 - def UPPER(self,first):
816 return 'UPPER(%s)' % self.expand(first)
817
818 - def EXTRACT(self,first,what):
819 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
820
821 - def AGGREGATE(self,first,what):
822 return "%s(%s)" % (what,self.expand(first))
823
824 - def JOIN(self):
825 return 'JOIN'
826
827 - def LEFT_JOIN(self):
828 return 'LEFT JOIN'
829
830 - def RANDOM(self):
831 return 'Random()'
832
833 - def NOT_NULL(self,default,field_type):
834 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
835
836 - def COALESCE(self,first,second):
837 expressions = [self.expand(first)]+[self.expand(e) for e in second] 838 return 'COALESCE(%s)' % ','.join(expressions)
839
840 - def COALESCE_ZERO(self,first):
841 return 'COALESCE(%s,0)' % self.expand(first)
842
843 - def RAW(self,first):
844 return first
845
846 - def ALLOW_NULL(self):
847 return ''
848
849 - def SUBSTRING(self,field,parameters):
850 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
851
852 - def PRIMARY_KEY(self,key):
853 return 'PRIMARY KEY(%s)' % key
854
855 - def _drop(self,table,mode):
856 return ['DROP TABLE %s;' % table]
857
858 - def drop(self, table, mode=''):
859 if table._dbt: 860 logfile = self.file_open(table._loggername, 'a') 861 queries = self._drop(table, mode) 862 for query in queries: 863 if table._dbt: 864 logfile.write(query + '\n') 865 self.execute(query) 866 table._db.commit() 867 del table._db[table._tablename] 868 del table._db.tables[table._db.tables.index(table._tablename)] 869 table._db._update_referenced_by(table._tablename) 870 if table._dbt: 871 self.file_delete(table._dbt) 872 logfile.write('success!\n')
873
874 - def _insert(self,table,fields):
875 keys = ','.join(f.name for f,v in fields) 876 values = ','.join(self.expand(v,f.type) for f,v in fields) 877 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
878
879 - def insert(self,table,fields):
880 query = self._insert(table,fields) 881 try: 882 self.execute(query) 883 except Exception, e: 884 if isinstance(e,self.integrity_error_class()): 885 return None 886 raise e 887 if hasattr(table,'_primarykey'): 888 return dict([(k[0].name, k[1]) for k in fields \ 889 if k[0].name in table._primarykey]) 890 id = self.lastrowid(table) 891 if not isinstance(id,int): 892 return id 893 rid = Reference(id) 894 (rid._table, rid._record) = (table, None) 895 return rid
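 
# illustration (values taken from the module docstring example): for 
# person.insert(name='James') the generated statement is equivalent to 
#   INSERT INTO person(name) VALUES ('James'); 
# and the new id is returned wrapped in a Reference 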
896
897 - def bulk_insert(self,table,items):
898 return [self.insert(table,item) for item in items]
899
900 - def NOT(self,first):
901 return '(NOT %s)' % self.expand(first)
902
903 - def AND(self,first,second):
904 return '(%s AND %s)' % (self.expand(first),self.expand(second))
905
906 - def OR(self,first,second):
907 return '(%s OR %s)' % (self.expand(first),self.expand(second))
908
909 - def BELONGS(self,first,second):
910 if isinstance(second,str): 911 return '(%s IN (%s))' % (self.expand(first),second[:-1]) 912 elif second==[] or second==(): 913 return '(1=0)' 914 items =','.join(self.expand(item,first.type) for item in second) 915 return '(%s IN (%s))' % (self.expand(first),items)
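 
# illustration (a sketch, with person as in the module docstring): 
#   db(db.person.id.belongs([1,2,3])) 
# expands through BELONGS to: (person.id IN (1,2,3)) 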
916
917 - def LIKE(self,first,second):
918 return '(%s LIKE %s)' % (self.expand(first),self.expand(second,'string'))
919
920 - def STARTSWITH(self,first,second):
921 return '(%s LIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
922
923 - def ENDSWITH(self,first,second):
924 return '(%s LIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
925
926 - def CONTAINS(self,first,second):
927 if first.type in ('string','text'): 928 key = '%'+str(second).replace('%','%%')+'%' 929 elif first.type.startswith('list:'): 930 key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' 931 return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
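 
# illustration: 'list:' fields are stored bar-encoded (e.g. [1,2] -> '|1|2|'), 
# so field.contains(2) compiles to a LIKE against '%|2|%', with '|' and '%' 
# escaped as above 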
932
933 - def EQ(self,first,second=None):
934 if second is None: 935 return '(%s IS NULL)' % self.expand(first) 936 return '(%s = %s)' % (self.expand(first),self.expand(second,first.type))
937
938 - def NE(self,first,second=None):
939 if second is None: 940 return '(%s IS NOT NULL)' % self.expand(first) 941 return '(%s <> %s)' % (self.expand(first),self.expand(second,first.type))
942
943 - def LT(self,first,second=None):
944 return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))
945
946 - def LE(self,first,second=None):
947 return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))
948
949 - def GT(self,first,second=None):
950 return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))
951
952 - def GE(self,first,second=None):
953 return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))
954
955 - def ADD(self,first,second):
956 return '(%s + %s)' % (self.expand(first),self.expand(second,first.type))
957
958 - def SUB(self,first,second):
959 return '(%s - %s)' % (self.expand(first),self.expand(second,first.type))
960
961 - def MUL(self,first,second):
962 return '(%s * %s)' % (self.expand(first),self.expand(second,first.type))
963
964 - def DIV(self,first,second):
965 return '(%s / %s)' % (self.expand(first),self.expand(second,first.type))
966
967 - def MOD(self,first,second):
968 return '(%s %% %s)' % (self.expand(first),self.expand(second,first.type))
969
970 - def AS(self,first,second):
971 return '%s AS %s' % (self.expand(first),second)
972
973 - def ON(self,first,second):
974 return '%s ON %s' % (self.expand(first),self.expand(second))
975
976 - def INVERT(self,first):
977 return '%s DESC' % self.expand(first)
978
979 - def COMMA(self,first,second):
980 return '%s, %s' % (self.expand(first),self.expand(second))
981
982 - def expand(self,expression,field_type=None):
983 if isinstance(expression,Field): 984 return str(expression) 985 elif isinstance(expression, (Expression, Query)): 986 if not expression.second is None: 987 return expression.op(expression.first, expression.second) 988 elif not expression.first is None: 989 return expression.op(expression.first) 990 elif not isinstance(expression.op,str): 991 return expression.op() 992 else: 993 return '(%s)' % expression.op 994 elif field_type: 995 return self.represent(expression,field_type) 996 elif isinstance(expression,(list,tuple)): 997 return ','.join([self.represent(item,field_type) for item in expression]) 998 else: 999 return str(expression)
1000
1001 - def alias(self,table,alias):
1002 """ 1003 given a table object, makes a new table object 1004 with alias name. 1005 """ 1006 other = copy.copy(table) 1007 other['_ot'] = other._tablename 1008 other['ALL'] = SQLALL(other) 1009 other['_tablename'] = alias 1010 for fieldname in other.fields: 1011 other[fieldname] = copy.copy(other[fieldname]) 1012 other[fieldname]._tablename = alias 1013 other[fieldname].tablename = alias 1014 other[fieldname].table = other 1015 table._db[alias] = other 1016 return other
1017
1018 - def _truncate(self,table,mode = ''):
1019 tablename = table._tablename 1020 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1021
1022 - def truncate(self,table,mode= ' '):
1023 # Prepare functions "write_to_logfile" and "close_logfile" 1024 if table._dbt: 1025 logfile = self.file_open(table._loggername, 'a') 1026 else: 1027 class Logfile(object): 1028 def write(self, value): 1029 pass
1030 def close(self): 1031 pass 1032 logfile = Logfile() 1033 1034 try: 1035 queries = table._db._adapter._truncate(table, mode) 1036 for query in queries: 1037 logfile.write(query + '\n') 1038 self.execute(query) 1039 table._db.commit() 1040 logfile.write('success!\n') 1041 finally: 1042 logfile.close() 1043
1044 - def _update(self,tablename,query,fields):
1045 query = self.filter_tenant(query,[tablename]) 1046 if query: 1047 sql_w = ' WHERE ' + self.expand(query) 1048 else: 1049 sql_w = '' 1050 sql_v = ','.join(['%s=%s' % (field.name, self.expand(value,field.type)) for (field,value) in fields]) 1051 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1052
1053 - def update(self,tablename,query,fields):
1054 sql = self._update(tablename,query,fields) 1055 self.execute(sql) 1056 try: 1057 return self.cursor.rowcount 1058 except: 1059 return None
1060
1061 - def _delete(self,tablename, query):
1062 query = self.filter_tenant(query,[tablename]) 1063 if query: 1064 sql_w = ' WHERE ' + self.expand(query) 1065 else: 1066 sql_w = '' 1067 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1068
1069 - def delete(self,tablename,query):
1070 sql = self._delete(tablename,query) 1071 ### special code to handle CASCADE in SQLite 1072 db = self.db 1073 table = db[tablename] 1074 if self.dbengine=='sqlite' and table._referenced_by: 1075 deleted = [x[table._id.name] for x in db(query).select(table._id)] 1076 ### end special code to handle CASCADE in SQLite 1077 self.execute(sql) 1078 try: 1079 counter = self.cursor.rowcount 1080 except: 1081 counter = None 1082 ### special code to handle CASCADE in SQLite 1083 if self.dbengine=='sqlite' and counter: 1084 for tablename,fieldname in table._referenced_by: 1085 f = db[tablename][fieldname] 1086 if f.type=='reference '+table._tablename and f.ondelete=='CASCADE': 1087 db(db[tablename][fieldname].belongs(deleted)).delete() 1088 ### end special code to handle CASCADE in SQLite 1089 return counter
1090
1091 - def get_table(self,query):
1092 tablenames = self.tables(query) 1093 if len(tablenames)==1: 1094 return tablenames[0] 1095 elif len(tablenames)<1: 1096 raise RuntimeError, "No table selected" 1097 else: 1098 raise RuntimeError, "Too many tables selected"
1099
1100 - def _select(self, query, fields, attributes):
1101 for key in set(attributes.keys())-set(('orderby','groupby','limitby', 1102 'required','cache','left', 1103 'distinct','having', 'join')): 1104 raise SyntaxError, 'invalid select attribute: %s' % key 1105 # ## if not fields specified take them all from the requested tables 1106 new_fields = [] 1107 for item in fields: 1108 if isinstance(item,SQLALL): 1109 new_fields += item.table 1110 else: 1111 new_fields.append(item) 1112 fields = new_fields 1113 tablenames = self.tables(query) 1114 query = self.filter_tenant(query,tablenames) 1115 if not fields: 1116 for table in tablenames: 1117 for field in self.db[table]: 1118 fields.append(field) 1119 else: 1120 for field in fields: 1121 if isinstance(field,basestring) and table_field.match(field): 1122 tn,fn = field.split('.') 1123 field = self.db[tn][fn] 1124 for tablename in self.tables(field): 1125 if not tablename in tablenames: 1126 tablenames.append(tablename) 1127 if len(tablenames) < 1: 1128 raise SyntaxError, 'Set: no tables selected' 1129 sql_f = ', '.join(map(self.expand,fields)) 1130 self._colnames = [c.strip() for c in sql_f.split(', ')] 1131 if query: 1132 sql_w = ' WHERE ' + self.expand(query) 1133 else: 1134 sql_w = '' 1135 sql_o = '' 1136 sql_s = '' 1137 left = attributes.get('left', False) 1138 inner_join = attributes.get('join', False) 1139 distinct = attributes.get('distinct', False) 1140 groupby = attributes.get('groupby', False) 1141 orderby = attributes.get('orderby', False) 1142 having = attributes.get('having', False) 1143 limitby = attributes.get('limitby', False) 1144 if distinct is True: 1145 sql_s += 'DISTINCT' 1146 elif distinct: 1147 sql_s += 'DISTINCT ON (%s)' % distinct 1148 if inner_join: 1149 icommand = self.JOIN() 1150 if not isinstance(inner_join, (tuple, list)): 1151 inner_join = [inner_join] 1152 ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)] 1153 ijoinon = [t for t in inner_join if isinstance(t, Expression)] 1154 ijoinont = [t.first._tablename for t in ijoinon] 1155 iexcluded = [t for t in tablenames if not t in ijoint + ijoinont] 1156 if left: 1157 join = attributes['left'] 1158 command = self.LEFT_JOIN() 1159 if not isinstance(join, (tuple, list)): 1160 join = [join] 1161 joint = [t._tablename for t in join if not isinstance(t,Expression)] 1162 joinon = [t for t in join if isinstance(t, Expression)] 1163 #patch join+left patch (solves problem with ordering in left joins) 1164 tables_to_merge={} 1165 [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon] 1166 joinont = [t.first._tablename for t in joinon] 1167 [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge] 1168 important_tablenames = joint + joinont + tables_to_merge.keys() 1169 excluded = [t for t in tablenames if not t in important_tablenames ] 1170 def alias(t): 1171 return str(self.db[t])
1172 if inner_join and not left: 1173 sql_t = ', '.join(alias(t) for t in iexcluded) 1174 for t in ijoinon: 1175 sql_t += ' %s %s' % (icommand, str(t)) 1176 elif not inner_join and left: 1177 sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()]) 1178 if joint: 1179 sql_t += ' %s %s' % (command, ','.join([t for t in joint])) 1180 for t in joinon: 1181 sql_t += ' %s %s' % (command, str(t)) 1182 elif inner_join and left: 1183 sql_t = ','.join([alias(t) for t in excluded + \ 1184 tables_to_merge.keys() if t in iexcluded ]) 1185 for t in ijoinon: 1186 sql_t += ' %s %s' % (icommand, str(t)) 1187 if joint: 1188 sql_t += ' %s %s' % (command, ','.join([t for t in joint])) 1189 for t in joinon: 1190 sql_t += ' %s %s' % (command, str(t)) 1191 else: 1192 sql_t = ', '.join(alias(t) for t in tablenames) 1193 if groupby: 1194 if isinstance(groupby, (list, tuple)): 1195 groupby = xorify(groupby) 1196 sql_o += ' GROUP BY %s' % self.expand(groupby) 1197 if having: 1198 sql_o += ' HAVING %s' % attributes['having'] 1199 if orderby: 1200 if isinstance(orderby, (list, tuple)): 1201 orderby = xorify(orderby) 1202 if str(orderby) == '<random>': 1203 sql_o += ' ORDER BY %s' % self.RANDOM() 1204 else: 1205 sql_o += ' ORDER BY %s' % self.expand(orderby) 1206 if limitby: 1207 if not orderby and tablenames: 1208 sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])]) 1209 # oracle does not support limitby 1210 return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby) 1211
1212 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1213 if limitby: 1214 (lmin, lmax) = limitby 1215 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1216 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
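 
# illustration: limitby=(10,30) renders here as ' LIMIT 20 OFFSET 10', 
# i.e. lmax-lmin rows starting at offset lmin 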
1217
1218 - def select(self,query,fields,attributes):
1219 """ 1220 Always returns a Rows object, even if it may be empty 1221 """ 1222 def response(sql): 1223 self.execute(sql) 1224 return self.cursor.fetchall()
 1225          sql = self._select(query,fields,attributes) 
 1226          if attributes.get('cache', None): 
 1227              (cache_model, time_expire) = attributes['cache'] 
 1228              del attributes['cache'] 
 1229              key = self.uri + '/' + sql 
 1230              key = (len(key)<=200) and key or hashlib.md5(key).hexdigest() 
 1231              rows = cache_model(key, lambda: response(sql), time_expire) 
 1232          else: 
 1233              rows = response(sql) 
 1234          if isinstance(rows,tuple): 
 1235              rows = list(rows) 
 1236          limitby = attributes.get('limitby',None) or (0,) 
 1237          rows = self.rowslice(rows,limitby[0],None) 
 1238          return self.parse(rows,self._colnames) 
 1239   
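 
# usage sketch (cache.ram assumes the web2py cache object is available): 
# 
# >>> rows = db(db.person).select(cache=(cache.ram, 3600)) 
# 
# the compiled SQL, keyed on the URI (hashed when long), is used as the cache 
# key and the raw rows are cached for 3600 seconds 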
1240 - def _count(self,query,distinct=None):
1241 tablenames = self.tables(query) 1242 query = self.filter_tenant(query,tablenames) 1243 if query: 1244 sql_w = ' WHERE ' + self.expand(query) 1245 else: 1246 sql_w = '' 1247 sql_t = ','.join(tablenames) 1248 if distinct: 1249 if isinstance(distinct,(list,tuple)): 1250 distinct = xorify(distinct) 1251 sql_d = self.expand(distinct) 1252 return 'SELECT count(DISTINCT %s) FROM %s%s;' % (sql_d, sql_t, sql_w) 1253 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1254
1255 - def count(self,query,distinct=None):
1256 self.execute(self._count(query,distinct)) 1257 return self.cursor.fetchone()[0]
1258 1259
1260 - def tables(self,query):
1261 tables = set() 1262 if isinstance(query, Field): 1263 tables.add(query.tablename) 1264 elif isinstance(query, (Expression, Query)): 1265 if not query.first is None: 1266 tables = tables.union(self.tables(query.first)) 1267 if not query.second is None: 1268 tables = tables.union(self.tables(query.second)) 1269 return list(tables)
1270
1271 - def commit(self):
1272 return self.connection.commit()
1273
1274 - def rollback(self):
1275 return self.connection.rollback()
1276
1277 - def close(self):
1278 return self.connection.close()
1279
1280 - def distributed_transaction_begin(self,key):
1281 return
1282
1283 - def prepare(self,key):
1284 self.connection.prepare()
1285
1286 - def commit_prepared(self,key):
1287 self.connection.commit()
1288
1289 - def rollback_prepared(self,key):
1290 self.connection.rollback()
1291
1292 - def concat_add(self,table):
1293 return ', ADD '
1294
1295 - def constraint_name(self, table, fieldname):
1296 return '%s_%s__constraint' % (table,fieldname)
1297
1298 - def create_sequence_and_triggers(self, query, table, **args):
1299 self.execute(query)
1300
1301 - def log_execute(self,*a,**b):
1302 self.db._lastsql = a[0] 1303 t0 = time.time() 1304 ret = self.cursor.execute(*a,**b) 1305 self.db._timings.append((a[0],time.time()-t0)) 1306 return ret
1307
1308 - def execute(self,*a,**b):
1309 return self.log_execute(*a, **b)
1310
1311 - def represent(self, obj, fieldtype):
1312 if isinstance(obj,CALLABLETYPES): 1313 obj = obj() 1314 if isinstance(fieldtype, SQLCustomType): 1315 return fieldtype.encoder(obj) 1316 if isinstance(obj, (Expression, Field)): 1317 return str(obj) 1318 if fieldtype.startswith('list:'): 1319 if not obj: 1320 obj = [] 1321 if not isinstance(obj, (list, tuple)): 1322 obj = [obj] 1323 if isinstance(obj, (list, tuple)): 1324 obj = bar_encode(obj) 1325 if obj is None: 1326 return 'NULL' 1327 if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']: 1328 return 'NULL' 1329 r = self.represent_exceptions(obj,fieldtype) 1330 if not r is None: 1331 return r 1332 if fieldtype == 'boolean': 1333 if obj and not str(obj)[:1].upper() in ['F', '0']: 1334 return "'T'" 1335 else: 1336 return "'F'" 1337 if fieldtype == 'id' or fieldtype == 'integer': 1338 return str(int(obj)) 1339 if fieldtype.startswith('decimal'): 1340 return str(obj) 1341 elif fieldtype.startswith('reference'): # reference 1342 if fieldtype.find('.')>0: 1343 return repr(obj) 1344 elif isinstance(obj, (Row, Reference)): 1345 return str(obj['id']) 1346 return str(int(obj)) 1347 elif fieldtype == 'double': 1348 return repr(float(obj)) 1349 if isinstance(obj, unicode): 1350 obj = obj.encode(self.db_codec) 1351 if fieldtype == 'blob': 1352 obj = base64.b64encode(str(obj)) 1353 elif fieldtype == 'date': 1354 if isinstance(obj, (datetime.date, datetime.datetime)): 1355 obj = obj.isoformat()[:10] 1356 else: 1357 obj = str(obj) 1358 elif fieldtype == 'datetime': 1359 if isinstance(obj, datetime.datetime): 1360 obj = obj.isoformat()[:19].replace('T',' ') 1361 elif isinstance(obj, datetime.date): 1362 obj = obj.isoformat()[:10]+' 00:00:00' 1363 else: 1364 obj = str(obj) 1365 elif fieldtype == 'time': 1366 if isinstance(obj, datetime.time): 1367 obj = obj.isoformat()[:10] 1368 else: 1369 obj = str(obj) 1370 if not isinstance(obj,str): 1371 obj = str(obj) 1372 try: 1373 obj.decode(self.db_codec) 1374 except: 1375 obj = obj.decode('latin1').encode(self.db_codec) 1376 return self.adapt(obj)
1377
1378 - def represent_exceptions(self, obj, fieldtype):
1379 return None
1380
1381 - def lastrowid(self,table):
1382 return None
1383
1384 - def integrity_error_class(self):
1385 return type(None)
1386
1387 - def rowslice(self,rows,minimum=0,maximum=None):
1388 """ by default this function does nothing, overload when db does not do slicing """ 1389 return rows
1390
1391 - def parse(self, rows, colnames, blob_decode=True):
1392 db = self.db 1393 virtualtables = [] 1394 new_rows = [] 1395 for (i,row) in enumerate(rows): 1396 new_row = Row() 1397 for j,colname in enumerate(colnames): 1398 value = row[j] 1399 if not table_field.match(colnames[j]): 1400 if not '_extra' in new_row: 1401 new_row['_extra'] = Row() 1402 new_row['_extra'][colnames[j]] = value 1403 select_as_parser = re.compile("\s+AS\s+(\S+)") 1404 new_column_name = select_as_parser.search(colnames[j]) 1405 if not new_column_name is None: 1406 column_name = new_column_name.groups(0) 1407 setattr(new_row,column_name[0],value) 1408 continue 1409 (tablename, fieldname) = colname.split('.') 1410 table = db[tablename] 1411 field = table[fieldname] 1412 field_type = field.type 1413 if field.type != 'blob' and isinstance(value, str): 1414 try: 1415 value = value.decode(db._db_codec) 1416 except Exception: 1417 pass 1418 if isinstance(value, unicode): 1419 value = value.encode('utf-8') 1420 if not tablename in new_row: 1421 colset = new_row[tablename] = Row() 1422 if tablename not in virtualtables: 1423 virtualtables.append(tablename) 1424 else: 1425 colset = new_row[tablename] 1426 1427 if isinstance(field_type, SQLCustomType): 1428 colset[fieldname] = field_type.decoder(value) 1429 # field_type = field_type.type 1430 elif not isinstance(field_type, str) or value is None: 1431 colset[fieldname] = value 1432 elif isinstance(field_type, str) and \ 1433 field_type.startswith('reference'): 1434 referee = field_type[10:].strip() 1435 if not '.' in referee: 1436 colset[fieldname] = rid = Reference(value) 1437 (rid._table, rid._record) = (db[referee], None) 1438 else: ### reference not by id 1439 colset[fieldname] = value 1440 elif field_type == 'boolean': 1441 if value == True or str(value)[:1].lower() == 't': 1442 colset[fieldname] = True 1443 else: 1444 colset[fieldname] = False 1445 elif field_type == 'date' \ 1446 and (not isinstance(value, datetime.date)\ 1447 or isinstance(value, datetime.datetime)): 1448 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1449 colset[fieldname] = datetime.date(y, m, d) 1450 elif field_type == 'time' \ 1451 and not isinstance(value, datetime.time): 1452 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1453 if len(time_items) == 3: 1454 (h, mi, s) = time_items 1455 else: 1456 (h, mi, s) = time_items + [0] 1457 colset[fieldname] = datetime.time(h, mi, s) 1458 elif field_type == 'datetime'\ 1459 and not isinstance(value, datetime.datetime): 1460 (y, m, d) = map(int,str(value)[:10].strip().split('-')) 1461 time_items = map(int,str(value)[11:19].strip().split(':')[:3]) 1462 if len(time_items) == 3: 1463 (h, mi, s) = time_items 1464 else: 1465 (h, mi, s) = time_items + [0] 1466 colset[fieldname] = datetime.datetime(y, m, d, h, mi, s) 1467 elif field_type == 'blob' and blob_decode: 1468 colset[fieldname] = base64.b64decode(str(value)) 1469 elif field_type.startswith('decimal'): 1470 decimals = int(field_type[8:-1].split(',')[-1]) 1471 if self.dbengine == 'sqlite': 1472 value = ('%.' 
+ str(decimals) + 'f') % value 1473 if not isinstance(value, decimal.Decimal): 1474 value = decimal.Decimal(str(value)) 1475 colset[fieldname] = value 1476 elif field_type.startswith('list:integer'): 1477 if not self.dbengine=='google:datastore': 1478 colset[fieldname] = bar_decode_integer(value) 1479 else: 1480 colset[fieldname] = value 1481 elif field_type.startswith('list:reference'): 1482 if not self.dbengine=='google:datastore': 1483 colset[fieldname] = bar_decode_integer(value) 1484 else: 1485 colset[fieldname] = value 1486 elif field_type.startswith('list:string'): 1487 if not self.dbengine=='google:datastore': 1488 colset[fieldname] = bar_decode_string(value) 1489 else: 1490 colset[fieldname] = value 1491 else: 1492 colset[fieldname] = value 1493 if field_type == 'id': 1494 id = colset[field.name] 1495 colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a) 1496 colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete() 1497 for (referee_table, referee_name) in \ 1498 table._referenced_by: 1499 s = db[referee_table][referee_name] 1500 referee_link = db._referee_name and \ 1501 db._referee_name % dict(table=referee_table,field=referee_name) 1502 if referee_link and not referee_link in colset: 1503 colset[referee_link] = Set(db, s == id) 1504 colset['id'] = id 1505 new_rows.append(new_row) 1506 1507 rowsobj = Rows(db, new_rows, colnames, rawrows=rows) 1508 1509 for tablename in virtualtables: 1510 ### new style virtual fields 1511 table = db[tablename] 1512 fields_virtual = [(f,v) for (f,v) in table.items() if isinstance(v,FieldVirtual)] 1513 fields_lazy = [(f,v) for (f,v) in table.items() if isinstance(v,FieldLazy)] 1514 if fields_virtual or fields_lazy: 1515 for row in rowsobj.records: 1516 box = row[tablename] 1517 for f,v in fields_virtual: 1518 box[f] = v.f(row) 1519 for f,v in fields_lazy: 1520 box[f] = (v.handler or VirtualCommand)(v.f,row) 1521 1522 ### old style virtual fields 1523 for item in table.virtualfields: 1524 try: 1525 rowsobj = rowsobj.setvirtualfields(**{tablename:item}) 1526 except KeyError: 1527 # to avoid breaking virtualfields when partial select 1528 pass 1529 return rowsobj
1530
1531 - def filter_tenant(self,query,tablenames):
1532 fieldname = self.db._request_tenant 1533 for tablename in tablenames: 1534 table = self.db[tablename] 1535 if fieldname in table: 1536 default = table[fieldname].default 1537 if not default is None: 1538 newquery = table[fieldname]==default 1539 if query is None: 1540 query = newquery 1541 else: 1542 query = query&newquery 1543 return query
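 
# sketch (assuming db._request_tenant keeps its default name 'request_tenant'): 
# any table with a field of that name and a non-None default gets 
#   table.request_tenant == default 
# ANDed into every query built against it, which implements row-level 
# multi-tenancy 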
1544 1545 ################################################################################### 1546 # List of all the available adapters, they all extend BaseAdapter 1547 ################################################################################### 1548
1549 -class SQLiteAdapter(BaseAdapter):
1550 1551 driver = globals().get('sqlite3',None) 1552
1553 - def EXTRACT(self,field,what):
1554 return "web2py_extract('%s',%s)" % (what,self.expand(field))
1555 1556 @staticmethod
1557 - def web2py_extract(lookup, s):
1558 table = { 1559 'year': (0, 4), 1560 'month': (5, 7), 1561 'day': (8, 10), 1562 'hour': (11, 13), 1563 'minute': (14, 16), 1564 'second': (17, 19), 1565 } 1566 try: 1567 (i, j) = table[lookup] 1568 return int(s[i:j]) 1569 except: 1570 return None
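 
# illustration: the extraction slices an ISO-formatted value, e.g. 
# 
# >>> SQLiteAdapter.web2py_extract('year', '2011-06-21 10:30:00') 
# 2011 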
1571
1572 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1573 credential_decoder=lambda x:x, driver_args={}, 1574 adapter_args={}):
1575 self.db = db 1576 self.dbengine = "sqlite" 1577 self.uri = uri 1578 self.pool_size = pool_size 1579 self.folder = folder 1580 self.db_codec = db_codec 1581 self.find_or_make_work_folder() 1582 path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' 1583 if uri.startswith('sqlite:memory'): 1584 dbpath = ':memory:' 1585 else: 1586 dbpath = uri.split('://')[1] 1587 if dbpath[0] != '/': 1588 dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath) 1589 if not 'check_same_thread' in driver_args: 1590 driver_args['check_same_thread'] = False 1591 if not 'detect_types' in driver_args: 1592 driver_args['detect_types'] = self.driver.PARSE_DECLTYPES 1593 def connect(dbpath=dbpath, driver_args=driver_args): 1594 return self.driver.Connection(dbpath, **driver_args)
1595 self.pool_connection(connect) 1596 self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
1597
1598 - def _truncate(self,table,mode = ''):
1599 tablename = table._tablename 1600 return ['DELETE FROM %s;' % tablename, 1601 "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
1602
1603 - def lastrowid(self,table):
1604 return self.cursor.lastrowid
1605 1606
1607 -class JDBCSQLiteAdapter(SQLiteAdapter):
1608 1609 driver = globals().get('zxJDBC',None) 1610
1611 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1612 credential_decoder=lambda x:x, driver_args={}, 1613 adapter_args={}):
1614 self.db = db 1615 self.dbengine = "sqlite" 1616 self.uri = uri 1617 self.pool_size = pool_size 1618 self.folder = folder 1619 self.db_codec = db_codec 1620 self.find_or_make_work_folder() 1621 path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' 1622 if uri.startswith('sqlite:memory'): 1623 dbpath = ':memory:' 1624 else: 1625 dbpath = uri.split('://')[1] 1626 if dbpath[0] != '/': 1627 dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath) 1628 def connect(dbpath=dbpath,driver_args=driver_args): 1629 return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath),**driver_args)
1630 # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs 1631 # self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) 1632
1633 - def execute(self,a):
1634 return self.log_execute(a) 1635 1636
1637 -class MySQLAdapter(BaseAdapter):
1638 1639 driver = globals().get('pymysql',None) 1640 maxcharlength = 255 1641 commit_on_alter_table = True 1642 support_distributed_transaction = True 1643 types = { 1644 'boolean': 'CHAR(1)', 1645 'string': 'VARCHAR(%(length)s)', 1646 'text': 'LONGTEXT', 1647 'password': 'VARCHAR(%(length)s)', 1648 'blob': 'LONGBLOB', 1649 'upload': 'VARCHAR(%(length)s)', 1650 'integer': 'INT', 1651 'double': 'DOUBLE', 1652 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 1653 'date': 'DATE', 1654 'time': 'TIME', 1655 'datetime': 'DATETIME', 1656 'id': 'INT AUTO_INCREMENT NOT NULL', 1657 'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 1658 'list:integer': 'LONGTEXT', 1659 'list:string': 'LONGTEXT', 1660 'list:reference': 'LONGTEXT', 1661 } 1662
1663 - def RANDOM(self):
1664 return 'RAND()'
1665
1666 - def SUBSTRING(self,field,parameters):
1667 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1668
1669 - def _drop(self,table,mode):
1670 # breaks db integrity but without this mysql does not drop table 1671 return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']
1672
1673 - def distributed_transaction_begin(self,key):
1674 self.execute('XA START;')
1675
1676 - def prepare(self,key):
1677 self.execute("XA END;") 1678 self.execute("XA PREPARE;")
1679
 1680 - def commit_prepared(self,key):
1681 self.execute("XA COMMIT;")
1682
1683 - def rollback_prepared(self,key):
1684 self.execute("XA ROLLBACK;")
1685
1686 - def concat_add(self,table):
1687 return '; ALTER TABLE %s ADD ' % table
1688
1689 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1690 credential_decoder=lambda x:x, driver_args={}, 1691 adapter_args={}):
1692 self.db = db 1693 self.dbengine = "mysql" 1694 self.uri = uri 1695 self.pool_size = pool_size 1696 self.folder = folder 1697 self.db_codec = db_codec 1698 self.find_or_make_work_folder() 1699 uri = uri.split('://')[1] 1700 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) 1701 if not m: 1702 raise SyntaxError, \ 1703 "Invalid URI string in DAL: %s" % self.uri 1704 user = credential_decoder(m.group('user')) 1705 if not user: 1706 raise SyntaxError, 'User required' 1707 password = credential_decoder(m.group('password')) 1708 if not password: 1709 password = '' 1710 host = m.group('host') 1711 if not host: 1712 raise SyntaxError, 'Host name required' 1713 db = m.group('db') 1714 if not db: 1715 raise SyntaxError, 'Database name required' 1716 port = int(m.group('port') or '3306') 1717 charset = m.group('charset') or 'utf8' 1718 driver_args.update(dict(db=db, 1719 user=credential_decoder(user), 1720 passwd=credential_decoder(password), 1721 host=host, 1722 port=port, 1723 charset=charset)) 1724 def connect(driver_args=driver_args): 1725 return self.driver.connect(**driver_args)
1726 self.pool_connection(connect) 1727 self.execute('SET FOREIGN_KEY_CHECKS=1;') 1728 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
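The URI regex above is dense; a self-contained, doctest-style illustration (all connection values are made up) of how it decomposes a MySQL URI:

>>> import re
>>> pattern = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
>>> m = pattern.match('root:secret@localhost:3306/test?set_encoding=utf8')
>>> print m.group('user'), m.group('host'), m.group('port'), m.group('db'), m.group('charset')
root localhost 3306 test utf8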
1729
1730 - def lastrowid(self,table):
1731 self.execute('select last_insert_id();') 1732 return int(self.cursor.fetchone()[0])
1733
1734 -class PostgreSQLAdapter(BaseAdapter):
1735 1736 driver = globals().get('psycopg2',None) 1737 1738 support_distributed_transaction = True 1739 types = { 1740 'boolean': 'CHAR(1)', 1741 'string': 'VARCHAR(%(length)s)', 1742 'text': 'TEXT', 1743 'password': 'VARCHAR(%(length)s)', 1744 'blob': 'BYTEA', 1745 'upload': 'VARCHAR(%(length)s)', 1746 'integer': 'INTEGER', 1747 'double': 'FLOAT8', 1748 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 1749 'date': 'DATE', 1750 'time': 'TIME', 1751 'datetime': 'TIMESTAMP', 1752 'id': 'SERIAL PRIMARY KEY', 1753 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 1754 'list:integer': 'TEXT', 1755 'list:string': 'TEXT', 1756 'list:reference': 'TEXT', 1757 } 1758
1759 - def adapt(self,obj):
1760 return psycopg2_adapt(obj).getquoted()
1761
1762 - def sequence_name(self,table):
1763 return '%s_id_Seq' % table
1764
1765 - def RANDOM(self):
1766 return 'RANDOM()'
1767
1768 - def distributed_transaction_begin(self,key):
1769 return
1770
1771 - def prepare(self,key):
1772 self.execute("PREPARE TRANSACTION '%s';" % key)
1773
1774 - def commit_prepared(self,key):
1775 self.execute("COMMIT PREPARED '%s';" % key)
1776
1777 - def rollback_prepared(self,key):
1778 self.execute("ROLLBACK PREPARED '%s';" % key)
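Together, prepare/commit_prepared/rollback_prepared expose PostgreSQL's two-phase commit. A hedged sketch of the SQL they emit, for a hypothetical transaction key:

>>> key = 'trx_42'  # caller-chosen distributed-transaction id
>>> print "PREPARE TRANSACTION '%s';" % key
PREPARE TRANSACTION 'trx_42';
>>> print "COMMIT PREPARED '%s';" % key
COMMIT PREPARED 'trx_42';
>>> print "ROLLBACK PREPARED '%s';" % key
ROLLBACK PREPARED 'trx_42';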
1779
1780 - def create_sequence_and_triggers(self, query, table, **args):
1781 # following lines should only be executed if table._sequence_name does not exist 1782 # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 1783 # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 1784 # % (table._tablename, table._fieldname, table._sequence_name)) 1785 self.execute(query)
1786
1787 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1788 credential_decoder=lambda x:x, driver_args={}, 1789 adapter_args={}):
1790 self.db = db 1791 self.dbengine = "postgres" 1792 self.uri = uri 1793 self.pool_size = pool_size 1794 self.folder = folder 1795 self.db_codec = db_codec 1796 self.find_or_make_work_folder() 1797 uri = uri.split('://')[1] 1798 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) 1799 if not m: 1800 raise SyntaxError, "Invalid URI string in DAL" 1801 user = credential_decoder(m.group('user')) 1802 if not user: 1803 raise SyntaxError, 'User required' 1804 password = credential_decoder(m.group('password')) 1805 if not password: 1806 password = '' 1807 host = m.group('host') 1808 if not host: 1809 raise SyntaxError, 'Host name required' 1810 db = m.group('db') 1811 if not db: 1812 raise SyntaxError, 'Database name required' 1813 port = m.group('port') or '5432' 1814 sslmode = m.group('sslmode') 1815 if sslmode: 1816 msg = ("dbname='%s' user='%s' host='%s' " 1817 "port=%s password='%s' sslmode='%s'") \ 1818 % (db, user, host, port, password, sslmode) 1819 else: 1820 msg = ("dbname='%s' user='%s' host='%s' " 1821 "port=%s password='%s'") \ 1822 % (db, user, host, port, password) 1823 def connect(msg=msg,driver_args=driver_args): 1824 return self.driver.connect(msg,**driver_args)
1825 self.pool_connection(connect) 1826 self.connection.set_client_encoding('UTF8') 1827 self.execute("SET standard_conforming_strings=on;")
1828
1829 - def lastrowid(self,table):
1830 self.execute("select currval('%s')" % table._sequence_name) 1831 return int(self.cursor.fetchone()[0])
1832
1833 - def LIKE(self,first,second):
1834 return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))
1835
1836 - def STARTSWITH(self,first,second):
1837 return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
1838
1839 - def ENDSWITH(self,first,second):
1840 return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
1841
1842 - def CONTAINS(self,first,second):
1843 if first.type in ('string','text'): 1844 key = '%'+str(second).replace('%','%%')+'%' 1845 elif first.type.startswith('list:'): 1846 key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' 1847 return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
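The two key formats built by CONTAINS are easiest to see with concrete (invented) values: plain string/text fields get a simple substring pattern, while list: fields match against the pipe-delimited storage format:

>>> second = 'red'
>>> print '%'+str(second).replace('%','%%')+'%'
%red%
>>> print '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
%|red|%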
1848
1849 -class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
1850
1851 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1852 credential_decoder=lambda x:x, driver_args={}, 1853 adapter_args={}):
1854 self.db = db 1855 self.dbengine = "postgres" 1856 self.uri = uri 1857 self.pool_size = pool_size 1858 self.folder = folder 1859 self.db_codec = db_codec 1860 self.find_or_make_work_folder() 1861 uri = uri.split('://')[1] 1862 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri) 1863 if not m: 1864 raise SyntaxError, "Invalid URI string in DAL" 1865 user = credential_decoder(m.group('user')) 1866 if not user: 1867 raise SyntaxError, 'User required' 1868 password = credential_decoder(m.group('password')) 1869 if not password: 1870 password = '' 1871 host = m.group('host') 1872 if not host: 1873 raise SyntaxError, 'Host name required' 1874 db = m.group('db') 1875 if not db: 1876 raise SyntaxError, 'Database name required' 1877 port = m.group('port') or '5432' 1878 msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) 1879 def connect(msg=msg,driver_args=driver_args): 1880 return self.driver.connect(*msg,**driver_args)
1881 self.pool_connection(connect) 1882 self.connection.set_client_encoding('UTF8') 1883 self.execute('BEGIN;') 1884 self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
1885 1886
1887 -class OracleAdapter(BaseAdapter):
1888 1889 driver = globals().get('cx_Oracle',None) 1890 1891 commit_on_alter_table = False 1892 types = { 1893 'boolean': 'CHAR(1)', 1894 'string': 'VARCHAR2(%(length)s)', 1895 'text': 'CLOB', 1896 'password': 'VARCHAR2(%(length)s)', 1897 'blob': 'CLOB', 1898 'upload': 'VARCHAR2(%(length)s)', 1899 'integer': 'INT', 1900 'double': 'FLOAT', 1901 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 1902 'date': 'DATE', 1903 'time': 'CHAR(8)', 1904 'datetime': 'DATE', 1905 'id': 'NUMBER PRIMARY KEY', 1906 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 1907 'list:integer': 'CLOB', 1908 'list:string': 'CLOB', 1909 'list:reference': 'CLOB', 1910 } 1911
1912 - def sequence_name(self,tablename):
1913 return '%s_sequence' % tablename
1914
1915 - def trigger_name(self,tablename):
1916 return '%s_trigger' % tablename
1917
1918 - def LEFT_JOIN(self):
1919 return 'LEFT OUTER JOIN'
1920
1921 - def RANDOM(self):
1922 return 'dbms_random.value'
1923
1924 - def NOT_NULL(self,default,field_type):
1925 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
1926
1927 - def _drop(self,table,mode):
1928 sequence_name = table._sequence_name 1929 return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
1930
1931 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1932 if limitby: 1933 (lmin, lmax) = limitby 1934 if len(sql_w) > 1: 1935 sql_w_row = sql_w + ' AND w_row > %i' % lmin 1936 else: 1937 sql_w_row = 'WHERE w_row > %i' % lmin 1938 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 1939 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
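This Oracle dialect predates OFFSET/FETCH, so limitby is emulated with two nested ROWNUM filters: the inner query caps the scan at lmax rows, the outer one discards rows numbered lmin and below. For limitby=(10,20) the emitted statement takes roughly this shape (identifiers assumed):

# SELECT ... FROM (SELECT w_tmp.*, ROWNUM w_row
#                  FROM (SELECT ... FROM person) w_tmp
#                  WHERE ROWNUM<=20) person
# WHERE w_row > 10;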
1940
1941 - def constraint_name(self, tablename, fieldname):
1942 constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) 1943 if len(constraint_name)>30: 1944 constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) 1945 return constraint_name
1946
1947 - def represent_exceptions(self, obj, fieldtype):
1948 if fieldtype == 'blob': 1949 obj = base64.b64encode(str(obj)) 1950 return ":CLOB('%s')" % obj 1951 elif fieldtype == 'date': 1952 if isinstance(obj, (datetime.date, datetime.datetime)): 1953 obj = obj.isoformat()[:10] 1954 else: 1955 obj = str(obj) 1956 return "to_date('%s','yyyy-mm-dd')" % obj 1957 elif fieldtype == 'datetime': 1958 if isinstance(obj, datetime.datetime): 1959 obj = obj.isoformat()[:19].replace('T',' ') 1960 elif isinstance(obj, datetime.date): 1961 obj = obj.isoformat()[:10]+' 00:00:00' 1962 else: 1963 obj = str(obj) 1964 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 1965 return None
1966
1967 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1968 credential_decoder=lambda x:x, driver_args={}, 1969 adapter_args={}):
1970 self.db = db 1971 self.dbengine = "oracle" 1972 self.uri = uri 1973 self.pool_size = pool_size 1974 self.folder = folder 1975 self.db_codec = db_codec 1976 self.find_or_make_work_folder() 1977 uri = uri.split('://')[1] 1978 if not 'threaded' in driver_args: 1979 driver_args['threaded']=True 1980 def connect(uri=uri,driver_args=driver_args): 1981 return self.driver.connect(uri,**driver_args)
1982 self.pool_connection(connect) 1983 self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 1984 self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
1985 oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") 1986
1987 - def execute(self, command):
1988 args = [] 1989 i = 1 1990 while True: 1991 m = self.oracle_fix.match(command) 1992 if not m: 1993 break 1994 command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] 1995 args.append(m.group('clob')[6:-2].replace("''", "'")) 1996 i += 1 1997 if command[-1:]==';': 1998 command = command[:-1] 1999 return self.log_execute(command, args)
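A self-contained replay of the rewrite loop above (the INSERT statement is invented), showing how an inline :CLOB('...') literal produced by represent_exceptions becomes a numbered bind variable whose payload is moved into args:

import re
oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
command = "INSERT INTO t(b) VALUES (:CLOB('aGVsbG8='));"
args, i = [], 1
while True:
    m = oracle_fix.match(command)
    if not m:
        break
    # swap the literal for a positional bind and keep its payload
    command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
    args.append(m.group('clob')[6:-2].replace("''", "'"))
    i += 1
print command    # INSERT INTO t(b) VALUES (:1);
print args       # ['aGVsbG8=']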
2000
2001 - def create_sequence_and_triggers(self, query, table, **args):
2002 tablename = table._tablename 2003 sequence_name = table._sequence_name 2004 trigger_name = table._trigger_name 2005 self.execute(query) 2006 self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name) 2007 self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))
2008
2009 - def lastrowid(self,table):
2010 sequence_name = table._sequence_name 2011 self.execute('SELECT %s.currval FROM dual;' % sequence_name) 2012 return int(self.cursor.fetchone()[0])
2013 2014
2015 -class MSSQLAdapter(BaseAdapter):
2016 2017 driver = globals().get('pyodbc',None) 2018 2019 types = { 2020 'boolean': 'BIT', 2021 'string': 'VARCHAR(%(length)s)', 2022 'text': 'TEXT', 2023 'password': 'VARCHAR(%(length)s)', 2024 'blob': 'IMAGE', 2025 'upload': 'VARCHAR(%(length)s)', 2026 'integer': 'INT', 2027 'double': 'FLOAT', 2028 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2029 'date': 'DATETIME', 2030 'time': 'CHAR(8)', 2031 'datetime': 'DATETIME', 2032 'id': 'INT IDENTITY PRIMARY KEY', 2033 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2034 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2035 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2036 'list:integer': 'TEXT', 2037 'list:string': 'TEXT', 2038 'list:reference': 'TEXT', 2039 } 2040
2041 - def EXTRACT(self,field,what):
2042 return "DATEPART(%s,%s)" % (what, self.expand(field))
2043
2044 - def LEFT_JOIN(self):
2045 return 'LEFT OUTER JOIN'
2046
2047 - def RANDOM(self):
2048 return 'NEWID()'
2049
2050 - def ALLOW_NULL(self):
2051 return ' NULL'
2052
2053 - def SUBSTRING(self,field,parameters):
2054 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
2055
2056 - def PRIMARY_KEY(self,key):
2057 return 'PRIMARY KEY CLUSTERED (%s)' % key
2058
2059 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2060 if limitby: 2061 (lmin, lmax) = limitby 2062 sql_s += ' TOP %i' % lmax 2063 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
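T-SQL TOP can cap the row count but cannot skip leading rows, so the generated query always fetches lmax rows; the rowslice() override further down exists so the caller can drop the first lmin client-side. A toy illustration with stand-in data:

rows = range(20)          # pretend the driver returned TOP 20 records
lmin, lmax = 10, 20       # assumed limitby bounds
print rows[lmin:lmax]     # the ten records the caller actually sees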
2064
2065 - def represent_exceptions(self, obj, fieldtype):
2066 if fieldtype == 'boolean': 2067 if obj and not str(obj)[0].upper() == 'F': 2068 return '1' 2069 else: 2070 return '0' 2071 return None
2072
2073 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2074 credential_decoder=lambda x:x, driver_args={}, 2075 adapter_args={}, fake_connect=False):
2076 self.db = db 2077 self.dbengine = "mssql" 2078 self.uri = uri 2079 self.pool_size = pool_size 2080 self.folder = folder 2081 self.db_codec = db_codec 2082 self.find_or_make_work_folder() 2083 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 2084 uri = uri.split('://')[1] 2085 if '@' not in uri: 2086 try: 2087 m = re.compile('^(?P<dsn>.+)$').match(uri) 2088 if not m: 2089 raise SyntaxError, \ 2090 'Parsing uri string(%s) has no result' % self.uri 2091 dsn = m.group('dsn') 2092 if not dsn: 2093 raise SyntaxError, 'DSN required' 2094 except SyntaxError, e: 2095 logger.error('NdGpatch error') 2096 raise e 2097 cnxn = 'DSN=%s' % dsn 2098 else: 2099 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri) 2100 if not m: 2101 raise SyntaxError, \ 2102 "Invalid URI string in DAL: %s" % uri 2103 user = credential_decoder(m.group('user')) 2104 if not user: 2105 raise SyntaxError, 'User required' 2106 password = credential_decoder(m.group('password')) 2107 if not password: 2108 password = '' 2109 host = m.group('host') 2110 if not host: 2111 raise SyntaxError, 'Host name required' 2112 db = m.group('db') 2113 if not db: 2114 raise SyntaxError, 'Database name required' 2115 port = m.group('port') or '1433' 2116 # Parse the optional url name-value arg pairs after the '?' 2117 # (in the form of arg1=value1&arg2=value2&...) 2118 # Default values (drivers like FreeTDS insist on uppercase parameter keys) 2119 argsdict = { 'DRIVER':'{SQL Server}' } 2120 urlargs = m.group('urlargs') or '' 2121 argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 2122 for argmatch in argpattern.finditer(urlargs): 2123 argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') 2124 urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()]) 2125 cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ 2126 % (host, port, db, user, password, urlargs) 2127 def connect(cnxn=cnxn,driver_args=driver_args): 2128 return self.driver.connect(cnxn,**driver_args)
2129 if not fake_connect: 2130 self.pool_connection(connect)
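The optional ?arg=value tail of an mssql URI is folded into the ODBC connection string; a minimal, hypothetical run of the pair-parsing loop above:

import re
argsdict = {'DRIVER': '{SQL Server}'}   # default, as in the code above
urlargs = 'DRIVER={FreeTDS}'            # assumed tail of the URI
argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
for argmatch in argpattern.finditer(urlargs):
    argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
print ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
# DRIVER={FreeTDS}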
2131
2132 - def lastrowid(self,table):
2133 #self.execute('SELECT @@IDENTITY;') 2134 self.execute('SELECT SCOPE_IDENTITY();') 2135 return int(self.cursor.fetchone()[0])
2136
2137 - def integrity_error_class(self):
2138 return pyodbc.IntegrityError
2139
2140 - def rowslice(self,rows,minimum=0,maximum=None):
2141 if maximum is None: 2142 return rows[minimum:] 2143 return rows[minimum:maximum]
2144 2145
2146 -class MSSQL2Adapter(MSSQLAdapter):
2147 types = { 2148 'boolean': 'CHAR(1)', 2149 'string': 'NVARCHAR(%(length)s)', 2150 'text': 'NTEXT', 2151 'password': 'NVARCHAR(%(length)s)', 2152 'blob': 'IMAGE', 2153 'upload': 'NVARCHAR(%(length)s)', 2154 'integer': 'INT', 2155 'double': 'FLOAT', 2156 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2157 'date': 'DATETIME', 2158 'time': 'CHAR(8)', 2159 'datetime': 'DATETIME', 2160 'id': 'INT IDENTITY PRIMARY KEY', 2161 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2162 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2163 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2164 'list:integer': 'NTEXT', 2165 'list:string': 'NTEXT', 2166 'list:reference': 'NTEXT', 2167 } 2168
2169 - def represent(self, obj, fieldtype):
2170 value = BaseAdapter.represent(self, obj, fieldtype) 2171 if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'": 2172 value = 'N'+value 2173 return value
2174
2175 - def execute(self,a):
2176 return self.log_execute(a.decode('utf8'))
2177 2178
2179 -class FireBirdAdapter(BaseAdapter):
2180 2181 driver = globals().get('pyodbc',None) 2182 2183 commit_on_alter_table = False 2184 support_distributed_transaction = True 2185 types = { 2186 'boolean': 'CHAR(1)', 2187 'string': 'VARCHAR(%(length)s)', 2188 'text': 'BLOB SUB_TYPE 1', 2189 'password': 'VARCHAR(%(length)s)', 2190 'blob': 'BLOB SUB_TYPE 0', 2191 'upload': 'VARCHAR(%(length)s)', 2192 'integer': 'INTEGER', 2193 'double': 'DOUBLE PRECISION', 2194 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 2195 'date': 'DATE', 2196 'time': 'TIME', 2197 'datetime': 'TIMESTAMP', 2198 'id': 'INTEGER PRIMARY KEY', 2199 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2200 'list:integer': 'BLOB SUB_TYPE 1', 2201 'list:string': 'BLOB SUB_TYPE 1', 2202 'list:reference': 'BLOB SUB_TYPE 1', 2203 } 2204
2205 - def sequence_name(self,tablename):
2206 return 'genid_%s' % tablename
2207
2208 - def trigger_name(self,tablename):
2209 return 'trg_id_%s' % tablename
2210
2211 - def RANDOM(self):
2212 return 'RAND()'
2213
2214 - def NOT_NULL(self,default,field_type):
2215 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
2216
2217 - def SUBSTRING(self,field,parameters):
2218 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
2219
2220 - def _drop(self,table,mode):
2221 sequence_name = table._sequence_name 2222 return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
2223
2224 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2225 if limitby: 2226 (lmin, lmax) = limitby 2227 sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin) 2228 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2229
2230 - def _truncate(self,table,mode = ''):
2231 return ['DELETE FROM %s;' % table._tablename, 2232 'SET GENERATOR %s TO 0;' % table._sequence_name]
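Firebird has no TRUNCATE statement, and ids come from a generator rather than an autoincrement column, so truncation is a DELETE plus a generator reset. For a hypothetical table 'person' with sequence 'genid_person', _truncate returns:

# DELETE FROM person;
# SET GENERATOR genid_person TO 0;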
2233
2234 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2235 credential_decoder=lambda x:x, driver_args={}, 2236 adapter_args={}):
2237 self.db = db 2238 self.dbengine = "firebird" 2239 self.uri = uri 2240 self.pool_size = pool_size 2241 self.folder = folder 2242 self.db_codec = db_codec 2243 self.find_or_make_work_folder() 2244 uri = uri.split('://')[1] 2245 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri) 2246 if not m: 2247 raise SyntaxError, "Invalid URI string in DAL: %s" % uri 2248 user = credential_decoder(m.group('user')) 2249 if not user: 2250 raise SyntaxError, 'User required' 2251 password = credential_decoder(m.group('password')) 2252 if not password: 2253 password = '' 2254 host = m.group('host') 2255 if not host: 2256 raise SyntaxError, 'Host name required' 2257 port = int(m.group('port') or 3050) 2258 db = m.group('db') 2259 if not db: 2260 raise SyntaxError, 'Database name required' 2261 charset = m.group('charset') or 'UTF8' 2262 driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db), 2263 user = user, # already decoded above 2264 password = password, # already decoded above 2265 charset = charset)) 2266 if adapter_args.has_key('driver_name'): 2267 if adapter_args['driver_name'] == 'kinterbasdb': 2268 self.driver = kinterbasdb 2269 elif adapter_args['driver_name'] == 'firebirdsql': 2270 self.driver = firebirdsql 2271 else: 2272 self.driver = kinterbasdb 2273 def connect(driver_args=driver_args): 2274 return self.driver.connect(**driver_args)
2275 self.pool_connection(connect)
2276
2277 - def create_sequence_and_triggers(self, query, table, **args):
2278 tablename = table._tablename 2279 sequence_name = table._sequence_name 2280 trigger_name = table._trigger_name 2281 self.execute(query) 2282 self.execute('create generator %s;' % sequence_name) 2283 self.execute('set generator %s to 0;' % sequence_name) 2284 self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
2285
2286 - def lastrowid(self,table):
2287 sequence_name = table._sequence_name 2288 self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) 2289 return int(self.cursor.fetchone()[0])
2290 2291
2292 -class FireBirdEmbeddedAdapter(FireBirdAdapter):
2293
2294 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2295 credential_decoder=lambda x:x, driver_args={}, 2296 adapter_args={}):
2297 self.db = db 2298 self.dbengine = "firebird" 2299 self.uri = uri 2300 self.pool_size = pool_size 2301 self.folder = folder 2302 self.db_codec = db_codec 2303 self.find_or_make_work_folder() 2304 uri = uri.split('://')[1] 2305 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) 2306 if not m: 2307 raise SyntaxError, \ 2308 "Invalid URI string in DAL: %s" % self.uri 2309 user = credential_decoder(m.group('user')) 2310 if not user: 2311 raise SyntaxError, 'User required' 2312 password = credential_decoder(m.group('password')) 2313 if not password: 2314 password = '' 2315 pathdb = m.group('path') 2316 if not pathdb: 2317 raise SyntaxError, 'Path required' 2318 charset = m.group('charset') 2319 if not charset: 2320 charset = 'UTF8' 2321 host = '' 2322 driver_args.update(dict(host=host, 2323 database=pathdb, 2324 user=user, # already decoded above 2325 password=password, # already decoded above 2326 charset=charset)) 2327 #def connect(driver_args=driver_args): 2328 # return kinterbasdb.connect(**driver_args) 2329 if adapter_args.has_key('driver_name'): 2330 if adapter_args['driver_name'] == 'kinterbasdb': 2331 self.driver = kinterbasdb 2332 elif adapter_args['driver_name'] == 'firebirdsql': 2333 self.driver = firebirdsql 2334 else: 2335 self.driver = kinterbasdb 2336 def connect(driver_args=driver_args): 2337 return self.driver.connect(**driver_args)
2338 self.pool_connection(connect)
2339 2340
2341 -class InformixAdapter(BaseAdapter):
2342 2343 driver = globals().get('informixdb',None) 2344 2345 types = { 2346 'boolean': 'CHAR(1)', 2347 'string': 'VARCHAR(%(length)s)', 2348 'text': 'BLOB SUB_TYPE 1', 2349 'password': 'VARCHAR(%(length)s)', 2350 'blob': 'BLOB SUB_TYPE 0', 2351 'upload': 'VARCHAR(%(length)s)', 2352 'integer': 'INTEGER', 2353 'double': 'FLOAT', 2354 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2355 'date': 'DATE', 2356 'time': 'CHAR(8)', 2357 'datetime': 'DATETIME', 2358 'id': 'SERIAL', 2359 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2360 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 2361 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', 2362 'list:integer': 'BLOB SUB_TYPE 1', 2363 'list:string': 'BLOB SUB_TYPE 1', 2364 'list:reference': 'BLOB SUB_TYPE 1', 2365 } 2366
2367 - def RANDOM(self):
2368 return 'Random()'
2369
2370 - def NOT_NULL(self,default,field_type):
2371 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
2372
2373 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2374 if limitby: 2375 (lmin, lmax) = limitby 2376 fetch_amt = lmax - lmin 2377 dbms_version = int(self.connection.dbms_version.split('.')[0]) 2378 if lmin and (dbms_version >= 10): 2379 # Requires Informix 10.0+ 2380 sql_s += ' SKIP %d' % (lmin, ) 2381 if fetch_amt and (dbms_version >= 9): 2382 # Requires Informix 9.0+ 2383 sql_s += ' FIRST %d' % (fetch_amt, ) 2384 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2385
2386 - def represent_exceptions(self, obj, fieldtype):
2387 if fieldtype == 'date': 2388 if isinstance(obj, (datetime.date, datetime.datetime)): 2389 obj = obj.isoformat()[:10] 2390 else: 2391 obj = str(obj) 2392 return "to_date('%s','yyyy-mm-dd')" % obj 2393 elif fieldtype == 'datetime': 2394 if isinstance(obj, datetime.datetime): 2395 obj = obj.isoformat()[:19].replace('T',' ') 2396 elif isinstance(obj, datetime.date): 2397 obj = obj.isoformat()[:10]+' 00:00:00' 2398 else: 2399 obj = str(obj) 2400 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 2401 return None
2402
2403 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2404 credential_decoder=lambda x:x, driver_args={}, 2405 adapter_args={}):
2406 self.db = db 2407 self.dbengine = "informix" 2408 self.uri = uri 2409 self.pool_size = pool_size 2410 self.folder = folder 2411 self.db_codec = db_codec 2412 self.find_or_make_work_folder() 2413 uri = uri.split('://')[1] 2414 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri) 2415 if not m: 2416 raise SyntaxError, \ 2417 "Invalid URI string in DAL: %s" % self.uri 2418 user = credential_decoder(m.group('user')) 2419 if not user: 2420 raise SyntaxError, 'User required' 2421 password = credential_decoder(m.group('password')) 2422 if not password: 2423 password = '' 2424 host = m.group('host') 2425 if not host: 2426 raise SyntaxError, 'Host name required' 2427 db = m.group('db') 2428 if not db: 2429 raise SyntaxError, 'Database name required' 2430 # user and password were already passed through credential_decoder 2431 # above; decoding them a second time would corrupt real credentials 2432 dsn = '%s@%s' % (db,host) 2433 driver_args.update(dict(user=user,password=password,autocommit=True)) 2434 def connect(dsn=dsn,driver_args=driver_args): 2435 return self.driver.connect(dsn,**driver_args)
2436 self.pool_connection(connect)
2437
2438 - def execute(self,command):
2439 if command[-1:]==';': 2440 command = command[:-1] 2441 return self.log_execute(command)
2442
2443 - def lastrowid(self,table):
2444 return self.cursor.sqlerrd[1]
2445
2446 - def integrity_error_class(self):
2447 return informixdb.IntegrityError
2448 2449
2450 -class DB2Adapter(BaseAdapter):
2451 2452 driver = globals().get('pyodbc',None) 2453 2454 types = { 2455 'boolean': 'CHAR(1)', 2456 'string': 'VARCHAR(%(length)s)', 2457 'text': 'CLOB', 2458 'password': 'VARCHAR(%(length)s)', 2459 'blob': 'BLOB', 2460 'upload': 'VARCHAR(%(length)s)', 2461 'integer': 'INT', 2462 'double': 'DOUBLE', 2463 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2464 'date': 'DATE', 2465 'time': 'TIME', 2466 'datetime': 'TIMESTAMP', 2467 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 2468 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2469 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2470 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2471 'list:integer': 'CLOB', 2472 'list:string': 'CLOB', 2473 'list:reference': 'CLOB', 2474 } 2475
2476 - def LEFT_JOIN(self):
2477 return 'LEFT OUTER JOIN'
2478
2479 - def RANDOM(self):
2480 return 'RAND()'
2481
2482 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2483 if limitby: 2484 (lmin, lmax) = limitby 2485 sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax 2486 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2487
2488 - def represent_exceptions(self, obj, fieldtype):
2489 if fieldtype == 'blob': 2490 obj = base64.b64encode(str(obj)) 2491 return "BLOB('%s')" % obj 2492 elif fieldtype == 'datetime': 2493 if isinstance(obj, datetime.datetime): 2494 obj = obj.isoformat()[:19].replace('T','-').replace(':','.') 2495 elif isinstance(obj, datetime.date): 2496 obj = obj.isoformat()[:10]+'-00.00.00' 2497 return "'%s'" % obj 2498 return None
2499
2500 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2501 credential_decoder=lambda x:x, driver_args={}, 2502 adapter_args={}):
2503 self.db = db 2504 self.dbengine = "db2" 2505 self.uri = uri 2506 self.pool_size = pool_size 2507 self.folder = folder 2508 self.db_codec = db_codec 2509 self.find_or_make_work_folder() 2510 cnxn = uri.split('://', 1)[1] 2511 def connect(cnxn=cnxn,driver_args=driver_args): 2512 return self.driver.connect(cnxn,**driver_args)
2513 self.pool_connection(connect)
2514
2515 - def execute(self,command):
2516 if command[-1:]==';': 2517 command = command[:-1] 2518 return self.log_execute(command)
2519
2520 - def lastrowid(self,table):
2521 self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) 2522 return int(self.cursor.fetchone()[0])
2523
2524 - def rowslice(self,rows,minimum=0,maximum=None):
2525 if maximum is None: 2526 return rows[minimum:] 2527 return rows[minimum:maximum]
2528 2529
2530 -class TeradataAdapter(DB2Adapter):
2531 2532 driver = globals().get('pyodbc',None) 2533 2534 types = { 2535 'boolean': 'CHAR(1)', 2536 'string': 'VARCHAR(%(length)s)', 2537 'text': 'CLOB', 2538 'password': 'VARCHAR(%(length)s)', 2539 'blob': 'BLOB', 2540 'upload': 'VARCHAR(%(length)s)', 2541 'integer': 'INT', 2542 'double': 'DOUBLE', 2543 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2544 'date': 'DATE', 2545 'time': 'TIME', 2546 'datetime': 'TIMESTAMP', 2547 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 2548 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2549 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2550 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2551 'list:integer': 'CLOB', 2552 'list:string': 'CLOB', 2553 'list:reference': 'CLOB', 2554 } 2555 2556
2557 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2558 credential_decoder=lambda x:x, driver_args={}, 2559 adapter_args={}):
2560 self.db = db 2561 self.dbengine = "teradata" 2562 self.uri = uri 2563 self.pool_size = pool_size 2564 self.folder = folder 2565 self.db_codec = db_codec 2566 self.find_or_make_work_folder() 2567 cnxn = uri.split('://', 1)[1] 2568 def connect(cnxn=cnxn,driver_args=driver_args): 2569 return self.driver.connect(cnxn,**driver_args)
2570 self.pool_connection(connect)
2571 2572 2573 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name 2574 # (ANSI-SQL wants this form of name 2575 # to be a delimited identifier) 2576
2577 -class IngresAdapter(BaseAdapter):
2578 2579 driver = globals().get('ingresdbi',None) 2580 2581 types = { 2582 'boolean': 'CHAR(1)', 2583 'string': 'VARCHAR(%(length)s)', 2584 'text': 'CLOB', 2585 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 2586 'blob': 'BLOB', 2587 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 2588 'integer': 'INTEGER4', # or int8... 2589 'double': 'FLOAT8', 2590 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2591 'date': 'ANSIDATE', 2592 'time': 'TIME WITHOUT TIME ZONE', 2593 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 2594 'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME, 2595 'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2596 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2597 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 2598 'list:integer': 'CLOB', 2599 'list:string': 'CLOB', 2600 'list:reference': 'CLOB', 2601 } 2602
2603 - def LEFT_JOIN(self):
2604 return 'LEFT OUTER JOIN'
2605
2606 - def RANDOM(self):
2607 return 'RANDOM()'
2608
2609 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2610 if limitby: 2611 (lmin, lmax) = limitby 2612 fetch_amt = lmax - lmin 2613 if fetch_amt: 2614 sql_s += ' FIRST %d ' % (fetch_amt, ) 2615 if lmin: 2616 # Requires Ingres 9.2+ 2617 sql_o += ' OFFSET %d' % (lmin, ) 2618 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
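Ingres pagination splices the row cap into the select modifiers and the offset (9.2+) after the ORDER BY tail; the fragments produced for assumed bounds limitby=(10,20):

lmin, lmax = 10, 20
fetch_amt = lmax - lmin
print ' FIRST %d ' % (fetch_amt, )   # spliced into sql_s: caps at 10 rows
print ' OFFSET %d' % (lmin, )        # appended to sql_o: skips the first 10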
2619
2620 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2621 credential_decoder=lambda x:x, driver_args={}, 2622 adapter_args={}):
2623 self.db = db 2624 self.dbengine = "ingres" 2625 self.uri = uri 2626 self.pool_size = pool_size 2627 self.folder = folder 2628 self.db_codec = db_codec 2629 self.find_or_make_work_folder() 2630 connstr = self.uri.split(':', 1)[1] 2631 # Simple URI processing 2632 connstr = connstr.lstrip() 2633 while connstr.startswith('/'): 2634 connstr = connstr[1:] 2635 database_name=connstr # Assume only (local) dbname is passed in 2636 vnode = '(local)' 2637 servertype = 'ingres' 2638 trace = (0, None) # No tracing 2639 driver_args.update(dict(database=database_name, 2640 vnode=vnode, 2641 servertype=servertype, 2642 trace=trace)) 2643 def connect(driver_args=driver_args): 2644 return self.driver.connect(**driver_args)
2645 self.pool_connection(connect)
2646
2647 - def create_sequence_and_triggers(self, query, table, **args):
2648 # post create table auto inc code (if needed) 2649 # modify table to btree for performance.... 2650 # Older Ingres releases could use rule/trigger like Oracle above. 2651 if hasattr(table,'_primarykey'): 2652 modify_tbl_sql = 'modify %s to btree unique on %s' % \ 2653 (table._tablename, 2654 ', '.join(["'%s'" % x for x in table.primarykey])) 2655 self.execute(modify_tbl_sql) 2656 else: 2657 tmp_seqname='%s_iisq' % table._tablename 2658 query=query.replace(INGRES_SEQNAME, tmp_seqname) 2659 self.execute('create sequence %s' % tmp_seqname) 2660 self.execute(query) 2661 self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
2662 2663
2664 - def lastrowid(self,table):
2665 tmp_seqname='%s_iisq' % table 2666 self.execute('select current value for %s' % tmp_seqname) 2667 return int(self.cursor.fetchone()[0]) # don't really need int type cast here...
2668
2669 - def integrity_error_class(self):
2670 return ingresdbi.IntegrityError
2671 2672
2673 -class IngresUnicodeAdapter(IngresAdapter):
2674 types = { 2675 'boolean': 'CHAR(1)', 2676 'string': 'NVARCHAR(%(length)s)', 2677 'text': 'NCLOB', 2678 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 2679 'blob': 'BLOB', 2680 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 2681 'integer': 'INTEGER4', # or int8... 2682 'double': 'FLOAT8', 2683 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2684 'date': 'ANSIDATE', 2685 'time': 'TIME WITHOUT TIME ZONE', 2686 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 2687 'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME, 2688 'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2689 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2690 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 2691 'list:integer': 'NCLOB', 2692 'list:string': 'NCLOB', 2693 'list:reference': 'NCLOB', 2694 }
2695
2696 -class SAPDBAdapter(BaseAdapter):
2697 2698 driver = globals().get('sapdb',None) 2699 support_distributed_transaction = False 2700 types = { 2701 'boolean': 'CHAR(1)', 2702 'string': 'VARCHAR(%(length)s)', 2703 'text': 'LONG', 2704 'password': 'VARCHAR(%(length)s)', 2705 'blob': 'LONG', 2706 'upload': 'VARCHAR(%(length)s)', 2707 'integer': 'INT', 2708 'double': 'FLOAT', 2709 'decimal': 'FIXED(%(precision)s,%(scale)s)', 2710 'date': 'DATE', 2711 'time': 'TIME', 2712 'datetime': 'TIMESTAMP', 2713 'id': 'INT PRIMARY KEY', 2714 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2715 'list:integer': 'LONG', 2716 'list:string': 'LONG', 2717 'list:reference': 'LONG', 2718 } 2719
2720 - def sequence_name(self,table):
2721 return '%s_id_Seq' % table
2722
2723 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2724 if limitby: 2725 (lmin, lmax) = limitby 2726 if len(sql_w) > 1: 2727 sql_w_row = sql_w + ' AND w_row > %i' % lmin 2728 else: 2729 sql_w_row = 'WHERE w_row > %i' % lmin 2730 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 2731 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2732
2733 - def create_sequence_and_triggers(self, query, table, **args):
2734 # following lines should only be executed if table._sequence_name does not exist 2735 self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 2736 self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 2737 % (table._tablename, table._id.name, table._sequence_name)) 2738 self.execute(query)
2739
2740 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2741 credential_decoder=lambda x:x, driver_args={}, 2742 adapter_args={}):
2743 self.db = db 2744 self.dbengine = "sapdb" 2745 self.uri = uri 2746 self.pool_size = pool_size 2747 self.folder = folder 2748 self.db_codec = db_codec 2749 self.find_or_make_work_folder() 2750 uri = uri.split('://')[1] 2751 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) 2752 if not m: 2753 raise SyntaxError, "Invalid URI string in DAL" 2754 user = credential_decoder(m.group('user')) 2755 if not user: 2756 raise SyntaxError, 'User required' 2757 password = credential_decoder(m.group('password')) 2758 if not password: 2759 password = '' 2760 host = m.group('host') 2761 if not host: 2762 raise SyntaxError, 'Host name required' 2763 db = m.group('db') 2764 if not db: 2765 raise SyntaxError, 'Database name required' 2766 def connect(user=user,password=password,database=db, 2767 host=host,driver_args=driver_args): 2768 return self.driver.Connection(user,password,database, 2769 host,**driver_args)
2770 self.pool_connection(connect)
2771
2772 - def lastrowid(self,table):
2773 self.execute("select %s.NEXTVAL from dual" % table._sequence_name) 2774 return int(self.cursor.fetchone()[0])
2775
2776 -class CubridAdapter(MySQLAdapter):
2777 2778 driver = globals().get('cubriddb',None) 2779
2780 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2781 credential_decoder=lambda x:x, driver_args={}, 2782 adapter_args={}):
2783 self.db = db 2784 self.dbengine = "cubrid" 2785 self.uri = uri 2786 self.pool_size = pool_size 2787 self.folder = folder 2788 self.db_codec = db_codec 2789 self.find_or_make_work_folder() 2790 uri = uri.split('://')[1] 2791 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) 2792 if not m: 2793 raise SyntaxError, \ 2794 "Invalid URI string in DAL: %s" % self.uri 2795 user = credential_decoder(m.group('user')) 2796 if not user: 2797 raise SyntaxError, 'User required' 2798 password = credential_decoder(m.group('password')) 2799 if not password: 2800 password = '' 2801 host = m.group('host') 2802 if not host: 2803 raise SyntaxError, 'Host name required' 2804 db = m.group('db') 2805 if not db: 2806 raise SyntaxError, 'Database name required' 2807 port = int(m.group('port') or '30000') 2808 charset = m.group('charset') or 'utf8' 2809 # user and password were already decoded above; bind the 2810 # connection parameters as defaults so connect() is self-contained 2811 def connect(host=host,port=port,db=db,user=user,passwd=password,driver_args=driver_args): 2812 return self.driver.connect(host,port,db,user,passwd,**driver_args)
2813 self.pool_connection(connect) 2814 self.execute('SET FOREIGN_KEY_CHECKS=1;') 2815 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
2816 2817 2818 ######## GAE MySQL ########## 2819
2820 -class DatabaseStoredFile:
2821 2822 web2py_filesystem = False 2823
2824 - def adapt(self,obj):
2825 return self.db._adapter.adapt(obj)
2826
2827 - def __init__(self,db,filename,mode):
2828 if db._adapter.dbengine != 'mysql': 2829 raise RuntimeError, "only MySQL can store metadata .table files in database for now" 2830 self.db = db 2831 self.filename = filename 2832 self.mode = mode 2833 if not self.web2py_filesystem: 2834 self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;") 2835 DatabaseStoredFile.web2py_filesystem = True 2836 self.p=0 2837 self.data = '' 2838 if mode in ('r','rw','a'): 2839 query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \ 2840 % filename 2841 rows = self.db.executesql(query) 2842 if rows: 2843 self.data = rows[0][0] 2844 elif os.path.exists(filename): 2845 datafile = open(filename, 'r') 2846 try: 2847 self.data = datafile.read() 2848 finally: 2849 datafile.close() 2850 elif mode in ('r','rw'): 2851 raise RuntimeError, "File %s does not exist" % filename
2852
2853 - def read(self, bytes):
2854 data = self.data[self.p:self.p+bytes] 2855 self.p += len(data) 2856 return data
2857
2858 - def readline(self):
2859 i = self.data.find('\n',self.p)+1 2860 if i>0: 2861 data, self.p = self.data[self.p:i], i 2862 else: 2863 data, self.p = self.data[self.p:], len(self.data) 2864 return data
2865
2866 - def write(self,data):
2867 self.data += data
2868
2869 - def close(self):
2870 self.db.executesql("DELETE FROM web2py_filesystem WHERE path=%s" \ 2871 % self.adapt(self.filename)) 2872 query = "INSERT INTO web2py_filesystem(path,content) VALUES (%s,%s)"\ 2873 % (self.adapt(self.filename), self.adapt(self.data)) 2874 self.db.executesql(query) 2875 self.db.commit()
2876 2877 @staticmethod
2878 - def exists(db,filename):
2879 if os.path.exists(filename): 2880 return True 2881 query = "SELECT path FROM web2py_filesystem WHERE path=%s" \ 2882 % db._adapter.adapt(filename) 2883 if db.executesql(query): 2884 return True 2885 return False
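A hedged usage sketch (db stands for an already-connected MySQL DAL; path and content are invented): writing a file stores its content under its path in the web2py_filesystem table, after which exists() finds it without touching the real filesystem:

# f = DatabaseStoredFile(db, 'person.table', 'w')
# f.write('...migration metadata...')
# f.close()                                  # DELETEs the old row, INSERTs the new one
# print DatabaseStoredFile.exists(db, 'person.table')   # True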
2886 2887
2888 -class UseDatabaseStoredFile:
2889
2890 - def file_exists(self, filename):
2891 return DatabaseStoredFile.exists(self.db,filename)
2892
2893 - def file_open(self, filename, mode='rb', lock=True):
2894 return DatabaseStoredFile(self.db,filename,mode)
2895
2896 - def file_close(self, fileobj, unlock=True):
2897 fileobj.close()
2898
2899 - def file_delete(self,filename):
2900 query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename 2901 self.db.executesql(query) 2902 self.db.commit()
2903
2904 -class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
2905
2906 - def __init__(self, db, uri='google:sql://realm:domain/database', 2907 pool_size=0, folder=None, db_codec='UTF-8', 2908 credential_decoder = lambda x:x, driver_args={}, 2909 adapter_args={}):
2910 2911 self.db = db 2912 self.dbengine = "mysql" 2913 self.uri = uri 2914 self.pool_size = pool_size 2915 self.folder = folder 2916 self.db_codec = db_codec 2917 self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1] 2918 2919 m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):]) 2920 if not m: 2921 raise SyntaxError, "Invalid URI string in SQLDB: %s" % self.uri 2922 instance = credential_decoder(m.group('instance')) 2923 db = credential_decoder(m.group('db')) 2924 driver_args['instance'] = instance 2925 createdb = adapter_args.get('createdb',True) 2926 if not createdb: 2927 driver_args['database'] = db 2928 def connect(driver_args=driver_args): 2929 return rdbms.connect(**driver_args)
2930 self.pool_connection(connect) 2931 if createdb: 2932 # self.execute('DROP DATABASE %s' % db) 2933 self.execute('CREATE DATABASE IF NOT EXISTS %s' % db) 2934 self.execute('USE %s' % db) 2935 self.execute("SET FOREIGN_KEY_CHECKS=1;") 2936 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
2937
2938 -class NoSQLAdapter(BaseAdapter):
2939 2940 @staticmethod
2941 - def to_unicode(obj):
2942 if isinstance(obj, str): 2943 return obj.decode('utf8') 2944 elif not isinstance(obj, unicode): 2945 return unicode(obj) 2946 return obj
2947
2948 - def represent(self, obj, fieldtype):
2949 if isinstance(obj,CALLABLETYPES): 2950 obj = obj() 2951 if isinstance(fieldtype, SQLCustomType): 2952 return fieldtype.encoder(obj) 2953 if isinstance(obj, (Expression, Field)): 2954 raise SyntaxError, "not supported on GAE" 2955 if self.dbengine=='google:datastore': 2956 if isinstance(fieldtype, gae.Property): 2957 return obj 2958 if fieldtype.startswith('list:'): 2959 if not obj: 2960 obj = [] 2961 if not isinstance(obj, (list, tuple)): 2962 obj = [obj] 2963 if obj == '' and not fieldtype[:2] in ['st','te','pa','up']: 2964 return None 2965 if not obj is None: 2966 if isinstance(obj, list) and not fieldtype.startswith('list'): 2967 obj = [self.represent(o, fieldtype) for o in obj] 2968 elif fieldtype in ('integer','id'): 2969 obj = long(obj) 2970 elif fieldtype == 'double': 2971 obj = float(obj) 2972 elif fieldtype.startswith('reference'): 2973 if isinstance(obj, (Row, Reference)): 2974 obj = obj['id'] 2975 obj = long(obj) 2976 elif fieldtype == 'boolean': 2977 if obj and not str(obj)[0].upper() == 'F': 2978 obj = True 2979 else: 2980 obj = False 2981 elif fieldtype == 'date': 2982 if not isinstance(obj, datetime.date): 2983 (y, m, d) = map(int,str(obj).strip().split('-')) 2984 obj = datetime.date(y, m, d) 2985 elif isinstance(obj,datetime.datetime): 2986 (y, m, d) = (obj.year, obj.month, obj.day) 2987 obj = datetime.date(y, m, d) 2988 elif fieldtype == 'time': 2989 if not isinstance(obj, datetime.time): 2990 time_items = map(int,str(obj).strip().split(':')[:3]) 2991 if len(time_items) == 3: 2992 (h, mi, s) = time_items 2993 else: 2994 (h, mi, s) = time_items + [0] 2995 obj = datetime.time(h, mi, s) 2996 elif fieldtype == 'datetime': 2997 if not isinstance(obj, datetime.datetime): 2998 (y, m, d) = map(int,str(obj)[:10].strip().split('-')) 2999 time_items = map(int,str(obj)[11:].strip().split(':')[:3]) 3000 while len(time_items)<3: 3001 time_items.append(0) 3002 (h, mi, s) = time_items 3003 obj = datetime.datetime(y, m, d, h, mi, s) 3004 elif fieldtype == 'blob': 3005 pass 3006 elif fieldtype.startswith('list:string'): 3007 return map(self.to_unicode,obj) 3008 elif fieldtype.startswith('list:'): 3009 return map(int,obj) 3010 else: 3011 obj = self.to_unicode(obj) 3012 return obj
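The scalar branches above are plain string-to-Python coercions; for instance the 'date' branch, replayed standalone with an assumed raw value:

import datetime
obj = '2011-06-21'    # assumed raw value, e.g. from a web form
if not isinstance(obj, datetime.date):
    (y, m, d) = map(int, str(obj).strip().split('-'))
    obj = datetime.date(y, m, d)
print repr(obj)       # datetime.date(2011, 6, 21)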
3013
3014 - def _insert(self,table,fields):
3015 return 'insert %s in %s' % (fields, table)
3016
3017 - def _count(self,query,distinct=None):
3018 return 'count %s' % repr(query)
3019
3020 - def _select(self,query,fields,attributes):
3021 return 'select %s where %s' % (repr(fields), repr(query))
3022
3023 - def _delete(self,tablename, query):
3024 return 'delete %s where %s' % (repr(tablename),repr(query))
3025
3026 - def _update(self,tablename,query,fields):
3027 return 'update %s (%s) where %s' % (repr(tablename), 3028 repr(fields),repr(query))
3029
3030 - def commit(self):
3031 """ 3032 remember: no transactions on many NoSQL 3033 """ 3034 pass
3035
3036 - def rollback(self):
3037 """ 3038 remember: no transactions on many NoSQL 3039 """ 3040 pass
3041
3042 - def close(self):
3043 """ 3044 remember: no transactions on many NoSQL 3045 """ 3046 pass
3047 3048 3049 # these functions should never be called!
3050 - def OR(self,first,second): raise SyntaxError, "Not supported"
3051 - def AND(self,first,second): raise SyntaxError, "Not supported"
3052 - def AS(self,first,second): raise SyntaxError, "Not supported"
3053 - def ON(self,first,second): raise SyntaxError, "Not supported"
3054 - def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
3055 - def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
3056 - def ADD(self,first,second): raise SyntaxError, "Not supported"
3057 - def SUB(self,first,second): raise SyntaxError, "Not supported"
3058 - def MUL(self,first,second): raise SyntaxError, "Not supported"
3059 - def DIV(self,first,second): raise SyntaxError, "Not supported"
3060 - def LOWER(self,first): raise SyntaxError, "Not supported"
3061 - def UPPER(self,first): raise SyntaxError, "Not supported"
3062 - def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
3063 - def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
3064 - def LEFT_JOIN(self): raise SyntaxError, "Not supported"
3065 - def RANDOM(self): raise SyntaxError, "Not supported"
3066 - def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
3067 - def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
3068 - def LIKE(self,first,second): raise SyntaxError, "Not supported"
3069 - def drop(self,table,mode): raise SyntaxError, "Not supported"
3070 - def alias(self,table,alias): raise SyntaxError, "Not supported"
3071 - def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
3072 - def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
3073 - def prepare(self,key): raise SyntaxError, "Not supported"
3074 - def commit_prepared(self,key): raise SyntaxError, "Not supported"
3075 - def rollback_prepared(self,key): raise SyntaxError, "Not supported"
3076 - def concat_add(self,table): raise SyntaxError, "Not supported"
3077 - def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
3078 - def create_sequence_and_triggers(self, query, table, **args): pass
3079 - def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
3080 - def execute(self,*a,**b): raise SyntaxError, "Not supported"
3081 - def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
3082 - def lastrowid(self,table): raise SyntaxError, "Not supported"
3083 - def integrity_error_class(self): raise SyntaxError, "Not supported"
3084 - def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"
3085 3086
3087 -class GAEF(object):
3088 - def __init__(self,name,op,value,apply):
3089 self.name=name=='id' and '__key__' or name 3090 self.op=op 3091 self.value=value 3092 self.apply=apply
3093 - def __repr__(self):
3094 return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
3095
3096 -class GoogleDatastoreAdapter(NoSQLAdapter):
3097 uploads_in_blob = True 3098 types = {} 3099
3100 - def file_exists(self, filename): pass
3101 - def file_open(self, filename, mode='rb', lock=True): pass
3102 - def file_close(self, fileobj, unlock=True): pass
3103
3104 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3105 credential_decoder=lambda x:x, driver_args={}, 3106 adapter_args={}):
3107 self.types.update({ 3108 'boolean': gae.BooleanProperty, 3109 'string': (lambda: gae.StringProperty(multiline=True)), 3110 'text': gae.TextProperty, 3111 'password': gae.StringProperty, 3112 'blob': gae.BlobProperty, 3113 'upload': gae.StringProperty, 3114 'integer': gae.IntegerProperty, 3115 'double': gae.FloatProperty, 3116 'decimal': GAEDecimalProperty, 3117 'date': gae.DateProperty, 3118 'time': gae.TimeProperty, 3119 'datetime': gae.DateTimeProperty, 3120 'id': None, 3121 'reference': gae.IntegerProperty, 3122 'list:string': (lambda: gae.StringListProperty(default=None)), 3123 'list:integer': (lambda: gae.ListProperty(int,default=None)), 3124 'list:reference': (lambda: gae.ListProperty(int,default=None)), 3125 }) 3126 self.db = db 3127 self.uri = uri 3128 self.dbengine = 'google:datastore' 3129 self.folder = folder 3130 db['_lastsql'] = '' 3131 self.db_codec = 'UTF-8' 3132 self.pool_size = 0 3133 match = re.compile('.*://(?P<namespace>.+)').match(uri) 3134 if match: 3135 namespace_manager.set_namespace(match.group('namespace'))
3136
3137 - def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
3138 myfields = {} 3139 for k in table.fields: 3140 if isinstance(polymodel,Table) and k in polymodel.fields(): 3141 continue 3142 field = table[k] 3143 attr = {} 3144 if isinstance(field.type, SQLCustomType): 3145 ftype = self.types[field.type.native or field.type.type](**attr) 3146 elif isinstance(field.type, gae.Property): 3147 ftype = field.type 3148 elif field.type.startswith('id'): 3149 continue 3150 elif field.type.startswith('decimal'): 3151 precision, scale = field.type[7:].strip('()').split(',') 3152 precision = int(precision) 3153 scale = int(scale) 3154 ftype = GAEDecimalProperty(precision, scale, **attr) 3155 elif field.type.startswith('reference'): 3156 if field.notnull: 3157 attr = dict(required=True) 3158 referenced = field.type[10:].strip() 3159 ftype = self.types[field.type[:9]](table._db[referenced]) 3160 elif field.type.startswith('list:reference'): 3161 if field.notnull: 3162 attr = dict(required=True) 3163 referenced = field.type[15:].strip() 3164 ftype = self.types[field.type[:14]](**attr) 3165 elif field.type.startswith('list:'): 3166 ftype = self.types[field.type](**attr) 3167 elif not field.type in self.types\ 3168 or not self.types[field.type]: 3169 raise SyntaxError, 'Field: unknown field type: %s' % field.type 3170 else: 3171 ftype = self.types[field.type](**attr) 3172 myfields[field.name] = ftype 3173 if not polymodel: 3174 table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) 3175 elif polymodel==True: 3176 table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) 3177 elif isinstance(polymodel,Table): 3178 table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) 3179 else: 3180 raise SyntaxError, "polymodel must be None, True, a table or a tablename" 3181 return None
3182
3183 - def expand(self,expression,field_type=None):
3184 if isinstance(expression,Field): 3185 if expression.type in ('text','blob'): 3186 raise SyntaxError, 'AppEngine does not index by: %s' % expression.type 3187 return expression.name 3188 elif isinstance(expression, (Expression, Query)): 3189 if not expression.second is None: 3190 return expression.op(expression.first, expression.second) 3191 elif not expression.first is None: 3192 return expression.op(expression.first) 3193 else: 3194 return expression.op() 3195 elif field_type: 3196 return self.represent(expression,field_type) 3197 elif isinstance(expression,(list,tuple)): 3198 return ','.join([self.represent(item,field_type) for item in expression]) 3199 else: 3200 return str(expression)
3201 3202 ### TODO from gql.py Expression
3203 - def AND(self,first,second):
3204 a = self.expand(first) 3205 b = self.expand(second) 3206 if b[0].name=='__key__' and a[0].name!='__key__': 3207 return b+a 3208 return a+b
3209
3210 - def EQ(self,first,second=None):
3211 if isinstance(second, Key): 3212 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 3213 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
3214
3215 - def NE(self,first,second=None):
3216 if first.type != 'id': 3217 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 3218 else: 3219 second = Key.from_path(first._tablename, long(second)) 3220 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
3221
3222 - def LT(self,first,second=None):
3223 if first.type != 'id': 3224 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 3225 else: 3226 second = Key.from_path(first._tablename, long(second)) 3227 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
3228
3229 - def LE(self,first,second=None):
3230 if first.type != 'id': 3231 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 3232 else: 3233 second = Key.from_path(first._tablename, long(second)) 3234 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
3235
3236 - def GT(self,first,second=None):
3237 if first.type != 'id' or second==0 or second == '0': 3238 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 3239 else: 3240 second = Key.from_path(first._tablename, long(second)) 3241 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
3242
3243 - def GE(self,first,second=None):
3244 if first.type != 'id': 3245 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 3246 else: 3247 second = Key.from_path(first._tablename, long(second)) 3248 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
3249
3250 - def INVERT(self,first):
3251 return '-%s' % first.name
3252
3253 - def COMMA(self,first,second):
3254 return '%s, %s' % (self.expand(first),self.expand(second))
3255
3256 - def BELONGS(self,first,second=None):
3257 if not isinstance(second,(list, tuple)): 3258 raise SyntaxError, "Not supported" 3259 if first.type != 'id': 3260 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 3261 else: 3262 second = [Key.from_path(first._tablename, i) for i in second] 3263 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
3264
3265 - def CONTAINS(self,first,second):
3266 if not first.type.startswith('list:'): 3267 raise SyntaxError, "Not supported" 3268 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]
3269
3270 - def NOT(self,first):
3271 nops = { self.EQ: self.NE,
3272 self.NE: self.EQ,
3273 self.LT: self.GE,
3274 self.GT: self.LE,
3275 self.LE: self.GT,
3276 self.GE: self.LT}
3277 if not isinstance(first,Query):
3278 raise SyntaxError, "Not supported"
3279 nop = nops.get(first.op,None)
3280 if not nop:
3281 raise SyntaxError, "Not supported %s" % first.op.__name__
3282 first.op = nop
3283 return self.expand(first)
3284
3285 - def truncate(self,table,mode):
3286 self.db(table._id > 0).delete()
3287
3288 - def select_raw(self,query,fields=None,attributes=None):
3289 fields = fields or []
3290 attributes = attributes or {}
3291 new_fields = []
3292 for item in fields:
3293 if isinstance(item,SQLALL):
3294 new_fields += item.table
3295 else:
3296 new_fields.append(item)
3297 fields = new_fields
3298 if query:
3299 tablename = self.get_table(query)
3300 elif fields:
3301 tablename = fields[0].tablename
3302 query = fields[0].table._id>0
3303 else:
3304 raise SyntaxError, "Unable to determine a tablename"
3305 query = self.filter_tenant(query,[tablename])
3306 tableobj = self.db[tablename]._tableobj
3307 items = tableobj.all()
3308 filters = self.expand(query)
3309 for filter in filters:
3310 if filter.name=='__key__' and filter.op=='>' and filter.value==0:
3311 continue
3312 elif filter.name=='__key__' and filter.op=='=':
3313 if filter.value==0:
3314 items = []
3315 elif isinstance(filter.value, Key):
3316 item = tableobj.get(filter.value)
3317 items = (item and [item]) or []
3318 else:
3319 item = tableobj.get_by_id(filter.value)
3320 items = (item and [item]) or []
3321 elif isinstance(items,list): # i.e. there is a single record!
3322 items = [i for i in items if filter.apply(getattr(i,filter.name),
3323 filter.value)]
3324 else:
3325 if filter.name=='__key__': items.order('__key__')
3326 items = items.filter('%s %s' % (filter.name,filter.op),filter.value)
3327 if not isinstance(items,list):
3328 if attributes.get('left', None):
3329 raise SyntaxError, 'Set: no left join in appengine'
3330 if attributes.get('groupby', None):
3331 raise SyntaxError, 'Set: no groupby in appengine'
3332 orderby = attributes.get('orderby', False)
3333 if orderby:
3334 ### THIS REALLY NEEDS IMPROVEMENT !!!
3335 if isinstance(orderby, (list, tuple)):
3336 orderby = xorify(orderby)
3337 if isinstance(orderby,Expression):
3338 orderby = self.expand(orderby)
3339 orders = orderby.split(', ')
3340 for order in orders:
3341 order={'-id':'-__key__','id':'__key__'}.get(order,order)
3342 items = items.order(order)
3343 if attributes.get('limitby', None):
3344 (lmin, lmax) = attributes['limitby']
3345 (limit, offset) = (lmax - lmin, lmin)
3346 items = items.fetch(limit, offset=offset)
3347 fields = self.db[tablename].fields
3348 return (items, tablename, fields)
3349
3350 - def select(self,query,fields,attributes):
3351 (items, tablename, fields) = self.select_raw(query,fields,attributes) 3352 # self.db['_lastsql'] = self._select(query,fields,attributes) 3353 rows = [ 3354 [t=='id' and int(item.key().id()) or getattr(item, t) for t in fields] 3355 for item in items] 3356 colnames = ['%s.%s' % (tablename, t) for t in fields] 3357 return self.parse(rows, colnames, False)
3358 3359
3360 - def count(self,query,distinct=None):
3361 if distinct: 3362 raise RuntimeError, "COUNT DISTINCT not supported" 3363 (items, tablename, fields) = self.select_raw(query) 3364 # self.db['_lastsql'] = self._count(query) 3365 try: 3366 return len(items) 3367 except TypeError: 3368 return items.count(limit=None)
3369
3370 - def delete(self,tablename, query):
3371 """ 3372 This function was changed on 2010-05-04 because according to 3373 http://code.google.com/p/googleappengine/issues/detail?id=3119 3374 GAE no longer support deleting more than 1000 records. 3375 """ 3376 # self.db['_lastsql'] = self._delete(tablename,query) 3377 (items, tablename, fields) = self.select_raw(query) 3378 # items can be one item or a query 3379 if not isinstance(items,list): 3380 counter = items.count(limit=None) 3381 leftitems = items.fetch(1000) 3382 while len(leftitems): 3383 gae.delete(leftitems) 3384 leftitems = items.fetch(1000) 3385 else: 3386 counter = len(items) 3387 gae.delete(items) 3388 return counter
3389
3390 - def update(self,tablename,query,update_fields):
3391 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 3392 (items, tablename, fields) = self.select_raw(query) 3393 counter = 0 3394 for item in items: 3395 for field, value in update_fields: 3396 setattr(item, field.name, self.represent(value,field.type)) 3397 item.put() 3398 counter += 1 3399 logger.info(str(counter)) 3400 return counter
3401
3402 - def insert(self,table,fields):
3403 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 3404 # table._db['_lastsql'] = self._insert(table,fields) 3405 tmp = table._tableobj(**dfields) 3406 tmp.put() 3407 rid = Reference(tmp.key().id()) 3408 (rid._table, rid._record) = (table, None) 3409 return rid
3410
3411 - def bulk_insert(self,table,items):
3412 parsed_items = [] 3413 for item in items: 3414 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 3415 parsed_items.append(table._tableobj(**dfields)) 3416 gae.put(parsed_items) 3417 return True
3418
3419 -def uuid2int(uuidv):
3420 return uuid.UUID(uuidv).int
3421
3422 -def int2uuid(n):
3423 return str(uuid.UUID(int=n))
3424
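### the two helpers above are mutual inverses; for any canonical
### (lowercase) UUID string:
>>> u = 'c8b8c840-8fa2-4f44-a9f8-9b52d0fa467e'
>>> int2uuid(uuid2int(u)) == u
True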
3425 -class CouchDBAdapter(NoSQLAdapter):
3426 uploads_in_blob = True 3427 types = { 3428 'boolean': bool, 3429 'string': str, 3430 'text': str, 3431 'password': str, 3432 'blob': str, 3433 'upload': str, 3434 'integer': long, 3435 'double': float, 3436 'date': datetime.date, 3437 'time': datetime.time, 3438 'datetime': datetime.datetime, 3439 'id': long, 3440 'reference': long, 3441 'list:string': list, 3442 'list:integer': list, 3443 'list:reference': list, 3444 } 3445
3446 - def file_exists(self, filename): pass
3447 - def file_open(self, filename, mode='rb', lock=True): pass
3448 - def file_close(self, fileobj, unlock=True): pass
3449
3450 - def expand(self,expression,field_type=None):
3451 if isinstance(expression,Field): 3452 if expression.type=='id': 3453 return "%s._id" % expression.tablename 3454 return BaseAdapter.expand(self,expression,field_type)
3455
3456 - def AND(self,first,second):
3457 return '(%s && %s)' % (self.expand(first),self.expand(second))
3458
3459 - def OR(self,first,second):
3460 return '(%s || %s)' % (self.expand(first),self.expand(second))
3461
3462 - def EQ(self,first,second):
3463 if second is None: 3464 return '(%s == null)' % self.expand(first) 3465 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
3466
3467 - def NE(self,first,second):
3468 if second is None: 3469 return '(%s != null)' % self.expand(first) 3470 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
3471
3472 - def COMMA(self,first,second):
3473 return '%s + %s' % (self.expand(first),self.expand(second))
3474
3475 - def represent(self, obj, fieldtype):
3476 value = NoSQLAdapter.represent(self, obj, fieldtype) 3477 if fieldtype=='id': 3478 return repr(str(int(value))) 3479 return repr(not isinstance(value,unicode) and value or value.encode('utf8'))
3480
3481 - def __init__(self,db,uri='couchdb://127.0.0.1:5984', 3482 pool_size=0,folder=None,db_codec ='UTF-8', 3483 credential_decoder=lambda x:x, driver_args={}, 3484 adapter_args={}):
3485 self.db = db 3486 self.uri = uri 3487 self.dbengine = 'couchdb' 3488 self.folder = folder 3489 db['_lastsql'] = '' 3490 self.db_codec = 'UTF-8' 3491 self.pool_size = pool_size 3492 3493 url='http://'+uri[10:] 3494 def connect(url=url,driver_args=driver_args): 3495 return couchdb.Server(url,**driver_args)
3496 self.pool_connection(connect,cursor=False)
3497
3498 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
3499 if migrate: 3500 try: 3501 self.connection.create(table._tablename) 3502 except: 3503 pass
3504
3505 - def insert(self,table,fields):
3506 id = uuid2int(web2py_uuid()) 3507 ctable = self.connection[table._tablename] 3508 values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields) 3509 values['_id'] = str(id) 3510 ctable.save(values) 3511 return id
3512
3513 - def _select(self,query,fields,attributes):
3514 if not isinstance(query,Query): 3515 raise SyntaxError, "Not Supported" 3516 for key in set(attributes.keys())-set(('orderby','groupby','limitby', 3517 'required','cache','left', 3518 'distinct','having')): 3519 raise SyntaxError, 'invalid select attribute: %s' % key 3520 new_fields=[] 3521 for item in fields: 3522 if isinstance(item,SQLALL): 3523 new_fields += item.table 3524 else: 3525 new_fields.append(item) 3526 def uid(fd): 3527 return fd=='id' and '_id' or fd
3528 def get(row,fd): 3529 return fd=='id' and int(row['_id']) or row.get(fd,None) 3530 fields = new_fields 3531 tablename = self.get_table(query) 3532 fieldnames = [f.name for f in (fields or self.db[tablename])] 3533 colnames = ['%s.%s' % (tablename,k) for k in fieldnames] 3534 fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) 3535 fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\ 3536 dict(t=tablename, 3537 query=self.expand(query), 3538 order='%s._id' % tablename, 3539 fields=fields) 3540 return fn, colnames 3541
3542 - def select(self,query,fields,attributes):
3543 if not isinstance(query,Query): 3544 raise SyntaxError, "Not Supported" 3545 fn, colnames = self._select(query,fields,attributes) 3546 tablename = colnames[0].split('.')[0] 3547 ctable = self.connection[tablename] 3548 rows = [cols['value'] for cols in ctable.query(fn)] 3549 return self.parse(rows, colnames, False)
3550
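### sketch of the map function that _select above generates for a simple
### equality query on an illustrative 'person' table with fields id and
### name; the string is sent to CouchDB as a temporary view:
###
###   function(person){if((person.name == 'James'))emit(person._id,[person._id,person.name]);}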
3551 - def delete(self,tablename,query):
3552 if not isinstance(query,Query): 3553 raise SyntaxError, "Not Supported" 3554 if query.first.type=='id' and query.op==self.EQ: 3555 id = query.second 3556 tablename = query.first.tablename 3557 assert(tablename == query.first.tablename) 3558 ctable = self.connection[tablename] 3559 try: 3560 del ctable[str(id)] 3561 return 1 3562 except couchdb.http.ResourceNotFound: 3563 return 0 3564 else: 3565 tablename = self.get_table(query) 3566 rows = self.select(query,[self.db[tablename]._id],{}) 3567 ctable = self.connection[tablename] 3568 for row in rows: 3569 del ctable[str(row.id)] 3570 return len(rows)
3571
3572 - def update(self,tablename,query,fields):
3573 if not isinstance(query,Query): 3574 raise SyntaxError, "Not Supported" 3575 if query.first.type=='id' and query.op==self.EQ: 3576 id = query.second 3577 tablename = query.first.tablename 3578 ctable = self.connection[tablename] 3579 try: 3580 doc = ctable[str(id)] 3581 for key,value in fields: 3582 doc[key.name] = NoSQLAdapter.represent(self,value,self.db[tablename][key.name].type) 3583 ctable.save(doc) 3584 return 1 3585 except couchdb.http.ResourceNotFound: 3586 return 0 3587 else: 3588 tablename = self.get_table(query) 3589 rows = self.select(query,[self.db[tablename]._id],{}) 3590 ctable = self.connection[tablename] 3591 table = self.db[tablename] 3592 for row in rows: 3593 doc = ctable[str(row.id)] 3594 for key,value in fields: 3595 doc[key.name] = NoSQLAdapter.represent(self,value,table[key.name].type) 3596 ctable.save(doc) 3597 return len(rows)
3598
3599 - def count(self,query,distinct=None):
3600 if distinct: 3601 raise RuntimeError, "COUNT DISTINCT not supported" 3602 if not isinstance(query,Query): 3603 raise SyntaxError, "Not Supported" 3604 tablename = self.get_table(query) 3605 rows = self.select(query,[self.db[tablename]._id],{}) 3606 return len(rows)
3607
3608 -def cleanup(text):
3609 """ 3610 validates that the given text is clean: only contains [0-9a-zA-Z_] 3611 """ 3612 3613 if re.compile('[^0-9a-zA-Z_]').findall(text): 3614 raise SyntaxError, \ 3615 'only [0-9a-zA-Z_] allowed in table and field names, received %s' \ 3616 % text 3617 return text
3618 3619
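### for example:
>>> cleanup('person_2')
'person_2'
>>> # cleanup('person 2') raises SyntaxError (space is not in [0-9a-zA-Z_])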
3620 -class MongoDBAdapter(NoSQLAdapter):
3621 uploads_in_blob = True 3622 types = { 3623 'boolean': bool, 3624 'string': str, 3625 'text': str, 3626 'password': str, 3627 'blob': str, 3628 'upload': str, 3629 'integer': long, 3630 'double': float, 3631 'date': datetime.date, 3632 'time': datetime.time, 3633 'datetime': datetime.datetime, 3634 'id': long, 3635 'reference': long, 3636 'list:string': list, 3637 'list:integer': list, 3638 'list:reference': list, 3639 } 3640
3641 - def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', 3642 pool_size=0,folder=None,db_codec ='UTF-8', 3643 credential_decoder=lambda x:x, driver_args={}, 3644 adapter_args={}):
3645 self.db = db
3646 self.uri = uri
3647 self.dbengine = 'mongodb'
3648 self.folder = folder
3649 db['_lastsql'] = ''
3650 self.db_codec = 'UTF-8'
3651 self.pool_size = pool_size
3652
3653 m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self.uri[10:])
3654 if not m:
3655 raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri
3656 host = m.group('host')
3657 if not host:
3658 raise SyntaxError, 'mongodb: host name required'
3659 dbname = m.group('db')
3660 if not dbname:
3661 raise SyntaxError, 'mongodb: db name required'
3662 port = int(m.group('port') or 27017)
3663 driver_args.update(dict(host=host,port=port))
3664 def connect(dbname=dbname,driver_args=driver_args):
3665 return pymongo.Connection(**driver_args)[dbname]
3666 self.pool_connection(connect,cursor=False)
3667
3668 - def insert(self,table,fields):
3669 id = uuid2int(web2py_uuid())
3670 ctable = self.connection[table._tablename]
3671 values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
3672 values['_id'] = str(id)
3673 ctable.insert(values)
3674 return id
3675 - def count(self,query):
3676 raise RuntimeError, "Not implemented"
3677
3678 - def select(self,query,fields,attributes):
3679 raise RuntimeError, "Not implemented"
3680
3681 - def delete(self,tablename, query):
3682 raise RuntimeError, "Not implemented"
3683
3684 - def update(self,tablename,query,fields):
3685 raise RuntimeError, "Not implemented"
3686 3687 3688 ######################################################################## 3689 # end of adapters 3690 ######################################################################## 3691 3692 ADAPTERS = { 3693 'sqlite': SQLiteAdapter, 3694 'sqlite:memory': SQLiteAdapter, 3695 'mysql': MySQLAdapter, 3696 'postgres': PostgreSQLAdapter, 3697 'oracle': OracleAdapter, 3698 'mssql': MSSQLAdapter, 3699 'mssql2': MSSQL2Adapter, 3700 'db2': DB2Adapter, 3701 'teradata': TeradataAdapter, 3702 'informix': InformixAdapter, 3703 'firebird': FireBirdAdapter, 3704 'firebird_embedded': FireBirdAdapter, 3705 'ingres': IngresAdapter, 3706 'ingresu': IngresUnicodeAdapter, 3707 'sapdb': SAPDBAdapter, 3708 'cubrid': CubridAdapter, 3709 'jdbc:sqlite': JDBCSQLiteAdapter, 3710 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 3711 'jdbc:postgres': JDBCPostgreSQLAdapter, 3712 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 3713 'google:datastore': GoogleDatastoreAdapter, 3714 'google:sql': GoogleSQLAdapter, 3715 'couchdb': CouchDBAdapter, 3716 'mongodb': MongoDBAdapter, 3717 } 3718 3719
3720 -def sqlhtml_validators(field):
3721 """ 3722 Field type validation, using web2py's validators mechanism. 3723 3724 makes sure the content of a field is in line with the declared 3725 fieldtype 3726 """ 3727 if not have_validators: 3728 return [] 3729 field_type, field_length = field.type, field.length 3730 if isinstance(field_type, SQLCustomType): 3731 if hasattr(field_type, 'validator'): 3732 return field_type.validator 3733 else: 3734 field_type = field_type.type 3735 elif not isinstance(field_type,str): 3736 return [] 3737 requires=[] 3738 def ff(r,id): 3739 row=r(id) 3740 if not row: 3741 return id 3742 elif hasattr(r, '_format') and isinstance(r._format,str): 3743 return r._format % row 3744 elif hasattr(r, '_format') and callable(r._format): 3745 return r._format(row) 3746 else: 3747 return id
3748 if field_type == 'string':
3749 requires.append(validators.IS_LENGTH(field_length))
3750 elif field_type == 'text':
3751 requires.append(validators.IS_LENGTH(field_length))
3752 elif field_type == 'password':
3753 requires.append(validators.IS_LENGTH(field_length))
3754 elif field_type == 'double':
3755 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
3756 elif field_type == 'integer':
3757 requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
3758 elif field_type.startswith('decimal'):
3759 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
3760 elif field_type == 'date':
3761 requires.append(validators.IS_DATE())
3762 elif field_type == 'time':
3763 requires.append(validators.IS_TIME())
3764 elif field_type == 'datetime':
3765 requires.append(validators.IS_DATETIME())
3766 elif field.db and field_type.startswith('reference') and \
3767 field_type.find('.') < 0 and \
3768 field_type[10:] in field.db.tables:
3769 referenced = field.db[field_type[10:]]
3770 def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
3771 field.represent = field.represent or repr_ref
3772 if hasattr(referenced, '_format') and referenced._format:
3773 requires = validators.IS_IN_DB(field.db,referenced._id,
3774 referenced._format)
3775 if field.unique:
3776 requires._and = validators.IS_NOT_IN_DB(field.db,field)
3777 if field.tablename == field_type[10:]:
3778 return validators.IS_EMPTY_OR(requires)
3779 return requires
3780 elif field.db and field_type.startswith('list:reference') and \
3781 field_type.find('.') < 0 and \
3782 field_type[15:] in field.db.tables:
3783 referenced = field.db[field_type[15:]]
3784 def list_ref_repr(ids, row=None, r=referenced, f=ff):
3785 if not ids:
3786 return None
3787 refs = r._db(r._id.belongs(ids)).select(r._id)
3788 return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '')
3789 field.represent = field.represent or list_ref_repr
3790 if hasattr(referenced, '_format') and referenced._format:
3791 requires = validators.IS_IN_DB(field.db,referenced._id,
3792 referenced._format,multiple=True)
3793 else:
3794 requires = validators.IS_IN_DB(field.db,referenced._id,
3795 multiple=True)
3796 if field.unique:
3797 requires._and = validators.IS_NOT_IN_DB(field.db,field)
3798 return requires
3799 elif field_type.startswith('list:'):
3800 def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
3801 field.represent = field.represent or repr_list
3802 if field.unique:
3803 requires.insert(0,validators.IS_NOT_IN_DB(field.db,field))
3804 sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
3805 if field.notnull and not field_type[:2] in sff:
3806 requires.insert(0, validators.IS_NOT_EMPTY())
3807 elif not field.notnull and field_type[:2] in sff and requires:
3808 requires[-1] = validators.IS_EMPTY_OR(requires[-1])
3809 return requires
3810
3811
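### sketch of the defaults this function assigns (field names are
### illustrative): a plain string field picks up IS_LENGTH, a date field
### IS_DATE, and a 'reference person' field IS_IN_DB when db.person
### defines a _format:
>>> # Field('name', 'string', length=64)  -> [IS_LENGTH(64)]
>>> # Field('birth', 'date')              -> [IS_DATE()]
>>> # Field('owner', 'reference person')  -> IS_IN_DB(db, db.person.id, ...)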
3812 -def bar_escape(item):
3813 return str(item).replace('|', '||')
3814
3815 -def bar_encode(items):
3816 return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
3817
3818 -def bar_decode_integer(value):
3819 return [int(x) for x in value.split('|') if x.strip()]
3820
3821 -def bar_decode_string(value):
3822 return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()]
3823 3824
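### the four helpers above serialize and deserialize list: field values
### for databases without a native list type; e.g.:
>>> bar_encode([1, 2, 3])
'|1|2|3|'
>>> bar_decode_integer('|1|2|3|')
[1, 2, 3]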
3825 -class Row(dict):
3826 3827 """ 3828 a dictionary that lets you do d['a'] as well as d.a 3829 this is only used to store a Row 3830 """ 3831
3832 - def __getitem__(self, key):
3833 key=str(key) 3834 m = table_field.match(key) 3835 if key in self.get('_extra',{}): 3836 return self._extra[key] 3837 elif m: 3838 try: 3839 return dict.__getitem__(self, m.group(1))[m.group(2)] 3840 except (KeyError,TypeError): 3841 key = m.group(2) 3842 return dict.__getitem__(self, key)
3843
3844 - def __call__(self,key):
3845 return self.__getitem__(key)
3846
3847 - def __setitem__(self, key, value):
3848 dict.__setitem__(self, str(key), value)
3849
3850 - def __getattr__(self, key):
3851 return self[key]
3852
3853 - def __setattr__(self, key, value):
3854 self[key] = value
3855
3856 - def __repr__(self):
3857 return '<Row ' + dict.__repr__(self) + '>'
3858
3859 - def __int__(self):
3860 return dict.__getitem__(self,'id')
3861
3862 - def __eq__(self,other):
3863 try: 3864 return self.as_dict() == other.as_dict() 3865 except AttributeError: 3866 return False
3867
3868 - def __ne__(self,other):
3869 return not (self == other)
3870
3871 - def __copy__(self):
3872 return Row(dict(self))
3873
3874 - def as_dict(self,datetime_to_str=False):
3875 SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list) 3876 d = dict(self) 3877 for k in copy.copy(d.keys()): 3878 v=d[k] 3879 if d[k] is None: 3880 continue 3881 elif isinstance(v,Row): 3882 d[k]=v.as_dict() 3883 elif isinstance(v,Reference): 3884 d[k]=int(v) 3885 elif isinstance(v,decimal.Decimal): 3886 d[k]=float(v) 3887 elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): 3888 if datetime_to_str: 3889 d[k] = v.isoformat().replace('T',' ')[:19] 3890 elif not isinstance(v,SERIALIZABLE_TYPES): 3891 del d[k] 3892 return d
3893 3894
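### e.g.:
>>> row = Row(id=1, name='James')
>>> row.name == row['name'] == row('name')
True
>>> row.as_dict() == {'id': 1, 'name': 'James'}
True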
3895 -def Row_unpickler(data):
3896 return Row(cPickle.loads(data))
3897
3898 -def Row_pickler(data):
3899 return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)
3900
3901 copy_reg.pickle(Row, Row_pickler, Row_unpickler)
3902
3903
3904 ################################################################################
3905 # Everything below should be independent of the specifics of the
3906 # database and should work for RDBMSs and some NoSQL databases
3907 ################################################################################
3908
3909 -class SQLCallableList(list):
3910 - def __call__(self):
3911 return copy.copy(self)
3912
3913 -def smart_query(fields,text):
3914 if not isinstance(fields,(list,tuple)):
3915 fields = [fields]
3916 new_fields = []
3917 for field in fields:
3918 if isinstance(field,Field):
3919 new_fields.append(field)
3920 elif isinstance(field,Table):
3921 for ofield in field:
3922 new_fields.append(ofield)
3923 else:
3924 raise RuntimeError, "fields must be a list of fields"
3925 field_map = {}
3926 for field in fields:
3927 n = field.name.lower()
3928 if not n in field_map:
3929 field_map[n] = field
3930 n = str(field).lower()
3931 if not n in field_map:
3932 field_map[n] = field
3933 re_constants = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
3934 constants = {}
3935 i = 0
3936 while True:
3937 m = re_constants.search(text)
3938 if not m: break
3939 text = text[:m.start()]+('#%i' % i)+text[m.end():]
3940 constants[str(i)] = m.group()[1:-1]
3941 i+=1
3942 text = re.sub('\s+',' ',text).lower()
3943 for a,b in [('&','and'),
3944 ('|','or'),
3945 ('~','not'),
3946 ('==','=='),
3947 ('<','<'),
3948 ('>','>'),
3949 ('<=','<='),
3950 ('>=','>='),
3951 ('<>','!='),
3952 ('=<','<='),
3953 ('=>','>='),
3954 ('=','=='),
3955 (' less or equal than ','<='),
3956 (' greater or equal than ','>='),
3957 (' equal or less than ','<='),
3958 (' equal or greater than ','>='),
3959 (' less or equal ','<='),
3960 (' greater or equal ','>='),
3961 (' equal or less ','<='),
3962 (' equal or greater ','>='),
3963 (' not equal to ','!='),
3964 (' not equal ','!='),
3965 (' equal to ','=='),
3966 (' equal ','=='),
3967 (' equals ','=='),
3968 (' less than ','<'),
3969 (' greater than ','>'),
3970 (' starts with ','startswith'),
3971 (' ends with ','endswith'),
3972 (' is ','==')]:
3973 if a[0]==' ':
3974 text = text.replace(' is'+a,' %s ' % b)
3975 text = text.replace(a,' %s ' % b)
3976 text = re.sub('\s+',' ',text).lower()
3977 query = field = neg = op = logic = None
3978 for item in text.split():
3979 if field is None:
3980 if item == 'not':
3981 neg = True
3982 elif not neg and not logic and item in ('and','or'):
3983 logic = item
3984 elif item in field_map:
3985 field = field_map[item]
3986 else:
3987 raise RuntimeError, "Invalid syntax"
3988 elif not field is None and op is None:
3989 op = item
3990 elif not op is None:
3991 if item.startswith('#'):
3992 if not item[1:] in constants:
3993 raise RuntimeError, "Invalid syntax"
3994 value = constants[item[1:]]
3995 else:
3996 value = item
3997 if op == '==' and field.type in ('string','text'): op = 'like'
3998 if op == '==': new_query = field==value
3999 elif op == '<': new_query = field<value
4000 elif op == '>': new_query = field>value
4001 elif op == '<=': new_query = field<=value
4002 elif op == '>=': new_query = field>=value
4003 elif op == 'contains': new_query = field.contains(value)
4004 elif op == 'like': new_query = field.like(value)
4005 elif op == 'startswith': new_query = field.startswith(value)
4006 elif op == 'endswith': new_query = field.endswith(value)
4007 else: raise RuntimeError, "Invalid operation"
4008 if neg: new_query = ~new_query
4009 if query is None:
4010 query = new_query
4011 elif logic == 'and':
4012 query &= new_query
4013 elif logic == 'or':
4014 query |= new_query
4015 field = op = neg = logic = None
4016 return query
4017 4018
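### sketch (illustrative 'person' table): smart_query turns free text into
### an ordinary Query, so the two expressions below are roughly equivalent:
>>> # q = smart_query([db.person], 'person.name starts with "J" and person.id > 3')
>>> # q = (db.person.name.startswith('J')) & (db.person.id > 3)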
4019 -class DAL(dict):
4020 4021 """ 4022 an instance of this class represents a database connection 4023 4024 Example:: 4025 4026 db = DAL('sqlite://test.db') 4027 db.define_table('tablename', Field('fieldname1'), 4028 Field('fieldname2')) 4029 """ 4030 4031 @staticmethod
4032 - def set_folder(folder):
4033 """ 4034 # ## this allows gluon to set a folder for this thread 4035 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 4036 """ 4037 BaseAdapter.set_folder(folder)
4038 4039 @staticmethod
4040 - def distributed_transaction_begin(*instances):
4041 if not instances:
4042 return
4043 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
4044 instances = list(enumerate(instances))
4045 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
4046 for (i, db) in instances:
4047 if not db._adapter.support_distributed_transaction():
4048 raise SyntaxError, \
4049 'distributed transaction not supported by %s' % db._dbname
4050 for (i, db) in instances:
4051 db._adapter.distributed_transaction_begin(keys[i])
4052 4053 @staticmethod
4054 - def distributed_transaction_commit(*instances):
4055 if not instances:
4056 return
4057 instances = list(enumerate(instances))
4058 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
4059 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
4060 for (i, db) in instances:
4061 if not db._adapter.support_distributed_transaction():
4062 raise SyntaxError, \
4063 'distributed transaction not supported by %s' % db._dbname
4064 try:
4065 for (i, db) in instances:
4066 db._adapter.prepare(keys[i])
4067 except:
4068 for (i, db) in instances:
4069 db._adapter.rollback_prepared(keys[i])
4070 raise RuntimeError, 'failure to commit distributed transaction'
4071 else:
4072 for (i, db) in instances:
4073 db._adapter.commit_prepared(keys[i])
4074 return
4075 4076
4077 - def __init__(self, uri='sqlite://dummy.db', 4078 pool_size=0, folder=None, 4079 db_codec='UTF-8', check_reserved=None, 4080 migrate=True, fake_migrate=False, 4081 migrate_enabled=True, fake_migrate_all=False, 4082 decode_credentials=False, driver_args=None, 4083 adapter_args=None, attempts=5, auto_import=False):
4084 """ 4085 Creates a new Database Abstraction Layer instance. 4086 4087 Keyword arguments: 4088 4089 :uri: string that contains information for connecting to a database. 4090 (default: 'sqlite://dummy.db') 4091 :pool_size: How many open connections to make to the database object. 4092 :folder: <please update me> 4093 :db_codec: string encoding of the database (default: 'UTF-8') 4094 :check_reserved: list of adapters to check tablenames and column names 4095 against sql reserved keywords. (Default None) 4096 4097 * 'common' List of sql keywords that are common to all database types 4098 such as "SELECT, INSERT". (recommended) 4099 * 'all' Checks against all known SQL keywords. (not recommended) 4100 <adaptername> Checks against the specific adapters list of keywords 4101 (recommended) 4102 * '<adaptername>_nonreserved' Checks against the specific adapters 4103 list of nonreserved keywords. (if available) 4104 :migrate (defaults to True) sets default migrate behavior for all tables 4105 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 4106 :migrate_enabled (defaults to True). If set to False disables ALL migrations 4107 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 4108 :attempts (defaults to 5). Number of times to attempt connecting 4109 """ 4110 if not decode_credentials: 4111 credential_decoder = lambda cred: cred 4112 else: 4113 credential_decoder = lambda cred: urllib.unquote(cred) 4114 if folder: 4115 self.set_folder(folder) 4116 self._uri = uri 4117 self._pool_size = pool_size 4118 self._db_codec = db_codec 4119 self._lastsql = '' 4120 self._timings = [] 4121 self._pending_references = {} 4122 self._request_tenant = 'request_tenant' 4123 self._common_fields = [] 4124 self._referee_name = '%(table)s' 4125 if not str(attempts).isdigit() or attempts < 0: 4126 attempts = 5 4127 if uri: 4128 uris = isinstance(uri,(list,tuple)) and uri or [uri] 4129 error = '' 4130 connected = False 4131 for k in range(attempts): 4132 for uri in uris: 4133 try: 4134 if is_jdbc and not uri.startswith('jdbc:'): 4135 uri = 'jdbc:'+uri 4136 self._dbname = regex_dbname.match(uri).group() 4137 if not self._dbname in ADAPTERS: 4138 raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname 4139 # notice that driver args or {} else driver_args 4140 # defaults to {} global, not correct 4141 args = (self,uri,pool_size,folder, 4142 db_codec, credential_decoder, 4143 driver_args or {}, adapter_args or {}) 4144 self._adapter = ADAPTERS[self._dbname](*args) 4145 connected = True 4146 break 4147 except SyntaxError: 4148 raise 4149 except Exception, error: 4150 sys.stderr.write('DEBUG_c: Exception %r' % ((Exception, error,),)) 4151 if connected: 4152 break 4153 else: 4154 time.sleep(1) 4155 if not connected: 4156 raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, error) 4157 else: 4158 args = (self,'None',0,folder,db_codec) 4159 self._adapter = BaseAdapter(*args) 4160 migrate = fake_migrate = False 4161 adapter = self._adapter 4162 self._uri_hash = hashlib.md5(adapter.uri).hexdigest() 4163 self.tables = SQLCallableList() 4164 self.check_reserved = check_reserved 4165 if self.check_reserved: 4166 from reserved_sql_keywords import ADAPTERS as RSK 4167 self.RSK = RSK 4168 self._migrate = migrate 4169 self._fake_migrate = fake_migrate 4170 self._migrate_enabled = migrate_enabled 4171 self._fake_migrate_all = fake_migrate_all 4172 if auto_import: 4173 self.import_table_definitions(adapter.folder)
4174
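### sketch: typical constructions (URIs illustrative); when uri is a list,
### each candidate is tried in order, up to `attempts` times, before failing:
>>> # db = DAL('sqlite://storage.sqlite', pool_size=10, check_reserved=['common'])
>>> # db = DAL(('mysql://a:b@master/x', 'mysql://a:b@replica/x'), attempts=3)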
4175 - def import_table_definitions(self,path,migrate=False,fake_migrate=False):
4176 pattern = os.path.join(path,self._uri_hash+'_*.table') 4177 for filename in glob.glob(pattern): 4178 tfile = self._adapter.file_open(filename, 'r') 4179 try: 4180 sql_fields = cPickle.load(tfile) 4181 name = filename[len(pattern)-7:-6] 4182 mf = [(value['sortable'],Field(key,type=value['type'])) \ 4183 for key, value in sql_fields.items()] 4184 mf.sort(lambda a,b: cmp(a[0],b[0])) 4185 self.define_table(name,*[item[1] for item in mf], 4186 **dict(migrate=migrate,fake_migrate=fake_migrate)) 4187 finally: 4188 self._adapter.file_close(tfile)
4189
4190 - def check_reserved_keyword(self, name):
4191 """ 4192 Validates ``name`` against SQL keywords 4193 Uses self.check_reserve which is a list of 4194 operators to use. 4195 self.check_reserved 4196 ['common', 'postgres', 'mysql'] 4197 self.check_reserved 4198 ['all'] 4199 """ 4200 for backend in self.check_reserved: 4201 if name.upper() in self.RSK[backend]: 4202 raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
4203
4204 - def __contains__(self, tablename):
4205 if self.has_key(tablename): 4206 return True 4207 else: 4208 return False
4209
4210 - def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True):
4211 """ 4212 EXAMPLE: 4213 4214 db.define_table('person',Field('name'),Field('info')) 4215 db.define_table('pet',Field('person',db.person),Field('name'),Field('info')) 4216 4217 @request.restful() 4218 def index(): 4219 def GET(*kargs,**kvars): 4220 patterns = [ 4221 "/persons[person]", 4222 "/{person.name.startswith}", 4223 "/{person.name}/:field", 4224 "/{person.name}/pets[pet.person]", 4225 "/{person.name}/pet[pet.person]/{pet.name}", 4226 "/{person.name}/pet[pet.person]/{pet.name}/:field" 4227 ] 4228 parser = db.parse_as_rest(patterns,kargs,kvars) 4229 if parser.status == 200: 4230 return dict(content=parser.response) 4231 else: 4232 raise HTTP(parser.status,parser.error) 4233 def POST(table_name,**kvars): 4234 if table_name == 'person': 4235 return db.person.validate_and_insert(**kvars) 4236 elif table_name == 'pet': 4237 return db.pet.validate_and_insert(**kvars) 4238 else: 4239 raise HTTP(400) 4240 return locals() 4241 """ 4242 4243 db = self 4244 re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 4245 re2 = re.compile('^.+\[.+\]$') 4246 4247 def auto_table(table,base='',depth=0): 4248 patterns = [] 4249 for field in db[table].fields: 4250 if base: 4251 tag = '%s/%s' % (base,field.replace('_','-')) 4252 else: 4253 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 4254 f = db[table][field] 4255 if not f.readable: continue 4256 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 4257 tag += '/{%s.%s}' % (table,field) 4258 patterns.append(tag) 4259 patterns.append(tag+'/:field') 4260 elif f.type.startswith('boolean'): 4261 tag += '/{%s.%s}' % (table,field) 4262 patterns.append(tag) 4263 patterns.append(tag+'/:field') 4264 elif f.type.startswith('double') or f.type.startswith('integer'): 4265 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 4266 patterns.append(tag) 4267 patterns.append(tag+'/:field') 4268 elif f.type.startswith('list:'): 4269 tag += '/{%s.%s.contains}' % (table,field) 4270 patterns.append(tag) 4271 patterns.append(tag+'/:field') 4272 elif f.type in ('date','datetime'): 4273 tag+= '/{%s.%s.year}' % (table,field) 4274 patterns.append(tag) 4275 patterns.append(tag+'/:field') 4276 tag+='/{%s.%s.month}' % (table,field) 4277 patterns.append(tag) 4278 patterns.append(tag+'/:field') 4279 tag+='/{%s.%s.day}' % (table,field) 4280 patterns.append(tag) 4281 patterns.append(tag+'/:field') 4282 if f.type in ('datetime','time'): 4283 tag+= '/{%s.%s.hour}' % (table,field) 4284 patterns.append(tag) 4285 patterns.append(tag+'/:field') 4286 tag+='/{%s.%s.minute}' % (table,field) 4287 patterns.append(tag) 4288 patterns.append(tag+'/:field') 4289 tag+='/{%s.%s.second}' % (table,field) 4290 patterns.append(tag) 4291 patterns.append(tag+'/:field') 4292 if depth>0: 4293 for rtable,rfield in db[table]._referenced_by: 4294 tag+='/%s[%s.%s]' % (rtable,rtable,rfield) 4295 patterns.append(tag) 4296 patterns += auto_table(rtable,base=tag,depth=depth-1) 4297 return patterns
4298
4299 if patterns=='auto':
4300 patterns=[]
4301 for table in db.tables:
4302 if not table.startswith('auth_'):
4303 patterns += auto_table(table,base='',depth=1)
4304 else:
4305 i = 0
4306 while i<len(patterns):
4307 pattern = patterns[i]
4308 tokens = pattern.split('/')
4309 if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
4310 new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1]))
4311 patterns = patterns[:i]+new_patterns+patterns[i+1:]
4312 i += len(new_patterns)
4313 else:
4314 i += 1
4315 if '/'.join(args) == 'patterns':
4316 return Row({'status':200,'pattern':'list',
4317 'error':None,'response':patterns})
4318 for pattern in patterns:
4319 otable=table=None
4320 dbset=db(query)
4321 i=0
4322 tags = pattern[1:].split('/')
4323 # print pattern
4324 if len(tags)!=len(args):
4325 continue
4326 for tag in tags:
4327 # print i, tag, args[i]
4328 if re1.match(tag):
4329 # print 're1:'+tag
4330 tokens = tag[1:-1].split('.')
4331 table, field = tokens[0], tokens[1]
4332 if not otable or table == otable:
4333 if len(tokens)==2 or tokens[2]=='eq':
4334 query = db[table][field]==args[i]
4335 elif tokens[2]=='ne':
4336 query = db[table][field]!=args[i]
4337 elif tokens[2]=='lt':
4338 query = db[table][field]<args[i]
4339 elif tokens[2]=='gt':
4340 query = db[table][field]>args[i]
4341 elif tokens[2]=='ge':
4342 query = db[table][field]>=args[i]
4343 elif tokens[2]=='le':
4344 query = db[table][field]<=args[i]
4345 elif tokens[2]=='year':
4346 query = db[table][field].year()==args[i]
4347 elif tokens[2]=='month':
4348 query = db[table][field].month()==args[i]
4349 elif tokens[2]=='day':
4350 query = db[table][field].day()==args[i]
4351 elif tokens[2]=='hour':
4352 query = db[table][field].hour()==args[i]
4353 elif tokens[2]=='minute':
4354 query = db[table][field].minutes()==args[i]
4355 elif tokens[2]=='second':
4356 query = db[table][field].seconds()==args[i]
4357 elif tokens[2]=='startswith':
4358 query = db[table][field].startswith(args[i])
4359 elif tokens[2]=='contains':
4360 query = db[table][field].contains(args[i])
4361 else:
4362 raise RuntimeError, "invalid pattern: %s" % pattern
4363 if len(tokens)==4 and tokens[3]=='not':
4364 query = ~query
4365 elif len(tokens)>=4:
4366 raise RuntimeError, "invalid pattern: %s" % pattern
4367 dbset=dbset(query)
4368 else:
4369 raise RuntimeError, "missing relation in pattern: %s" % pattern
4370 elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]:
4371 # print 're2:'+tag
4372 ref = tag[tag.find('[')+1:-1]
4373 if '.' in ref:
4374 table,field = ref.split('.')
4375 # print table,field
4376 if nested_select:
4377 try:
4378 dbset=db(db[table][field].belongs(dbset._select(db[otable]._id)))
4379 except ValueError:
4380 return Row({'status':400,'pattern':pattern,
4381 'error':'invalid path','response':None})
4382 else:
4383 items = [item.id for item in dbset.select(db[otable]._id)]
4384 dbset=db(db[table][field].belongs(items))
4385 else:
4386 dbset=dbset(db[ref])
4387 elif tag==':field' and table:
4388 # print 're3:'+tag
4389 field = args[i]
4390 if not field in db[table]: break
4391 try:
4392 item = dbset.select(db[table][field],limitby=(0,1)).first()
4393 except ValueError:
4394 return Row({'status':400,'pattern':pattern,
4395 'error':'invalid path','response':None})
4396 if not item:
4397 return Row({'status':404,'pattern':pattern,
4398 'error':'record not found','response':None})
4399 else:
4400 return Row({'status':200,'response':item[field],
4401 'pattern':pattern})
4402 elif tag != args[i]:
4403 break
4404 otable = table
4405 i += 1
4406 if i==len(tags) and table:
4407 otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1)
4408 try:
4409 if otable[:1]=='~': orderby = ~db[otable[1:]][ofield]
4410 else: orderby = db[otable][ofield]
4411 except KeyError:
4412 return Row({'status':400,'error':'invalid orderby','response':None})
4413 fields = [field for field in db[table] if field.readable]
4414 count = dbset.count()
4415 try:
4416 limits = (int(vars.get('min',0)),int(vars.get('max',1000)))
4417 if limits[0]<0 or limits[1]<limits[0]: raise ValueError
4418 except ValueError:
4419 return Row({'status':400,'error':'invalid limits','response':None})
4420 if count > limits[1]-limits[0]:
4421 return Row({'status':400,'error':'too many records','response':None})
4422 try:
4423 response = dbset.select(limitby=limits,orderby=orderby,*fields)
4424 except ValueError:
4425 return Row({'status':400,'pattern':pattern,
4426 'error':'invalid path','response':None})
4427 return Row({'status':200,'response':response,'pattern':pattern})
4428 return Row({'status':400,'error':'no matching pattern','response':None})
4429 4430
4431 - def define_table( 4432 self, 4433 tablename, 4434 *fields, 4435 **args 4436 ):
4437 4438 for key in args: 4439 if key not in [ 4440 'migrate', 4441 'primarykey', 4442 'fake_migrate', 4443 'format', 4444 'trigger_name', 4445 'sequence_name', 4446 'polymodel', 4447 'table_class']: 4448 raise SyntaxError, 'invalid table "%s" attribute: %s' \ 4449 % (tablename, key) 4450 migrate = self._migrate_enabled and args.get('migrate', 4451 self._migrate) 4452 fake_migrate = self._fake_migrate_all or args.get('fake_migrate', 4453 self._fake_migrate) 4454 table_class = args.get('table_class',Table) 4455 format = args.get('format',None) 4456 trigger_name = args.get('trigger_name', None) 4457 sequence_name = args.get('sequence_name', None) 4458 primarykey=args.get('primarykey',None) 4459 polymodel=args.get('polymodel',None) 4460 if not isinstance(tablename,str): 4461 raise SyntaxError, "missing table name" 4462 tablename = cleanup(tablename) 4463 lowertablename = tablename.lower() 4464 4465 if tablename.startswith('_') or hasattr(self,lowertablename) or \ 4466 regex_python_keywords.match(tablename): 4467 raise SyntaxError, 'invalid table name: %s' % tablename 4468 elif lowertablename in self.tables: 4469 raise SyntaxError, 'table already defined: %s' % tablename 4470 elif self.check_reserved: 4471 self.check_reserved_keyword(tablename) 4472 4473 if self._common_fields: 4474 fields = [f for f in fields] + [f for f in self._common_fields] 4475 4476 t = self[tablename] = table_class(self, tablename, *fields, 4477 **dict(primarykey=primarykey, 4478 trigger_name=trigger_name, 4479 sequence_name=sequence_name)) 4480 # db magic 4481 if self._uri in (None,'None'): 4482 return t 4483 4484 t._create_references() 4485 4486 if migrate or self._adapter.dbengine=='google:datastore': 4487 try: 4488 sql_locker.acquire() 4489 self._adapter.create_table(t,migrate=migrate, 4490 fake_migrate=fake_migrate, 4491 polymodel=polymodel) 4492 finally: 4493 sql_locker.release() 4494 else: 4495 t._dbt = None 4496 self.tables.append(tablename) 4497 t._format = format 4498 return t
4499
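### sketch: besides ordinary tables, define_table accepts keyed tables via
### the primarykey attribute (for legacy databases with no autoincrement
### id); the table and field names below are illustrative:
>>> # account = db.define_table('account',
>>> #     Field('accnum', 'integer'), Field('acctype'),
>>> #     primarykey=['accnum', 'acctype'], migrate=False)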
4500 - def __iter__(self):
4501 for tablename in self.tables: 4502 yield self[tablename]
4503
4504 - def __getitem__(self, key):
4505 return dict.__getitem__(self, str(key))
4506
4507 - def __setitem__(self, key, value):
4508 dict.__setitem__(self, str(key), value)
4509
4510 - def __getattr__(self, key):
4511 return self[key]
4512
4513 - def __setattr__(self, key, value):
4514 if key[:1]!='_' and key in self: 4515 raise SyntaxError, \ 4516 'Object %s exists and cannot be redefined' % key 4517 self[key] = value
4518
4519 - def __repr__(self):
4520 return '<DAL ' + dict.__repr__(self) + '>'
4521
4522 - def smart_query(self,fields,text):
4523 return Set(self, smart_query(fields,text))
4524
4525 - def __call__(self, query=None):
4526 if isinstance(query,Table): 4527 query = query._id>0 4528 elif isinstance(query,Field): 4529 query = query!=None 4530 return Set(self, query)
4531
4532 - def commit(self):
4533 self._adapter.commit()
4534
4535 - def rollback(self):
4536 self._adapter.rollback()
4537
4538 - def executesql(self, query, placeholders=None, as_dict=False):
4539 """ 4540 placeholders is optional and will always be None when using DAL 4541 if using raw SQL with placeholders, placeholders may be 4542 a sequence of values to be substituted in 4543 or, *if supported by the DB driver*, a dictionary with keys 4544 matching named placeholders in your SQL. 4545 4546 Added 2009-12-05 "as_dict" optional argument. Will always be 4547 None when using DAL. If using raw SQL can be set to True 4548 and the results cursor returned by the DB driver will be 4549 converted to a sequence of dictionaries keyed with the db 4550 field names. Tested with SQLite but should work with any database 4551 since the cursor.description used to get field names is part of the 4552 Python dbi 2.0 specs. Results returned with as_dict = True are 4553 the same as those returned when applying .to_list() to a DAL query. 4554 4555 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 4556 4557 --bmeredyk 4558 """ 4559 if placeholders: 4560 self._adapter.execute(query, placeholders) 4561 else: 4562 self._adapter.execute(query) 4563 if as_dict: 4564 if not hasattr(self._adapter.cursor,'description'): 4565 raise RuntimeError, "database does not support executesql(...,as_dict=True)" 4566 # Non-DAL legacy db query, converts cursor results to dict. 4567 # sequence of 7-item sequences. each sequence tells about a column. 4568 # first item is always the field name according to Python Database API specs 4569 columns = self._adapter.cursor.description 4570 # reduce the column info down to just the field names 4571 fields = [f[0] for f in columns] 4572 # will hold our finished resultset in a list 4573 data = self._adapter.cursor.fetchall() 4574 # convert the list for each row into a dictionary so it's 4575 # easier to work with. row['field_name'] rather than row[0] 4576 return [dict(zip(fields,row)) for row in data] 4577 # see if any results returned from database 4578 try: 4579 return self._adapter.cursor.fetchall() 4580 except: 4581 return None
4582
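### e.g., raw SQL with driver placeholders (the paramstyle and table are
### illustrative; '?' is the sqlite3 style) and dict-shaped results:
>>> # rows = db.executesql('SELECT id, name FROM person WHERE id > ?;',
>>> #                      placeholders=(3,), as_dict=True)
>>> # rows[0]['name']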
4583 - def _update_referenced_by(self, other):
4584 for tablename in self.tables: 4585 by = self[tablename]._referenced_by 4586 by[:] = [item for item in by if not item[0] == other]
4587
4588 - def export_to_csv_file(self, ofile, *args, **kwargs):
4589 for table in self.tables: 4590 ofile.write('TABLE %s\r\n' % table) 4591 self(self[table]._id > 0).select().export_to_csv_file(ofile, *args, **kwargs) 4592 ofile.write('\r\n\r\n') 4593 ofile.write('END')
4594
4595 - def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', 4596 unique='uuid', *args, **kwargs):
4597 if id_map is None: id_map={} 4598 for line in ifile: 4599 line = line.strip() 4600 if not line: 4601 continue 4602 elif line == 'END': 4603 return 4604 elif not line.startswith('TABLE ') or not line[6:] in self.tables: 4605 raise SyntaxError, 'invalid file format' 4606 else: 4607 tablename = line[6:] 4608 self[tablename].import_from_csv_file(ifile, id_map, null, 4609 unique, *args, **kwargs)
4610 4611
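### sketch of a whole-database copy through the two methods above
### (file names illustrative):
>>> # db.export_to_csv_file(open('backup.csv', 'wb'))
>>> # other_db.import_from_csv_file(open('backup.csv', 'rb'))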
4612 -class SQLALL(object):
4613 """ 4614 Helper class providing a comma-separated string having all the field names 4615 (prefixed by table name and '.') 4616 4617 normally only called from within gluon.sql 4618 """ 4619
4620 - def __init__(self, table):
4621 self.table = table
4622
4623 - def __str__(self):
4624 return ', '.join([str(field) for field in self.table])
4625 4626
4627 -class Reference(int):
4628
4629 - def __allocate(self):
4630 if not self._record: 4631 self._record = self._table[int(self)] 4632 if not self._record: 4633 raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self))
4634
4635 - def __getattr__(self, key):
4636 if key == 'id': 4637 return int(self) 4638 self.__allocate() 4639 return self._record.get(key, None)
4640
4641 - def get(self, key):
4642 return self.__getattr__(key)
4643
4644 - def __setattr__(self, key, value):
4645 if key.startswith('_'): 4646 int.__setattr__(self, key, value) 4647 return 4648 self.__allocate() 4649 self._record[key] = value
4650
4651 - def __getitem__(self, key):
4652 if key == 'id': 4653 return int(self) 4654 self.__allocate() 4655 return self._record.get(key, None)
4656
4657 - def __setitem__(self,key,value):
4658 self.__allocate() 4659 self._record[key] = value
4660 4661
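### sketch: a Reference is the integer id of a record, but fetches the
### full record lazily on first attribute access (tables illustrative):
>>> # dog = db.dog[1]
>>> # int(dog.owner)     # just the stored id, no extra select
>>> # dog.owner.name     # triggers __allocate and a recursive select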
4662 -def Reference_unpickler(data):
4663 return marshal.loads(data)
4664
4665 -def Reference_pickler(data):
4666 try: 4667 marshal_dump = marshal.dumps(int(data)) 4668 except AttributeError: 4669 marshal_dump = 'i%s' % struct.pack('<i', int(data)) 4670 return (Reference_unpickler, (marshal_dump,))
4671 4672 copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler) 4673 4674
4675 -class Table(dict):
4676 4677 """ 4678 an instance of this class represents a database table 4679 4680 Example:: 4681 4682 db = DAL(...) 4683 db.define_table('users', Field('name')) 4684 db.users.insert(name='me') # print db.users._insert(...) to see SQL 4685 db.users.drop() 4686 """ 4687
4688 - def __init__( 4689 self, 4690 db, 4691 tablename, 4692 *fields, 4693 **args 4694 ):
4695 """ 4696 Initializes the table and performs checking on the provided fields. 4697 4698 Each table will have automatically an 'id'. 4699 4700 If a field is of type Table, the fields (excluding 'id') from that table 4701 will be used instead. 4702 4703 :raises SyntaxError: when a supplied field is of incorrect type. 4704 """ 4705 self._tablename = tablename 4706 self._sequence_name = args.get('sequence_name',None) or \ 4707 db and db._adapter.sequence_name(tablename) 4708 self._trigger_name = args.get('trigger_name',None) or \ 4709 db and db._adapter.trigger_name(tablename) 4710 4711 primarykey = args.get('primarykey', None) 4712 fieldnames,newfields=set(),[] 4713 if primarykey: 4714 if not isinstance(primarykey,list): 4715 raise SyntaxError, \ 4716 "primarykey must be a list of fields from table '%s'" \ 4717 % tablename 4718 self._primarykey = primarykey 4719 elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: 4720 field = Field('id', 'id') 4721 newfields.append(field) 4722 fieldnames.add('id') 4723 self._id = field 4724 for field in fields: 4725 if not isinstance(field, (Field, Table)): 4726 raise SyntaxError, \ 4727 'define_table argument is not a Field or Table: %s' % field 4728 elif isinstance(field, Field) and not field.name in fieldnames: 4729 if hasattr(field, '_db'): 4730 field = copy.copy(field) 4731 newfields.append(field) 4732 fieldnames.add(field.name) 4733 if field.type=='id': 4734 self._id = field 4735 elif isinstance(field, Table): 4736 table = field 4737 for field in table: 4738 if not field.name in fieldnames and not field.type=='id': 4739 newfields.append(copy.copy(field)) 4740 fieldnames.add(field.name) 4741 else: 4742 # let's ignore new fields with duplicated names!!! 4743 pass 4744 fields = newfields 4745 self._db = db 4746 tablename = tablename 4747 self.fields = SQLCallableList() 4748 self.virtualfields = [] 4749 fields = list(fields) 4750 4751 if db and self._db._adapter.uploads_in_blob==True: 4752 for field in fields: 4753 if isinstance(field, Field) and field.type == 'upload'\ 4754 and field.uploadfield is True: 4755 tmp = field.uploadfield = '%s_blob' % field.name 4756 fields.append(self._db.Field(tmp, 'blob', default='')) 4757 4758 lower_fieldnames = set() 4759 reserved = dir(Table) + ['fields'] 4760 for field in fields: 4761 if db and db.check_reserved: 4762 db.check_reserved_keyword(field.name) 4763 elif field.name in reserved: 4764 raise SyntaxError, "field name %s not allowed" % field.name 4765 4766 if field.name.lower() in lower_fieldnames: 4767 raise SyntaxError, "duplicate field %s in table %s" \ 4768 % (field.name, tablename) 4769 else: 4770 lower_fieldnames.add(field.name.lower()) 4771 4772 self.fields.append(field.name) 4773 self[field.name] = field 4774 if field.type == 'id': 4775 self['id'] = field 4776 field.tablename = field._tablename = tablename 4777 field.table = field._table = self 4778 field.db = field._db = self._db 4779 if self._db and not field.type in ('text','blob') and \ 4780 self._db._adapter.maxcharlength < field.length: 4781 field.length = self._db._adapter.maxcharlength 4782 if field.requires == DEFAULT: 4783 field.requires = sqlhtml_validators(field) 4784 self.ALL = SQLALL(self) 4785 4786 if hasattr(self,'_primarykey'): 4787 for k in self._primarykey: 4788 if k not in self.fields: 4789 raise SyntaxError, \ 4790 "primarykey must be a list of fields from table '%s " % tablename 4791 else: 4792 self[k].notnull = True
4793
4794 - def update(self,*args,**kwargs):
4795 raise RuntimeError, "Syntax Not Supported"
4796
4797 - def _validate(self,**vars):
4798 errors = Row() 4799 for key,value in vars.items(): 4800 value,error = self[key].validate(value) 4801 if error: 4802 errors[key] = error 4803 return errors
4804
4805 - def _create_references(self):
4806 pr = self._db._pending_references 4807 self._referenced_by = [] 4808 for fieldname in self.fields: 4809 field=self[fieldname] 4810 if isinstance(field.type,str) and field.type[:10] == 'reference ': 4811 ref = field.type[10:].strip() 4812 if not ref.split(): 4813 raise SyntaxError, 'Table: reference to nothing: %s' %ref 4814 refs = ref.split('.') 4815 rtablename = refs[0] 4816 if not rtablename in self._db: 4817 pr[rtablename] = pr.get(rtablename,[]) + [field] 4818 continue 4819 rtable = self._db[rtablename] 4820 if len(refs)==2: 4821 rfieldname = refs[1] 4822 if not hasattr(rtable,'_primarykey'): 4823 raise SyntaxError,\ 4824 'keyed tables can only reference other keyed tables (for now)' 4825 if rfieldname not in rtable.fields: 4826 raise SyntaxError,\ 4827 "invalid field '%s' for referenced table '%s' in table '%s'" \ 4828 % (rfieldname, rtablename, self._tablename) 4829 rtable._referenced_by.append((self._tablename, field.name)) 4830 for referee in pr.get(self._tablename,[]): 4831 self._referenced_by.append((referee._tablename,referee.name))
4832
4833 - def _filter_fields(self, record, id=False):
4834 return dict([(k, v) for (k, v) in record.items() if k 4835 in self.fields and (self[k].type!='id' or id)])
4836
4837 - def _build_query(self,key):
4838 """ for keyed table only """ 4839 query = None 4840 for k,v in key.iteritems(): 4841 if k in self._primarykey: 4842 if query: 4843 query = query & (self[k] == v) 4844 else: 4845 query = (self[k] == v) 4846 else: 4847 raise SyntaxError, \ 4848 'Field %s is not part of the primary key of %s' % \ 4849 (k,self._tablename) 4850 return query
4851
4852 - def __getitem__(self, key):
4853 if not key: 4854 return None 4855 elif isinstance(key, dict): 4856 """ for keyed table """ 4857 query = self._build_query(key) 4858 rows = self._db(query).select() 4859 if rows: 4860 return rows[0] 4861 return None 4862 elif str(key).isdigit(): 4863 return self._db(self._id == key).select(limitby=(0,1)).first() 4864 elif key: 4865 return dict.__getitem__(self, str(key))
4866
4867 - def __call__(self, key=DEFAULT, **kwargs):
4868 if key!=DEFAULT: 4869 if isinstance(key, Query): 4870 record = self._db(key).select(limitby=(0,1)).first() 4871 elif not str(key).isdigit(): 4872 record = None 4873 else: 4874 record = self._db(self._id == key).select(limitby=(0,1)).first() 4875 if record: 4876 for k,v in kwargs.items(): 4877 if record[k]!=v: return None 4878 return record 4879 elif kwargs: 4880 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()]) 4881 return self._db(query).select(limitby=(0,1)).first() 4882 else: 4883 return None
4884
4885 - def __setitem__(self, key, value):
4886 if isinstance(key, dict) and isinstance(value, dict): 4887 """ option for keyed table """ 4888 if set(key.keys()) == set(self._primarykey): 4889 value = self._filter_fields(value) 4890 kv = {} 4891 kv.update(value) 4892 kv.update(key) 4893 if not self.insert(**kv): 4894 query = self._build_query(key) 4895 self._db(query).update(**self._filter_fields(value)) 4896 else: 4897 raise SyntaxError,\ 4898 'key must have all fields from primary key: %s'%\ 4899 (self._primarykey) 4900 elif str(key).isdigit(): 4901 if key == 0: 4902 self.insert(**self._filter_fields(value)) 4903 elif not self._db(self._id == key)\ 4904 .update(**self._filter_fields(value)): 4905 raise SyntaxError, 'No such record: %s' % key 4906 else: 4907 if isinstance(key, dict): 4908 raise SyntaxError,\ 4909 'value must be a dictionary: %s' % value 4910 dict.__setitem__(self, str(key), value)
4911
4912 - def __delitem__(self, key):
4913 if isinstance(key, dict): 4914 query = self._build_query(key) 4915 if not self._db(query).delete(): 4916 raise SyntaxError, 'No such record: %s' % key 4917 elif not str(key).isdigit() or not self._db(self._id == key).delete(): 4918 raise SyntaxError, 'No such record: %s' % key
4919
4920 - def __getattr__(self, key):
4921 return self[key]
4922
4923 - def __setattr__(self, key, value):
4924 if key in self: 4925 raise SyntaxError, 'Object exists and cannot be redefined: %s' % key 4926 self[key] = value
4927
4928 - def __iter__(self):
4929 for fieldname in self.fields: 4930 yield self[fieldname]
4931
4932 - def __repr__(self):
4933 return '<Table ' + dict.__repr__(self) + '>'
4934
4935 - def __str__(self):
4936 if self.get('_ot', None): 4937 return '%s AS %s' % (self._ot, self._tablename) 4938 return self._tablename
4939
4940 - def _drop(self, mode = ''):
4941 return self._db._adapter._drop(self, mode)
4942
4943 - def drop(self, mode = ''):
4944 return self._db._adapter.drop(self,mode)
4945
4946 - def _listify(self,fields,update=False):
4947          new_fields = []
4948          new_fields_names = []
4949          for name in fields:
4950              if not name in self.fields:
4951                  if name != 'id':
4952                      raise SyntaxError, 'Field %s does not belong to the table' % name
4953              else:
4954                  new_fields.append((self[name],fields[name]))
4955                  new_fields_names.append(name)
4956          for ofield in self:
4957              if not ofield.name in new_fields_names:
4958                  if not update and not ofield.default is None:
4959                      new_fields.append((ofield,ofield.default))
4960                  elif update and not ofield.update is None:
4961                      new_fields.append((ofield,ofield.update))
4962          for ofield in self:
4963              if not ofield.name in new_fields_names and ofield.compute:
4964                  try:
4965                      new_fields.append((ofield,ofield.compute(Row(fields))))
4966                  except KeyError:
4967                      pass
4968              if not update and ofield.required and not ofield.name in new_fields_names:
4969                  raise SyntaxError, 'Table: missing required field: %s' % ofield.name
4970          return new_fields
4971
4972 - def _insert(self, **fields):
4973 return self._db._adapter._insert(self,self._listify(fields))
4974
4975 - def insert(self, **fields):
4976 return self._db._adapter.insert(self,self._listify(fields))
4977
4978 - def validate_and_insert(self,**fields):
4979          response = Row()
4980          response.errors = self._validate(**fields)
4981          if not response.errors:
4982              response.id = self.insert(**fields)
4983          else:
4984              response.id = None
4985          return response
4986
4987 - def update_or_insert(self, key=DEFAULT, **values):
4988          if key == DEFAULT:
4989              record = self(**values)
4990          else:
4991              record = self(key)
4992          if record:
4993              record.update_record(**values)
4994              newid = None
4995          else:
4996              newid = self.insert(**values)
4997          return newid
4998
4999 - def bulk_insert(self, items):
5000 """ 5001 here items is a list of dictionaries 5002 """ 5003 items = [self._listify(item) for item in items] 5004 return self._db._adapter.bulk_insert(self,items)
5005
5006 - def _truncate(self, mode = None):
5007 return self._db._adapter._truncate(self, mode)
5008
5009 - def truncate(self, mode = None):
5010 return self._db._adapter.truncate(self, mode)
5011
5012  - def import_from_csv_file(
5013          self,
5014          csvfile,
5015          id_map=None,
5016          null='<NULL>',
5017          unique='uuid',
5018          *args, **kwargs
5019          ):
5020 """ 5021 import records from csv file. Column headers must have same names as 5022 table fields. field 'id' is ignored. If column names read 'table.file' 5023 the 'table.' prefix is ignored. 5024 'unique' argument is a field which must be unique 5025 (typically a uuid field) 5026 """ 5027 5028 delimiter = kwargs.get('delimiter', ',') 5029 quotechar = kwargs.get('quotechar', '"') 5030 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 5031 5032 reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) 5033 colnames = None 5034 if isinstance(id_map, dict): 5035 if not self._tablename in id_map: 5036 id_map[self._tablename] = {} 5037 id_map_self = id_map[self._tablename] 5038 5039 def fix(field, value, id_map): 5040 list_reference_s='list:reference' 5041 if value == null: 5042 value = None 5043 elif field.type=='blob': 5044 value = base64.b64decode(value) 5045 elif field.type=='double': 5046 if not value.strip(): 5047 value = None 5048 else: 5049 value = float(value) 5050 elif field.type=='integer': 5051 if not value.strip(): 5052 value = None 5053 else: 5054 value = int(value) 5055 elif field.type.startswith('list:string'): 5056 value = bar_decode_string(value) 5057 elif field.type.startswith(list_reference_s): 5058 ref_table = field.type[len(list_reference_s):].strip() 5059 value = [id_map[ref_table][int(v)] \ 5060 for v in bar_decode_string(value)] 5061 elif field.type.startswith('list:'): 5062 value = bar_decode_integer(value) 5063 elif id_map and field.type.startswith('reference'): 5064 try: 5065 value = id_map[field.type[9:].strip()][value] 5066 except KeyError: 5067 pass 5068 return (field.name, value)
5069
5070          def is_id(colname):
5071              if colname in self:
5072                  return self[colname].type == 'id'
5073              else:
5074                  return False
5075
5076          for line in reader:
5077              if not line:
5078                  break
5079              if not colnames:
5080                  colnames = [x.split('.',1)[-1] for x in line][:len(line)]
5081                  cols, cid = [], []
5082                  for i,colname in enumerate(colnames):
5083                      if is_id(colname):
5084                          cid = i
5085                      else:
5086                          cols.append(i)
5087                      if colname == unique:
5088                          unique_idx = i
5089              else:
5090                  items = [fix(self[colnames[i]], line[i], id_map) \
5091                           for i in cols if colnames[i] in self.fields]
5092                  # Validation. Check for duplicate of 'unique' &,
5093                  # if present, update instead of insert.
5094                  if not unique or unique not in colnames:
5095                      new_id = self.insert(**dict(items))
5096                  else:
5097                      unique_value = line[unique_idx]
5098                      query = self._db[self][unique] == unique_value
5099                      record = self._db(query).select().first()
5100                      if record:
5101                          record.update_record(**dict(items))
5102                          new_id = record[self._id.name]
5103                      else:
5104                          new_id = self.insert(**dict(items))
5105                  if id_map and cid != []:
5106                      id_map_self[int(line[cid])] = new_id
5107
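Because str(Rows) serializes to CSV (see Rows.__str__ further below), export and import round-trip naturally. A hedged sketch; the 'person' table and the file name are assumptions:

    >>> open('person.csv', 'wb').write(str(db(db.person).select()))
    >>> db.person.truncate()
    >>> db.person.import_from_csv_file(open('person.csv', 'rb'))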
5108 - def with_alias(self, alias):
5109 return self._db._adapter.alias(self,alias)
5110
5111 - def on(self, query):
5112 return Expression(self._db,self._db._adapter.ON,self,query)
5113
5114
5115
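with_alias and on are thin wrappers over the adapter's aliasing and JOIN machinery. A hedged self-join sketch (the 'node' table is an assumption):

    >>> node = db.define_table('node', Field('name'), Field('parent', 'reference node'))
    >>> p = node.with_alias('p')                      # SELECT ... node AS p
    >>> rows = db(node.parent == p.id).select(node.name, p.name)
    >>> rows = db().select(node.ALL, left=p.on(node.parent == p.id))  # LEFT JOIN ... ON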
5116 -class Expression(object):
5117
5118  - def __init__(
5119          self,
5120          db,
5121          op,
5122          first=None,
5123          second=None,
5124          type=None,
5125          ):
5126
5127          self.db = db
5128          self.op = op
5129          self.first = first
5130          self.second = second
5131          ### self._tablename = first._tablename ## CHECK
5132          if not type and first and hasattr(first,'type'):
5133              self.type = first.type
5134          else:
5135              self.type = type
5136
5137 - def sum(self):
5138 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)
5139
5140 - def max(self):
5141 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)
5142
5143 - def min(self):
5144 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)
5145
5146 - def len(self):
5147 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
5148
5149 - def lower(self):
5150 return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)
5151
5152 - def upper(self):
5153 return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)
5154
5155 - def year(self):
5156 return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')
5157
5158 - def month(self):
5159 return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')
5160
5161 - def day(self):
5162 return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')
5163
5164 - def hour(self):
5165 return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')
5166
5167 - def minutes(self):
5168 return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')
5169
5170 - def coalesce(self,*others):
5171 return Expression(self.db, self.db._adapter.COALESCE, self, others, self.type)
5172
5173 - def coalesce_zero(self):
5174 return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)
5175
5176 - def seconds(self):
5177 return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')
5178
5179 - def __getslice__(self, start, stop):
5180          if start < 0:
5181              pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
5182          else:
5183              pos0 = start + 1
5184
5185          if stop < 0:
5186              length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
5187          elif stop == sys.maxint:
5188              length = self.len()
5189          else:
5190              length = '(%s - %s)' % (stop + 1, pos0)
5191          return Expression(self.db,self.db._adapter.SUBSTRING,
5192                            self, (pos0, length), self.type)
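Slicing an Expression therefore compiles to a SUBSTR/SUBSTRING expression rather than slicing in Python. A hedged sketch (assumes a 'person' table with a string field 'name'):

    >>> initial = db.person.name[:1]                  # SUBSTR(name, 1, 1)
    >>> rows = db(initial == 'J').select(db.person.name)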
5193
5194 - def __getitem__(self, i):
5195 return self[i:i + 1]
5196
5197 - def __str__(self):
5198 return self.db._adapter.expand(self,self.type)
5199
5200 - def __or__(self, other): # for use in sortby
5201 return Expression(self.db,self.db._adapter.COMMA,self,other,self.type)
5202
5203 - def __invert__(self):
5204          if hasattr(self, 'op') and self.op == self.db._adapter.INVERT:
5205              return self.first
5206          return Expression(self.db,self.db._adapter.INVERT,self,type=self.type)
5207
5208 - def __add__(self, other):
5209 return Expression(self.db,self.db._adapter.ADD,self,other,self.type)
5210
5211 - def __sub__(self, other):
5212          if self.type == 'integer':
5213              result_type = 'integer'
5214          elif self.type in ['date','time','datetime','double']:
5215              result_type = 'double'
5216          else:
5217              raise SyntaxError, "subtraction operation not supported for type"
5218          return Expression(self.db,self.db._adapter.SUB,self,other,
5219                            result_type)
5220 - def __mul__(self, other):
5221 return Expression(self.db,self.db._adapter.MUL,self,other,self.type)
5222
5223 - def __div__(self, other):
5224 return Expression(self.db,self.db._adapter.DIV,self,other,self.type)
5225
5226 - def __mod__(self, other):
5227 return Expression(self.db,self.db._adapter.MOD,self,other,self.type)
5228
5229 - def __eq__(self, value):
5230 return Query(self.db, self.db._adapter.EQ, self, value)
5231
5232 - def __ne__(self, value):
5233 return Query(self.db, self.db._adapter.NE, self, value)
5234
5235 - def __lt__(self, value):
5236 return Query(self.db, self.db._adapter.LT, self, value)
5237
5238 - def __le__(self, value):
5239 return Query(self.db, self.db._adapter.LE, self, value)
5240
5241 - def __gt__(self, value):
5242 return Query(self.db, self.db._adapter.GT, self, value)
5243
5244 - def __ge__(self, value):
5245 return Query(self.db, self.db._adapter.GE, self, value)
5246
5247 - def like(self, value):
5248 return Query(self.db, self.db._adapter.LIKE, self, value)
5249
5250 - def belongs(self, value):
5251 return Query(self.db, self.db._adapter.BELONGS, self, value)
5252
5253 - def startswith(self, value):
5254          if not self.type in ('string', 'text'):
5255              raise SyntaxError, "startswith used with incompatible field type"
5256          return Query(self.db, self.db._adapter.STARTSWITH, self, value)
5257
5258 - def endswith(self, value):
5259          if not self.type in ('string', 'text'):
5260              raise SyntaxError, "endswith used with incompatible field type"
5261          return Query(self.db, self.db._adapter.ENDSWITH, self, value)
5262
5263 - def contains(self, value, all=False):
5264          if isinstance(value,(list,tuple)):
5265              subqueries = [self.contains(str(v).strip()) for v in value if str(v).strip()]
5266              return reduce(all and AND or OR, subqueries)
5267          if not self.type in ('string', 'text') and not self.type.startswith('list:'):
5268              raise SyntaxError, "contains used with incompatible field type"
5269          return Query(self.db, self.db._adapter.CONTAINS, self, value)
5270
5271 - def with_alias(self,alias):
5272 return Expression(self.db,self.db._adapter.AS,self,alias,self.type)
5273
5274  # for use in both Query and sortby
5275
5276
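belongs and contains round out the query operators: belongs compiles to SQL IN (and also accepts a nested _select), while contains works on string and list: fields. A hedged sketch (the 'tag' table is an assumption):

    >>> tag = db.define_table('tag', Field('name'), Field('labels', 'list:string'))
    >>> rows = db(tag.name.belongs(('red', 'blue'))).select()          # IN ('red','blue')
    >>> rows = db(tag.id.belongs(db()._select(tag.id))).select()       # nested select
    >>> rows = db(tag.labels.contains(['a', 'b'], all=True)).select()  # CONTAINS 'a' AND 'b'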
5277 -class SQLCustomType(object):
5278 """ 5279 allows defining of custom SQL types 5280 5281 Example:: 5282 5283 decimal = SQLCustomType( 5284 type ='double', 5285 native ='integer', 5286 encoder =(lambda x: int(float(x) * 100)), 5287 decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) 5288 ) 5289 5290 db.define_table( 5291 'example', 5292 Field('value', type=decimal) 5293 ) 5294 5295 :param type: the web2py type (default = 'string') 5296 :param native: the backend type 5297 :param encoder: how to encode the value to store it in the backend 5298 :param decoder: how to decode the value retrieved from the backend 5299 :param validator: what validators to use ( default = None, will use the 5300 default validator for type) 5301 """ 5302
5303  - def __init__(
5304          self,
5305          type='string',
5306          native=None,
5307          encoder=None,
5308          decoder=None,
5309          validator=None,
5310          _class=None,
5311          ):
5312
5313          self.type = type
5314          self.native = native
5315          self.encoder = encoder or (lambda x: x)
5316          self.decoder = decoder or (lambda x: x)
5317          self.validator = validator
5318          self._class = _class or type
5319
5320 - def startswith(self, dummy=None):
5321 return False
5322
5323 - def __getslice__(self, a=0, b=100):
5324 return None
5325
5326 - def __getitem__(self, i):
5327 return None
5328
5329 - def __str__(self):
5330 return self._class
5331
5332 -class FieldVirtual(object):
5333 - def __init__(self,f):
5334 self.f = f
5335
5336 -class FieldLazy(object):
5337 - def __init__(self,f,handler=None):
5338          self.f = f
5339          self.handler = handler
5340
5341
5342 -class Field(Expression):
5343
5344      Virtual = FieldVirtual
5345      Lazy = FieldLazy
5346
5347      """
5348      an instance of this class represents a database field
5349
5350      example::
5351
5352          a = Field(name, 'string', length=32, default=None, required=False,
5353              requires=IS_NOT_EMPTY(), ondelete='CASCADE',
5354              notnull=False, unique=False,
5355              widget=None, label=None, comment=None,
5356              uploadfield=True, # True means store on disk,
5357                                # 'a_field_name' means store in this field in db
5358                                # False means file content will be discarded.
5359              writable=True, readable=True, update=None, authorize=None,
5360              autodelete=False, represent=None, uploadfolder=None,
5361              uploadseparate=False # upload to separate directories by uuid_keys
5362                                   # first 2 character and tablename.fieldname
5363                                   # False - old behavior
5364                                   # True - put uploaded file in
5365                                   #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
5366                                   #   directory)
5367
5368      to be used as argument of DAL.define_table
5369
5370      allowed field types:
5371      string, boolean, integer, double, text, blob,
5372      date, time, datetime, upload, password
5373
5374      string fields default to a length of Adapter.maxcharlength (512, or 255 for MySQL)
5375      fields should have a default or they will be required in SQLFORMs
5376      the requires argument is used to validate the field input in SQLFORMs
5377
5378      """
5379
5380  - def __init__(
5381          self,
5382          fieldname,
5383          type='string',
5384          length=None,
5385          default=DEFAULT,
5386          required=False,
5387          requires=DEFAULT,
5388          ondelete='CASCADE',
5389          notnull=False,
5390          unique=False,
5391          uploadfield=True,
5392          widget=None,
5393          label=DEFAULT,
5394          comment=None,
5395          writable=True,
5396          readable=True,
5397          update=None,
5398          authorize=None,
5399          autodelete=False,
5400          represent=None,
5401          uploadfolder=None,
5402          uploadseparate=False,
5403          compute=None,
5404          custom_store=None,
5405          custom_retrieve=None,
5406          custom_delete=None,
5407          ):
5408          self.db = None
5409          self.op = None
5410          self.first = None
5411          self.second = None
5412          if not isinstance(fieldname,str):
5413              raise SyntaxError, "missing field name"
5414          if fieldname.startswith(':'):
5415              fieldname,readable,writable = fieldname[1:],False,False
5416          elif fieldname.startswith('.'):
5417              fieldname,readable,writable = fieldname[1:],False,False
5418          if '=' in fieldname:
5419              fieldname,default = fieldname.split('=',1)
5420          self.name = fieldname = cleanup(fieldname)
5421          if hasattr(Table,fieldname) or fieldname[0] == '_' or \
5422                  regex_python_keywords.match(fieldname):
5423              raise SyntaxError, 'Field: invalid field name: %s' % fieldname
5424          if isinstance(type, Table):
5425              type = 'reference ' + type._tablename
5426          self.type = type  # 'string', 'integer'
5427          self.length = (length is None) and DEFAULTLENGTH.get(type,512) or length
5428          if default == DEFAULT:
5429              self.default = update or None
5430          else:
5431              self.default = default
5432          self.required = required  # is this field required
5433          self.ondelete = ondelete.upper()  # this is for reference fields only
5434          self.notnull = notnull
5435          self.unique = unique
5436          self.uploadfield = uploadfield
5437          self.uploadfolder = uploadfolder
5438          self.uploadseparate = uploadseparate
5439          self.widget = widget
5440          if label == DEFAULT:
5441              self.label = ' '.join(i.capitalize() for i in fieldname.split('_'))
5442          else:
5443              self.label = label or ''
5444          self.comment = comment
5445          self.writable = writable
5446          self.readable = readable
5447          self.update = update
5448          self.authorize = authorize
5449          self.autodelete = autodelete
5450          if not represent and type in ('list:integer','list:string'):
5451              represent = lambda x,r=None: ', '.join(str(y) for y in x or [])
5452          self.represent = represent
5453          self.compute = compute
5454          self.isattachment = True
5455          self.custom_store = custom_store
5456          self.custom_retrieve = custom_retrieve
5457          self.custom_delete = custom_delete
5458          if self.label is None:
5459              self.label = ' '.join([x.capitalize() for x in
5460                                     fieldname.split('_')])
5461          if requires is None:
5462              self.requires = []
5463          else:
5464              self.requires = requires
5465
5466 - def store(self, file, filename=None, path=None):
5467          if self.custom_store:
5468              return self.custom_store(file,filename,path)
5469          if not filename:
5470              filename = file.name
5471          filename = os.path.basename(filename.replace('/', os.sep)\
5472                                      .replace('\\', os.sep))
5473          m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
5474          extension = m and m.group('e') or 'txt'
5475          uuid_key = web2py_uuid().replace('-', '')[-16:]
5476          encoded_filename = base64.b16encode(filename).lower()
5477          newfilename = '%s.%s.%s.%s' % \
5478              (self._tablename, self.name, uuid_key, encoded_filename)
5479          newfilename = newfilename[:200] + '.' + extension
5480          if isinstance(self.uploadfield,Field):
5481              blob_uploadfield_name = self.uploadfield.uploadfield
5482              keys = {self.uploadfield.name: newfilename,
5483                      blob_uploadfield_name: file.read()}
5484              self.uploadfield.table.insert(**keys)
5485          elif self.uploadfield == True:
5486              if path:
5487                  pass
5488              elif self.uploadfolder:
5489                  path = self.uploadfolder
5490              elif self.db._adapter.folder:
5491                  path = os.path.join(self.db._adapter.folder, '..', 'uploads')
5492              else:
5493                  raise RuntimeError, "you must specify a Field(...,uploadfolder=...)"
5494              if self.uploadseparate:
5495                  path = os.path.join(path,"%s.%s" % (self._tablename, self.name),uuid_key[:2])
5496              if not os.path.exists(path):
5497                  os.makedirs(path)
5498              pathfilename = os.path.join(path, newfilename)
5499              dest_file = open(pathfilename, 'wb')
5500              try:
5501                  shutil.copyfileobj(file, dest_file)
5502              finally:
5503                  dest_file.close()
5504          return newfilename
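A hedged sketch of the store() path for filesystem uploads (the table, file name, and folder are assumptions; store() returns the safe encoded name that is then saved in the upload field):

    >>> doc = db.define_table('doc', Field('att', 'upload', uploadfolder='/tmp/uploads'))
    >>> stream = open('report.pdf', 'rb')
    >>> safe_name = doc.att.store(stream, 'report.pdf')  # copies the file to disk
    >>> rid = doc.insert(att=safe_name)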
5505
5506 - def retrieve(self, name, path=None):
5507          if self.custom_retrieve:
5508              return self.custom_retrieve(name, path)
5509          import http
5510          if self.authorize or isinstance(self.uploadfield, str):
5511              row = self.db(self == name).select().first()
5512              if not row:
5513                  raise http.HTTP(404)
5514              if self.authorize and not self.authorize(row):
5515                  raise http.HTTP(403)
5516          try:
5517              m = regex_content.match(name)
5518              if not m or not self.isattachment:
5519                  raise TypeError, 'Can\'t retrieve %s' % name
5520              filename = base64.b16decode(m.group('name'), True)
5521              filename = regex_cleanup_fn.sub('_', filename)
5522          except (TypeError, AttributeError):
5523              filename = name
5524          if isinstance(self.uploadfield, str):  # ## if file is in DB
5525              return (filename, cStringIO.StringIO(row[self.uploadfield] or ''))
5526          elif isinstance(self.uploadfield,Field):
5527              blob_uploadfield_name = self.uploadfield.uploadfield
5528              query = self.uploadfield == name
5529              data = self.uploadfield.table(query)[blob_uploadfield_name]
5530              return (filename, cStringIO.StringIO(data))
5531          else:
5532              # ## if file is on filesystem
5533              if path:
5534                  pass
5535              elif self.uploadfolder:
5536                  path = self.uploadfolder
5537              else:
5538                  path = os.path.join(self.db._adapter.folder, '..', 'uploads')
5539              if self.uploadseparate:
5540                  t = m.group('table')
5541                  f = m.group('field')
5542                  u = m.group('uuidkey')
5543                  path = os.path.join(path,"%s.%s" % (t,f),u[:2])
5544              return (filename, open(os.path.join(path, name), 'rb'))
5545
5546 - def formatter(self, value):
5547          if value is None or not self.requires:
5548              return value
5549          if not isinstance(self.requires, (list, tuple)):
5550              requires = [self.requires]
5551          elif isinstance(self.requires, tuple):
5552              requires = list(self.requires)
5553          else:
5554              requires = copy.copy(self.requires)
5555          requires.reverse()
5556          for item in requires:
5557              if hasattr(item, 'formatter'):
5558                  value = item.formatter(value)
5559          return value
5560
5561 - def validate(self, value):
5562          if not self.requires:
5563              return (value, None)
5564          requires = self.requires
5565          if not isinstance(requires, (list, tuple)):
5566              requires = [requires]
5567          for validator in requires:
5568              (value, error) = validator(value)
5569              if error:
5570                  return (value, error)
5571          return (value, None)
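formatter() and validate() are the two directions of the requires pipeline: validate() runs the validators on input, formatter() applies their formatters on output. A hedged sketch using one of web2py's stock validators (assumed in scope, as in a web2py model):

    >>> age = Field('age', 'integer', requires=IS_INT_IN_RANGE(0, 150))
    >>> ok = age.validate(42)     # -> (42, None)
    >>> bad = age.validate(-1)    # -> (-1, error message)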
5572
5573 - def count(self):
5574 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')
5575
5576 - def __nonzero__(self):
5577 return True
5578
5579 - def __str__(self):
5580          try:
5581              return '%s.%s' % (self.tablename, self.name)
5582          except:
5583              return '<no table>.%s' % self.name
5584
5585
5586 -def raw(s): return Expression(None,s)
5587
5588 -class Query(object):
5589
5590      """
5591      a query object necessary to define a set.
5592      it can be stored or can be passed to DAL.__call__() to obtain a Set
5593
5594      Example::
5595
5596          query = db.users.name=='Max'
5597          set = db(query)
5598          records = set.select()
5599
5600      """
5601
5602  - def __init__(
5603          self,
5604          db,
5605          op,
5606          first=None,
5607          second=None,
5608          ):
5609          self.db = self._db = db
5610          self.op = op
5611          self.first = first
5612          self.second = second
5613
5614 - def __str__(self):
5615 return self.db._adapter.expand(self)
5616
5617 - def __and__(self, other):
5618 return Query(self.db,self.db._adapter.AND,self,other)
5619
5620 - def __or__(self, other):
5621 return Query(self.db,self.db._adapter.OR,self,other)
5622
5623 - def __invert__(self):
5624          if self.op == self.db._adapter.NOT:
5625              return self.first
5626          return Query(self.db,self.db._adapter.NOT,self)
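Queries compose with &, | and ~ before being passed to db(...). A hedged sketch (assumes the 'person' table used throughout the module docstring):

    >>> q1 = db.person.name == 'Max'
    >>> q2 = db.person.birth < '2003-01-01'
    >>> rows = db(q1 & q2).select()   # AND
    >>> rows = db(q1 | q2).select()   # OR
    >>> rows = db(~q1).select()       # NOT; note that ~~q1 short-circuits back to q1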
5627
5628
5629  regex_quotes = re.compile("'[^']*'")
5630
5631
5632 -def xorify(orderby):
5633      if not orderby:
5634          return None
5635      orderby2 = orderby[0]
5636      for item in orderby[1:]:
5637          orderby2 = orderby2 | item
5638      return orderby2
5639
5640
5641 -class Set(object):
5642
5643      """
5644      a Set represents a set of records in the database,
5645      the records are identified by the query=Query(...) object.
5646      normally the Set is generated by DAL.__call__(Query(...))
5647
5648      given a set, for example
5649          set = db(db.users.name=='Max')
5650      you can:
5651          set.update(name='Massimo')
5652          set.delete() # all elements in the set
5653          set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
5654      and take subsets:
5655          subset = set(db.users.id<5)
5656      """
5657
5658 - def __init__(self, db, query):
5659          self.db = db
5660          self._db = db  # for backward compatibility
5661          self.query = query
5662
5663 - def __call__(self, query):
5664          if isinstance(query,Table):
5665              query = query._id > 0
5666          elif isinstance(query,str):
5667              query = raw(query)
5668          elif isinstance(query,Field):
5669              query = query != None
5670          if self.query:
5671              return Set(self.db, self.query & query)
5672          else:
5673              return Set(self.db, query)
5674
5675 - def _count(self,distinct=None):
5676 return self.db._adapter._count(self.query,distinct)
5677
5678 - def _select(self, *fields, **attributes):
5679 return self.db._adapter._select(self.query,fields,attributes)
5680
5681 - def _delete(self):
5682          tablename = self.db._adapter.get_table(self.query)
5683          return self.db._adapter._delete(tablename,self.query)
5684
5685 - def _update(self, **update_fields):
5686          tablename = self.db._adapter.get_table(self.query)
5687          fields = self.db[tablename]._listify(update_fields,update=True)
5688          return self.db._adapter._update(tablename,self.query,fields)
5689
5690 - def isempty(self):
5691 return not self.select(limitby=(0,1))
5692
5693 - def count(self,distinct=None):
5694 return self.db._adapter.count(self.query,distinct)
5695
5696 - def select(self, *fields, **attributes):
5697 return self.db._adapter.select(self.query,fields,attributes)
5698
5699 - def delete(self):
5700          tablename = self.db._adapter.get_table(self.query)
5701          self.delete_uploaded_files()
5702          return self.db._adapter.delete(tablename,self.query)
5703
5704 - def update(self, **update_fields):
5705          tablename = self.db._adapter.get_table(self.query)
5706          fields = self.db[tablename]._listify(update_fields,update=True)
5707          if not fields:
5708              raise SyntaxError, "No fields to update"
5709          self.delete_uploaded_files(update_fields)
5710          return self.db._adapter.update(tablename,self.query,fields)
5711
5712 - def validate_and_update(self, **update_fields):
5713          tablename = self.db._adapter.get_table(self.query)
5714          response = Row()
5715          response.errors = self.db[tablename]._validate(**update_fields)
5716          fields = self.db[tablename]._listify(update_fields,update=True)
5717          if not fields:
5718              raise SyntaxError, "No fields to update"
5719          self.delete_uploaded_files(update_fields)
5720          if not response.errors:
5721              response.updated = self.db._adapter.update(tablename,self.query,fields)
5722          else:
5723              response.updated = None
5724          return response
5725
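A hedged sketch of a Set in use (assumes a 'person' table); the underscore-prefixed methods return the SQL string without executing it:

    >>> s = db(db.person.birth < '2000-01-01')
    >>> empty = s.isempty()       # cheap check, selects with limitby=(0,1)
    >>> n = s.count()
    >>> s.update(name='updated')  # returns the number of updated rows
    >>> print s._delete()         # shows the DELETE statement, does not run it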
5726 - def delete_uploaded_files(self, upload_fields=None):
5727          table = self.db[self.db._adapter.tables(self.query)[0]]
5728          # ## mind uploadfield==True means file is not in DB
5729          if upload_fields:
5730              fields = upload_fields.keys()
5731          else:
5732              fields = table.fields
5733          fields = [f for f in fields if table[f].type == 'upload'
5734                    and table[f].uploadfield == True
5735                    and table[f].autodelete]
5736          if not fields:
5737              return
5738          for record in self.select(*[table[f] for f in fields]):
5739              for fieldname in fields:
5740                  field = table[fieldname]
5741                  oldname = record.get(fieldname, None)
5742                  if not oldname:
5743                      continue
5744                  if upload_fields and oldname == upload_fields[fieldname]:
5745                      continue
5746                  if field.custom_delete:
5747                      field.custom_delete(oldname)
5748                  else:
5749                      uploadfolder = field.uploadfolder
5750                      if not uploadfolder:
5751                          uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads')
5752                      if field.uploadseparate:
5753                          items = oldname.split('.')
5754                          uploadfolder = os.path.join(uploadfolder,
5755                                                      "%s.%s" % (items[0], items[1]),
5756                                                      items[2][:2])
5757                      oldpath = os.path.join(uploadfolder, oldname)
5758                      if os.path.exists(oldpath):
5759                          os.unlink(oldpath)
5760
5761 -def update_record(pack, a=None):
5762      (colset, table, id) = pack
5763      b = a or dict(colset)
5764      c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id'])
5765      table._db(table._id==id).update(**c)
5766      for (k, v) in c.items():
5767          colset[k] = v
5768
5769 -class VirtualCommand(object):
5770 - def __init__(self,method,row):
5771          self.method = method
5772          #self.instance=instance
5773          self.row = row
5774 - def __call__(self,*args,**kwargs):
5775 return self.method(self.row,*args,**kwargs)
5776
5777 -def lazy_virtualfield(f):
5778      f.__lazy__ = True
5779      return f
5780
5781 -class Rows(object):
5782
5783      """
5784      A wrapper for the return value of a select. It basically represents a table.
5785      It has an iterator and each row is represented as a dictionary.
5786      """
5787
5788      # ## TODO: this class still needs some work to care for ID/OID
5789
5790  - def __init__(
5791          self,
5792          db=None,
5793          records=[],
5794          colnames=[],
5795          compact=True,
5796          rawrows=None
5797          ):
5798          self.db = db
5799          self.records = records
5800          self.colnames = colnames
5801          self.compact = compact
5802          self.response = rawrows
5803
5804 - def setvirtualfields(self,**keyed_virtualfields):
5805 """ 5806 db.define_table('x',Field('number','integer')) 5807 if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] 5808 5809 from gluon.dal import lazy_virtualfield 5810 5811 class MyVirtualFields(object): 5812 # normal virtual field (backward compatible, discouraged) 5813 def normal_shift(self): return self.x.number+1 5814 # lazy virtual field (because of @staticmethod) 5815 @lazy_virtualfield 5816 def lazy_shift(instance,row,delta=4): return row.x.number+delta 5817 db.x.virtualfields.append(MyVirtualFields()) 5818 5819 for row in db(db.x).select(): 5820 print row.number, row.normal_shift, row.lazy_shift(delta=7) 5821 """ 5822 if not keyed_virtualfields: 5823 return self 5824 for row in self.records: 5825 for (tablename,virtualfields) in keyed_virtualfields.items(): 5826 attributes = dir(virtualfields) 5827 if not tablename in row: 5828 box = row[tablename] = Row() 5829 else: 5830 box = row[tablename] 5831 updated = False 5832 for attribute in attributes: 5833 if attribute[0] != '_': 5834 method = getattr(virtualfields,attribute) 5835 if hasattr(method,'__lazy__'): 5836 box[attribute]=VirtualCommand(method,row) 5837 elif type(method)==types.MethodType: 5838 if not updated: 5839 virtualfields.__dict__.update(row) 5840 updated = True 5841 box[attribute]=method() 5842 return self
5843
5844 - def __and__(self,other):
5845          if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects'
5846          records = self.records+other.records
5847          return Rows(self.db,records,self.colnames)
5848
5849 - def __or__(self,other):
5850          if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects'
5851          records = self.records
5852          records += [record for record in other.records \
5853                      if not record in records]
5854          return Rows(self.db,records,self.colnames)
5855
5856 - def __nonzero__(self):
5857          if len(self.records):
5858              return 1
5859          return 0
5860
5861 - def __len__(self):
5862 return len(self.records)
5863
5864 - def __getslice__(self, a, b):
5865 return Rows(self.db,self.records[a:b],self.colnames)
5866
5867 - def __getitem__(self, i):
5868          row = self.records[i]
5869          keys = row.keys()
5870          if self.compact and len(keys) == 1 and keys[0] != '_extra':
5871              return row[row.keys()[0]]
5872          return row
5873
5874 - def __iter__(self):
5875 """ 5876 iterator over records 5877 """ 5878 5879 for i in xrange(len(self)): 5880 yield self[i]
5881
5882 - def __str__(self):
5883 """ 5884 serializes the table into a csv file 5885 """ 5886 5887 s = cStringIO.StringIO() 5888 self.export_to_csv_file(s) 5889 return s.getvalue()
5890
5891 - def first(self):
5892          if not self.records:
5893              return None
5894          return self[0]
5895
5896 - def last(self):
5897          if not self.records:
5898              return None
5899          return self[-1]
5900
5901 - def find(self,f):
5902 """ 5903 returns a new Rows object, a subset of the original object, 5904 filtered by the function f 5905 """ 5906 if not self.records: 5907 return Rows(self.db, [], self.colnames) 5908 records = [] 5909 for i in range(0,len(self)): 5910 row = self[i] 5911 if f(row): 5912 records.append(self.records[i]) 5913 return Rows(self.db, records, self.colnames)
5914
5915 - def exclude(self, f):
5916 """ 5917 removes elements from the calling Rows object, filtered by the function f, 5918 and returns a new Rows object containing the removed elements 5919 """ 5920 if not self.records: 5921 return Rows(self.db, [], self.colnames) 5922 removed = [] 5923 i=0 5924 while i<len(self): 5925 row = self[i] 5926 if f(row): 5927 removed.append(self.records[i]) 5928 del self.records[i] 5929 else: 5930 i += 1 5931 return Rows(self.db, removed, self.colnames)
5932
5933 - def sort(self, f, reverse=False):
5934 """ 5935 returns a list of sorted elements (not sorted in place) 5936 """ 5937 return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)
5938
5939  - def as_list(self,
5940                compact=True,
5941                storage_to_dict=True,
5942                datetime_to_str=True):
5943 """ 5944 returns the data as a list or dictionary. 5945 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 5946 :param datetime_to_str: convert datetime fields as strings (default True) 5947 """ 5948 (oc, self.compact) = (self.compact, compact) 5949 if storage_to_dict: 5950 items = [item.as_dict(datetime_to_str) for item in self] 5951 else: 5952 items = [item for item in self] 5953 self.compact = compact 5954 return items
5955
5956
5957  - def as_dict(self,
5958                key='id',
5959                compact=True,
5960                storage_to_dict=True,
5961                datetime_to_str=True):
5962 """ 5963 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 5964 5965 :param key: the name of the field to be used as dict key, normally the id 5966 :param compact: ? (default True) 5967 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 5968 :param datetime_to_str: convert datetime fields as strings (default True) 5969 """ 5970 rows = self.as_list(compact, storage_to_dict, datetime_to_str) 5971 if isinstance(key,str) and key.count('.')==1: 5972 (table, field) = key.split('.') 5973 return dict([(r[table][field],r) for r in rows]) 5974 elif isinstance(key,str): 5975 return dict([(r[key],r) for r in rows]) 5976 else: 5977 return dict([(key(r),r) for r in rows])
5978
5979 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
5980 """ 5981 export data to csv, the first line contains the column names 5982 5983 :param ofile: where the csv must be exported to 5984 :param null: how null values must be represented (default '<NULL>') 5985 :param delimiter: delimiter to separate values (default ',') 5986 :param quotechar: character to use to quote string values (default '"') 5987 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 5988 :param represent: use the fields .represent value (default False) 5989 :param colnames: list of column names to use (default self.colnames) 5990 This will only work when exporting rows objects!!!! 5991 DO NOT use this with db.export_to_csv() 5992 """ 5993 delimiter = kwargs.get('delimiter', ',') 5994 quotechar = kwargs.get('quotechar', '"') 5995 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 5996 represent = kwargs.get('represent', False) 5997 writer = csv.writer(ofile, delimiter=delimiter, 5998 quotechar=quotechar, quoting=quoting) 5999 colnames = kwargs.get('colnames', self.colnames) 6000 # a proper csv starting with the column names 6001 writer.writerow(colnames) 6002 6003 def none_exception(value): 6004 """ 6005 returns a cleaned up value that can be used for csv export: 6006 - unicode text is encoded as such 6007 - None values are replaced with the given representation (default <NULL>) 6008 """ 6009 if value is None: 6010 return null 6011 elif isinstance(value, unicode): 6012 return value.encode('utf8') 6013 elif isinstance(value,Reference): 6014 return int(value) 6015 elif hasattr(value, 'isoformat'): 6016 return value.isoformat()[:19].replace('T', ' ') 6017 elif isinstance(value, (list,tuple)): # for type='list:..' 6018 return bar_encode(value) 6019 return value
6020
6021          for record in self:
6022              row = []
6023              for col in colnames:
6024                  if not table_field.match(col):
6025                      row.append(record._extra[col])
6026                  else:
6027                      (t, f) = col.split('.')
6028                      field = self.db[t][f]
6029                      if isinstance(record.get(t, None), (Row,dict)):
6030                          value = record[t][f]
6031                      else:
6032                          value = record[f]
6033                      if field.type=='blob' and not value is None:
6034                          value = base64.b64encode(value)
6035                      elif represent and field.represent:
6036                          value = field.represent(value)
6037                      row.append(none_exception(value))
6038              writer.writerow(row)
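A hedged export sketch (assumes a 'person' table; represent=True runs each field's represent function before writing):

    >>> rows = db(db.person).select()
    >>> out = open('people.csv', 'wb')
    >>> rows.export_to_csv_file(out, delimiter=';', represent=True)
    >>> out.close()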
6039
6040 - def xml(self):
6041 """ 6042 serializes the table using sqlhtml.SQLTABLE (if present) 6043 """ 6044 6045 import sqlhtml 6046 return sqlhtml.SQLTABLE(self).xml()
6047
6048 - def json(self, mode='object', default=None):
6049 """ 6050 serializes the table to a JSON list of objects 6051 """ 6052 mode = mode.lower() 6053 if not mode in ['object', 'array']: 6054 raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode 6055 6056 def inner_loop(record, col): 6057 (t, f) = col.split('.') 6058 res = None 6059 if not table_field.match(col): 6060 key = col 6061 res = record._extra[col] 6062 else: 6063 key = f 6064 if isinstance(record.get(t, None), Row): 6065 res = record[t][f] 6066 else: 6067 res = record[f] 6068 if mode == 'object': 6069 return (key, res) 6070 else: 6071 return res
6072
6073          if mode == 'object':
6074              items = [dict([inner_loop(record, col) for col in
6075                             self.colnames]) for record in self]
6076          else:
6077              items = [[inner_loop(record, col) for col in self.colnames]
6078                       for record in self]
6079          if have_serializers:
6080              return serializers.json(items,default=default or serializers.custom_json)
6081          else:
6082              import simplejson
6083              return simplejson.dumps(items)
6084
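A hedged serialization sketch (assumes a 'person' table):

    >>> rows = db(db.person).select()
    >>> s = rows.json()               # '[{"id": 1, "name": "James"}, ...]'
    >>> s = rows.json(mode='array')   # values only, keys dropped
    >>> markup = rows.xml()           # requires gluon.sqlhtml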
6085 -def Rows_unpickler(data):
6086 return cPickle.loads(data)
6087
6088 -def Rows_pickler(data):
6089      return Rows_unpickler, \
6090          (cPickle.dumps(data.as_list(storage_to_dict=True,
6091                                      datetime_to_str=False)),)
6092
6093  copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
6094
6095
6096  ################################################################################
6097  # dummy function used to define some doctests
6098  ################################################################################
6099
6100 -def test_all():
6101 """ 6102 6103 >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") 6104 >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) 6105 >>> tmp = db.define_table('users',\ 6106 Field('stringf', 'string', length=32, required=True),\ 6107 Field('booleanf', 'boolean', default=False),\ 6108 Field('passwordf', 'password', notnull=True),\ 6109 Field('uploadf', 'upload'),\ 6110 Field('blobf', 'blob'),\ 6111 Field('integerf', 'integer', unique=True),\ 6112 Field('doublef', 'double', unique=True,notnull=True),\ 6113 Field('datef', 'date', default=datetime.date.today()),\ 6114 Field('timef', 'time'),\ 6115 Field('datetimef', 'datetime'),\ 6116 migrate='test_user.table') 6117 6118 Insert a field 6119 6120 >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ 6121 uploadf=None, integerf=5, doublef=3.14,\ 6122 datef=datetime.date(2001, 1, 1),\ 6123 timef=datetime.time(12, 30, 15),\ 6124 datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) 6125 1 6126 6127 Drop the table 6128 6129 >>> db.users.drop() 6130 6131 Examples of insert, select, update, delete 6132 6133 >>> tmp = db.define_table('person',\ 6134 Field('name'),\ 6135 Field('birth','date'),\ 6136 migrate='test_person.table') 6137 >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') 6138 >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') 6139 6140 commented len(db().select(db.person.ALL)) 6141 commented 2 6142 6143 >>> me = db(db.person.id==person_id).select()[0] # test select 6144 >>> me.name 6145 'Massimo' 6146 >>> db(db.person.name=='Massimo').update(name='massimo') # test update 6147 1 6148 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 6149 1 6150 6151 Update a single record 6152 6153 >>> me.update_record(name=\"Max\") 6154 >>> me.name 6155 'Max' 6156 6157 Examples of complex search conditions 6158 6159 >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 6160 1 6161 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 6162 1 6163 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 6164 1 6165 >>> me = db(db.person.id==person_id).select(db.person.name)[0] 6166 >>> me.name 6167 'Max' 6168 6169 Examples of search conditions using extract from date/datetime/time 6170 6171 >>> len(db(db.person.birth.month()==12).select()) 6172 1 6173 >>> len(db(db.person.birth.year()>1900).select()) 6174 1 6175 6176 Example of usage of NULL 6177 6178 >>> len(db(db.person.birth==None).select()) ### test NULL 6179 0 6180 >>> len(db(db.person.birth!=None).select()) ### test NULL 6181 1 6182 6183 Examples of search conditions using lower, upper, and like 6184 6185 >>> len(db(db.person.name.upper()=='MAX').select()) 6186 1 6187 >>> len(db(db.person.name.like('%ax')).select()) 6188 1 6189 >>> len(db(db.person.name.upper().like('%AX')).select()) 6190 1 6191 >>> len(db(~db.person.name.upper().like('%AX')).select()) 6192 0 6193 6194 orderby, groupby and limitby 6195 6196 >>> people = db().select(db.person.name, orderby=db.person.name) 6197 >>> order = db.person.name|~db.person.birth 6198 >>> people = db().select(db.person.name, orderby=order) 6199 6200 >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) 6201 6202 >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) 6203 6204 Example of one 2 many relation 6205 6206 >>> tmp = db.define_table('dog',\ 6207 Field('name'),\ 6208 Field('birth','date'),\ 6209 Field('owner',db.person),\ 6210 
migrate='test_dog.table') 6211 >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 6212 1 6213 6214 A simple JOIN 6215 6216 >>> len(db(db.dog.owner==db.person.id).select()) 6217 1 6218 6219 >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 6220 1 6221 6222 Drop tables 6223 6224 >>> db.dog.drop() 6225 >>> db.person.drop() 6226 6227 Example of many 2 many relation and Set 6228 6229 >>> tmp = db.define_table('author', Field('name'),\ 6230 migrate='test_author.table') 6231 >>> tmp = db.define_table('paper', Field('title'),\ 6232 migrate='test_paper.table') 6233 >>> tmp = db.define_table('authorship',\ 6234 Field('author_id', db.author),\ 6235 Field('paper_id', db.paper),\ 6236 migrate='test_authorship.table') 6237 >>> aid = db.author.insert(name='Massimo') 6238 >>> pid = db.paper.insert(title='QCD') 6239 >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) 6240 6241 Define a Set 6242 6243 >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) 6244 >>> rows = authored_papers.select(db.author.name, db.paper.title) 6245 >>> for row in rows: print row.author.name, row.paper.title 6246 Massimo QCD 6247 6248 Example of search condition using belongs 6249 6250 >>> set = (1, 2, 3) 6251 >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) 6252 >>> print rows[0].title 6253 QCD 6254 6255 Example of search condition using nested select 6256 6257 >>> nested_select = db()._select(db.authorship.paper_id) 6258 >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) 6259 >>> print rows[0].title 6260 QCD 6261 6262 Example of expressions 6263 6264 >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) 6265 >>> db(mynumber.id>0).delete() 6266 0 6267 >>> for i in range(10): tmp = mynumber.insert(x=i) 6268 >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum()) 6269 45 6270 6271 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 6272 5 6273 6274 Output in csv 6275 6276 >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() 6277 author.name,paper.title\r 6278 Massimo,QCD 6279 6280 Delete all leftover tables 6281 6282 >>> DAL.distributed_transaction_commit(db) 6283 6284 >>> db.mynumber.drop() 6285 >>> db.authorship.drop() 6286 >>> db.author.drop() 6287 >>> db.paper.drop() 6288 """
6289  ################################################################################
6290  # deprecated since the new DAL; here only for backward compatibility
6291  ################################################################################
6292
6293  SQLField = Field
6294  SQLTable = Table
6295  SQLXorable = Expression
6296  SQLQuery = Query
6297  SQLSet = Set
6298  SQLRows = Rows
6299  SQLStorage = Row
6300  SQLDB = DAL
6301  GQLDB = DAL
6302  DAL.Field = Field  # was necessary in gluon/globals.py session.connect
6303  DAL.Table = Table  # was necessary in gluon/globals.py session.connect
6304
6305  ################################################################################
6306  # run tests
6307  ################################################################################
6308
6309  if __name__ == '__main__':
6310      import doctest
6311      doctest.testmod()
6312