
Source Code for Module web2py.gluon.dal

   1  #!/usr/bin/env python
   2  # -*- coding: utf-8 -*- 
   3   
   4  """ 
   5  This file is part of the web2py Web Framework 
   6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
   7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
   8   
   9  Thanks to 
  10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
  11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
  12      * Denes 
  13      * Chris Clark 
  14      * clach05 
  15      * Denes Lengyel 
  16      * and many others who have contributed to current and previous versions 
  17   
  18  This file contains the DAL support for many relational databases, 
  19  including: 
  20  - SQLite 
  21  - MySQL 
  22  - Postgres 
  23  - Oracle 
  24  - MS SQL 
  25  - DB2 
  26  - Interbase 
  27  - Ingres 
  28  - SapDB (experimental) 
  29  - Cubrid (experimental) 
  30  - CouchDB (experimental) 
  31  - MongoDB (in progress) 
  32  - Google:nosql 
  33  - Google:sql 
  34   
  35  Example of usage: 
  36   
  37  >>> # from dal import DAL, Field 
  38   
  39  ### create DAL connection (and create DB if it doesn't exist) 
  40  >>> db = DAL(('mysql://a:b@localhost/x', 'sqlite://storage.sqlite'), folder=None) 
  41   
  42  ### define a table 'person' (create/alter as necessary) 
  43  >>> person = db.define_table('person',Field('name','string')) 
  44   
  45  ### insert a record 
  46  >>> id = person.insert(name='James') 
  47   
  48  ### retrieve it by id 
  49  >>> james = person(id) 
  50   
  51  ### retrieve it by name 
  52  >>> james = person(name='James') 
  53   
  54  ### retrieve it by arbitrary query 
  55  >>> query = (person.name=='James') & (person.name.startswith('J')) 
  56  >>> james = db(query).select(person.ALL)[0] 
  57   
  58  ### update one record 
  59  >>> james.update_record(name='Jim') 
  60   
  61  ### update multiple records by query 
  62  >>> db(person.name.like('J%')).update(name='James') 
  63  1 
  64   
  65  ### delete records by query 
  66  >>> db(person.name.lower() == 'jim').delete() 
  67  0 
  68   
  69  ### retrieve multiple records (rows) 
  70  >>> people = db(person).select(orderby=person.name, groupby=person.name, limitby=(0,100)) 
  71   
  72  ### further filter them 
  73  >>> james = people.find(lambda row: row.name == 'James').first() 
  74  >>> print james.id, james.name 
  75  1 James 
  76   
  77  ### check aggregates 
  78  >>> counter = person.id.count() 
  79  >>> print db(person).select(counter).first()(counter) 
  80  1 
  81   
  82  ### delete one record 
  83  >>> james.delete_record() 
  84  1 
  85   
  86  ### delete (drop) entire database table 
  87  >>> person.drop() 
  88   
  89  Supported field types: 
  90  id string text boolean integer double decimal password upload blob time date datetime 
  91   
  92  Supported DAL URI strings: 
  93  'sqlite://test.db' 
  94  'sqlite:memory' 
  95  'jdbc:sqlite://test.db' 
  96  'mysql://root:none@localhost/test' 
  97  'postgres://mdipierro:none@localhost/test' 
  98  'jdbc:postgres://mdipierro:none@localhost/test' 
  99  'mssql://web2py:none@A64X2/web2py_test' 
 100  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
 101  'oracle://username:password@database' 
 102  'firebird://user:password@server:3050/database' 
 103  'db2://DSN=dsn;UID=user;PWD=pass' 
 104  'firebird://username:password@hostname/database' 
 105  'firebird_embedded://username:password@c://path' 
 106  'informix://user:password@server:3050/database' 
 107  'informixu://user:password@server:3050/database' # unicode informix 
 108  'google:datastore' # for google app engine datastore 
 109  'google:sql' # for google app engine with sql (mysql compatible) 
 110  'teradata://DSN=dsn;UID=user;PWD=pass' # experimental 
 111   
 112  For more info: 
 113  help(DAL) 
 114  help(Field) 
 115  """ 
 116   
 117  ################################################################################### 
 118  # this file only exposes DAL and Field 
 119  ################################################################################### 
 120   
 121  __all__ = ['DAL', 'Field'] 
 122   
 123  MAXCHARLENGTH = 2**15 # not an exact DB limit, but a reasonable default max char length
 124  DEFAULTLENGTH = {'string':512, 
 125                   'password':512, 
 126                   'upload':512, 
 127                   'text':2**15, 
 128                   'blob':2**31} 
 129  TIMINGSSIZE = 100 
 130   
 131  import re 
 132  import sys 
 133  import locale 
 134  import os 
 135  import types 
 136  import cPickle 
 137  import datetime 
 138  import threading 
 139  import time 
 140  import cStringIO 
 141  import csv 
 142  import cgi 
 143  import copy 
 144  import socket 
 145  import logging 
 146  import copy_reg 
 147  import base64 
 148  import shutil 
 149  import marshal 
 150  import decimal 
 151  import struct 
 152  import urllib 
 153  import hashlib 
 154  import uuid 
 155  import glob 
 156  import traceback 
 157   
 158  CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, 
 159                   types.MethodType, types.BuiltinMethodType) 
 160   
 161   
 162  ################################################################################### 
 163  # the following checks allow running the DAL without web2py, as a standalone module
 164  ################################################################################### 
 165  try: 
 166      from utils import web2py_uuid 
 167  except ImportError: 
 168      import uuid 
169 - def web2py_uuid(): return str(uuid.uuid4())
 170
 171  try:
 172      import portalocker
 173      have_portalocker = True
 174  except ImportError:
 175      have_portalocker = False
 176
 177  try:
 178      import serializers
 179      have_serializers = True
 180  except ImportError:
 181      have_serializers = False
 182
 183  try:
 184      import validators
 185      have_validators = True
 186  except ImportError:
 187      have_validators = False
 188
 189  logger = logging.getLogger("web2py.dal")
 190  DEFAULT = lambda:0
 191
 192  sql_locker = threading.RLock()
 193  thread = threading.local()
 194
 195  # internal representation of tables with field
 196  # <table>.<field>, tables and fields may only be [a-zA-Z0-9_]
 197
 198  regex_dbname = re.compile('^(\w+)(\:\w+)*')
 199  table_field = re.compile('^([\w_]+)\.([\w_]+)$')
 200  regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
 201  regex_cleanup_fn = re.compile('[\'"\s;]+')
 202  string_unpack=re.compile('(?<!\|)\|(?!\|)')
 203  regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
 204
 205
 206
 207  # list of drivers will be built on the fly
 208  # and lists only what is available
 209  drivers = []
 210
 211  try:
 212      from new import classobj
 213      from google.appengine.ext import db as gae
 214      from google.appengine.api import namespace_manager, rdbms
 215      from google.appengine.api.datastore_types import Key  ### needed for belongs on ID
 216      from google.appengine.ext.db.polymodel import PolyModel
 217      drivers.append('google')
 218  except ImportError:
 219      pass
 220
 221  if not 'google' in drivers:
 222
 223      try:
 224          from pysqlite2 import dbapi2 as sqlite3
 225          drivers.append('pysqlite2')
 226      except ImportError:
 227          try:
 228              from sqlite3 import dbapi2 as sqlite3
 229              drivers.append('SQLite3')
 230          except ImportError:
 231              logger.debug('no sqlite3 or pysqlite2.dbapi2 driver')
 232
 233      try:
 234          import contrib.pymysql as pymysql
 235          drivers.append('pymysql')
 236      except ImportError:
 237          logger.debug('no pymysql driver')
 238
 239      try:
 240          import psycopg2
 241          from psycopg2.extensions import adapt as psycopg2_adapt
 242          drivers.append('PostgreSQL')
 243      except ImportError:
 244          logger.debug('no psycopg2 driver')
 245
 246      try:
 247          import cx_Oracle
 248          drivers.append('Oracle')
 249      except ImportError:
 250          logger.debug('no cx_Oracle driver')
 251
 252      try:
 253          import pyodbc
 254          drivers.append('MSSQL/DB2')
 255      except ImportError:
 256          logger.debug('no MSSQL/DB2 driver')
 257
 258      try:
 259          import kinterbasdb
 260          drivers.append('Interbase')
 261      except ImportError:
 262          logger.debug('no kinterbasdb driver')
 263
 264      try:
 265          import firebirdsql
 266          drivers.append('Firebird')
 267      except ImportError:
 268          logger.debug('no Firebird driver')
 269
 270      try:
 271          import informixdb
 272          drivers.append('Informix')
 273          logger.warning('Informix support is experimental')
 274      except ImportError:
 275          logger.debug('no informixdb driver')
 276
 277      try:
 278          import sapdb
 279          drivers.append('SAPDB')
 280          logger.warning('SAPDB support is experimental')
 281      except ImportError:
 282          logger.debug('no sapdb driver')
 283
 284      try:
 285          import cubriddb
 286          drivers.append('Cubrid')
 287          logger.warning('Cubrid support is experimental')
 288      except ImportError:
 289          logger.debug('no cubriddb driver')
 290
 291      try:
 292          from com.ziclix.python.sql import zxJDBC
 293          import java.sql
 294          # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
 295          from org.sqlite import JDBC # required by java.sql; ensure we have it
 296          drivers.append('zxJDBC')
 297          logger.warning('zxJDBC support is experimental')
 298          is_jdbc = True
 299      except ImportError:
 300          logger.debug('no zxJDBC driver')
 301          is_jdbc = False
 302
 303      try:
 304          import ingresdbi
 305          drivers.append('Ingres')
 306      except ImportError:
 307          logger.debug('no Ingres driver')
 308      # NOTE could try JDBC.......
 309
 310      try:
 311          import couchdb
 312          drivers.append('CouchDB')
 313      except ImportError:
 314          logger.debug('no couchdb driver')
 315
 316      try:
 317          import pymongo
 318          drivers.append('mongoDB')
 319      except:
 320          logger.debug('no mongoDB driver')
 321
 322  PLURALIZE_RULES = [
 323      (re.compile('child$'), re.compile('child$'), 'children'),
 324      (re.compile('oot$'), re.compile('oot$'), 'eet'),
 325      (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
 326      (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
 327      (re.compile('sis$'), re.compile('sis$'), 'ses'),
 328      (re.compile('man$'), re.compile('man$'), 'men'),
 329      (re.compile('ife$'), re.compile('ife$'), 'ives'),
 330      (re.compile('eau$'), re.compile('eau$'), 'eaux'),
 331      (re.compile('lf$'), re.compile('lf$'), 'lves'),
 332      (re.compile('[sxz]$'), re.compile('$'), 'es'),
 333      (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
 334      (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
 335      (re.compile('$'), re.compile('$'), 's'),
 336  ]
 337
338 -def pluralize(singular, rules=PLURALIZE_RULES):
 339      for line in rules:
 340          re_search, re_sub, replace = line
 341          plural = re_search.search(singular) and re_sub.sub(replace, singular)
 342          if plural: return plural
343
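For illustration, how pluralize behaves under the rules above (a sketch, not part of the module source; outputs follow from PLURALIZE_RULES):

>>> pluralize('child')
'children'
>>> pluralize('leaf')
'leaves'
>>> pluralize('box')
'boxes'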
344 -def OR(a,b):
345 return a|b
346
347 -def AND(a,b):
348 return a&b
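OR and AND exist so that lists of queries can be folded into a single Query, typically with reduce(); a hedged usage sketch, reusing the db and person table from the module docstring:

>>> queries = [person.name == 'James', person.name == 'Jim']
>>> rows = db(reduce(OR, queries)).select()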
 349
 350  if 'google' in drivers:
 351
 352      is_jdbc = False
 353
354 - class GAEDecimalProperty(gae.Property):
 355          """
 356          GAE decimal implementation
 357          """
 358          data_type = decimal.Decimal
 359
360 - def __init__(self, precision, scale, **kwargs):
 361              super(GAEDecimalProperty, self).__init__(**kwargs)
 362              d = '1.'
 363              for x in range(scale):
 364                  d += '0'
 365              self.round = decimal.Decimal(d)
366
367 - def get_value_for_datastore(self, model_instance):
 368              value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance)
 369              if value is None or value == '':
 370                  return None
 371              else:
 372                  return str(value)
373
374 - def make_value_from_datastore(self, value):
 375              if value is None or value == '':
 376                  return None
 377              else:
 378                  return decimal.Decimal(value).quantize(self.round)
379
380 - def validate(self, value):
 381              value = super(GAEDecimalProperty, self).validate(value)
 382              if value is None or isinstance(value, decimal.Decimal):
 383                  return value
 384              elif isinstance(value, basestring):
 385                  return decimal.Decimal(value)
 386              raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
 387
 388  ###################################################################################
 389  # class that handles connection pooling (all adapters are derived from this one)
 390  ###################################################################################
 391
392 -class ConnectionPool(object):
 393
 394      pools = {}
 395      check_active_connection = True
 396
 397      @staticmethod
398 - def set_folder(folder):
399 thread.folder = folder
 400
 401      # ## this allows gluon to commit/rollback all dbs in this thread
 402
 403      @staticmethod
404 - def close_all_instances(action):
 405          """ close all database connections cleanly in a multithreaded environment """
 406          if not hasattr(thread, 'instances'):
 407              return
 408          while thread.instances:
 409              instance = thread.instances.pop()
 410              if action:
 411                  getattr(instance, action)()
 412              # ## if you want pools, recycle this connection
 413              really = True
 414              if instance.pool_size:
 415                  sql_locker.acquire()
 416                  pool = ConnectionPool.pools[instance.uri]
 417                  if len(pool) < instance.pool_size:
 418                      pool.append(instance.connection)
 419                      really = False
 420                  sql_locker.release()
 421              if really:
 422                  getattr(instance, 'close')()
 423          return
424
425 - def find_or_make_work_folder(self):
 426          """ this actually does not make the folder. it has to be there """
 427          if hasattr(thread,'folder'):
 428              self.folder = thread.folder
 429          else:
 430              self.folder = thread.folder = ''
 431
 432          # folder creation is intentionally disabled; the folder must already exist
 433          if False and self.folder and not os.path.exists(self.folder):
 434              os.mkdir(self.folder)
435
436 - def pool_connection(self, f, cursor=True):
 437          """
 438          this function defines: self.connection and self.cursor (iff cursor is True)
 439          if self.pool_size>0 it will try to pull the connection from the pool
 440          if the connection is not active (closed by db server) it will loop
 441          if not self.pool_size or no active connections in pool makes a new one
 442          """
 443          if not self.pool_size:
 444              self.connection = f()
 445              self.cursor = cursor and self.connection.cursor()
 446          else:
 447              uri = self.uri
 448              while True:
 449                  sql_locker.acquire()
 450                  if not uri in ConnectionPool.pools:
 451                      ConnectionPool.pools[uri] = []
 452                  if ConnectionPool.pools[uri]:
 453                      self.connection = ConnectionPool.pools[uri].pop()
 454                      sql_locker.release()
 455                      self.cursor = cursor and self.connection.cursor()
 456                      try:
 457                          if self.cursor and self.check_active_connection:
 458                              self.execute('SELECT 1;')
 459                          break
 460                      except:
 461                          pass
 462                  else:
 463                      sql_locker.release()
 464                      self.connection = f()
 465                      self.cursor = cursor and self.connection.cursor()
 466                      break
 467          if not hasattr(thread,'instances'):
 468              thread.instances = []
 469          thread.instances.append(self)
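In practice pooling is driven by the pool_size argument of DAL(); an illustrative sketch (URI and size are examples only). Up to pool_size connections per URI are kept and recycled, and stale connections are detected by the 'SELECT 1;' ping above and replaced:

>>> db = DAL('mysql://a:b@localhost/x', pool_size=10)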
 470
 471
 472  ###################################################################################
 473  # this is a generic adapter that does nothing; all others are derived from this one
 474  ###################################################################################
 475
476 -class BaseAdapter(ConnectionPool):
 477
 478      driver = None
 479      maxcharlength = MAXCHARLENGTH
 480      commit_on_alter_table = False
 481      support_distributed_transaction = False
 482      uploads_in_blob = False
 483      can_select_for_update = True
 484      types = {
 485          'boolean': 'CHAR(1)',
 486          'string': 'CHAR(%(length)s)',
 487          'text': 'TEXT',
 488          'password': 'CHAR(%(length)s)',
 489          'blob': 'BLOB',
 490          'upload': 'CHAR(%(length)s)',
 491          'integer': 'INTEGER',
 492          'double': 'DOUBLE',
 493          'decimal': 'DOUBLE',
 494          'date': 'DATE',
 495          'time': 'TIME',
 496          'datetime': 'TIMESTAMP',
 497          'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
 498          'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
 499          'list:integer': 'TEXT',
 500          'list:string': 'TEXT',
 501          'list:reference': 'TEXT',
 502          }
504 - def adapt(self, obj):
505 return "'%s'" % obj.replace("'", "''")
506
507 - def integrity_error(self):
508 return self.driver.IntegrityError
509
510 - def operational_error(self):
511 return self.driver.OperationalError
512
513 - def file_exists(self, filename):
 514          """
 515          to be used ONLY for files that on GAE may not be on filesystem
 516          """
 517          return os.path.exists(filename)
518
519 - def file_open(self, filename, mode='rb', lock=True):
 520          """
 521          to be used ONLY for files that on GAE may not be on filesystem
 522          """
 523          fileobj = open(filename, mode)
 524          if have_portalocker and lock:
 525              if mode in ('r','rb'):
 526                  portalocker.lock(fileobj, portalocker.LOCK_SH)
 527              elif mode in ('w','wb','a'):
 528                  portalocker.lock(fileobj, portalocker.LOCK_EX)
 529              else:
 530                  fileobj.close()
 531                  raise RuntimeError, "Unsupported file_open mode"
 532          return fileobj
533
534 - def file_close(self, fileobj, unlock=True):
 535          """
 536          to be used ONLY for files that on GAE may not be on filesystem
 537          """
 538          if fileobj:
 539              if have_portalocker and unlock:
 540                  portalocker.unlock(fileobj)
 541              fileobj.close()
542
543 - def file_delete(self, filename):
544 os.unlink(filename)
545
 546 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
 547               credential_decoder=lambda x:x, driver_args={},
 548               adapter_args={}):
 549          self.db = db
 550          self.dbengine = "None"
 551          self.uri = uri
 552          self.pool_size = pool_size
 553          self.folder = folder
 554          self.db_codec = db_codec
 555          class Dummy(object):
 556              lastrowid = 1
 557              def __getattr__(self, value):
 558                  return lambda *a, **b: []
 559          self.connection = Dummy()
 560          self.cursor = Dummy()
 561
562 - def sequence_name(self,tablename):
563 return '%s_sequence' % tablename
564
565 - def trigger_name(self,tablename):
566 return '%s_sequence' % tablename
567
 568 - def create_table(self, table,
 569                    migrate=True,
 570                    fake_migrate=False,
 571                    polymodel=None):
 572          fields = []
 573          sql_fields = {}
 574          sql_fields_aux = {}
 575          TFK = {}
 576          tablename = table._tablename
 577          sortable = 0
 578          for field in table:
 579              sortable += 1
 580              k = field.name
 581              if isinstance(field.type,SQLCustomType):
 582                  ftype = field.type.native or field.type.type
 583              elif field.type.startswith('reference'):
 584                  referenced = field.type[10:].strip()
 585                  constraint_name = self.constraint_name(tablename, field.name)
 586                  if hasattr(table,'_primarykey'):
 587                      rtablename,rfieldname = referenced.split('.')
 588                      rtable = table._db[rtablename]
 589                      rfield = rtable[rfieldname]
 590                      # must be PK reference or unique
 591                      if rfieldname in rtable._primarykey or rfield.unique:
 592                          ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
 593                          # multicolumn primary key reference?
 594                          if not rfield.unique and len(rtable._primarykey)>1:
 595                              # then it has to be a table level FK
 596                              if rtablename not in TFK:
 597                                  TFK[rtablename] = {}
 598                              TFK[rtablename][rfieldname] = field.name
 599                          else:
 600                              ftype = ftype + \
 601                                  self.types['reference FK'] % dict(\
 602                                  constraint_name=constraint_name,
 603                                  table_name=tablename,
 604                                  field_name=field.name,
 605                                  foreign_key='%s (%s)'%(rtablename, rfieldname),
 606                                  on_delete_action=field.ondelete)
 607                  else:
 608                      # make a guess here for circular references
 609                      id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
 610                      ftype = self.types[field.type[:9]]\
 611                          % dict(table_name=tablename,
 612                                 field_name=field.name,
 613                                 constraint_name=constraint_name,
 614                                 foreign_key=referenced + ('(%s)' % id_fieldname),
 615                                 on_delete_action=field.ondelete)
 616              elif field.type.startswith('list:reference'):
 617                  ftype = self.types[field.type[:14]]
 618              elif field.type.startswith('decimal'):
 619                  precision, scale = map(int,field.type[8:-1].split(','))
 620                  ftype = self.types[field.type[:7]] % \
 621                      dict(precision=precision,scale=scale)
 622              elif not field.type in self.types:
 623                  raise SyntaxError, 'Field: unknown field type: %s for %s' % \
 624                      (field.type, field.name)
 625              else:
 626                  ftype = self.types[field.type]\
 627                      % dict(length=field.length)
 628              if not field.type.startswith('id') and not field.type.startswith('reference'):
 629                  if field.notnull:
 630                      ftype += ' NOT NULL'
 631                  else:
 632                      ftype += self.ALLOW_NULL()
 633                  if field.unique:
 634                      ftype += ' UNIQUE'
 635
 636              # add to list of fields
 637              sql_fields[field.name] = dict(sortable=sortable,
 638                                            type=str(field.type),
 639                                            sql=ftype)
 640
 641              if isinstance(field.default,(str,int,float)):
 642                  # Caveat: sql_fields and sql_fields_aux differ for default values.
 643                  # sql_fields is used to trigger migrations and sql_fields_aux
 644                  # is used for create tables.
 645                  # The reason is that we do not want to trigger a migration simply
 646                  # because a default value changes.
 647                  not_null = self.NOT_NULL(field.default, field.type)
 648                  ftype = ftype.replace('NOT NULL', not_null)
 649              sql_fields_aux[field.name] = dict(sql=ftype)
 650              fields.append('%s %s' % (field.name, ftype))
 651          other = ';'
 652
 653          # backend-specific extensions to fields
 654          if self.dbengine == 'mysql':
 655              if not hasattr(table, "_primarykey"):
 656                  fields.append('PRIMARY KEY(%s)' % table._id.name)
 657              other = ' ENGINE=InnoDB CHARACTER SET utf8;'
 658
 659          fields = ',\n    '.join(fields)
 660          for rtablename in TFK:
 661              rfields = TFK[rtablename]
 662              pkeys = table._db[rtablename]._primarykey
 663              fkeys = [ rfields[k] for k in pkeys ]
 664              fields = fields + ',\n    ' + \
 665                  self.types['reference TFK'] % \
 666                  dict(table_name=tablename,
 667                       field_name=', '.join(fkeys),
 668                       foreign_table=rtablename,
 669                       foreign_key=', '.join(pkeys),
 670                       on_delete_action=field.ondelete)
 671
 672          if hasattr(table,'_primarykey'):
 673              query = '''CREATE TABLE %s(\n    %s,\n    %s) %s''' % \
 674                  (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)), other)
 675          else:
 676              query = '''CREATE TABLE %s(\n    %s\n)%s''' % \
 677                  (tablename, fields, other)
 678
 679          if self.uri.startswith('sqlite:///'):
 680              path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
 681              dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
 682          else:
 683              dbpath = self.folder
 684
 685          if not migrate:
 686              return query
 687          elif self.uri.startswith('sqlite:memory'):
 688              table._dbt = None
 689          elif isinstance(migrate, str):
 690              table._dbt = os.path.join(dbpath, migrate)
 691          else:
 692              table._dbt = os.path.join(dbpath, '%s_%s.table' \
 693                  % (table._db._uri_hash, tablename))
 694          if table._dbt:
 695              table._loggername = os.path.join(dbpath, 'sql.log')
 696              logfile = self.file_open(table._loggername, 'a')
 697          else:
 698              logfile = None
 699          if not table._dbt or not self.file_exists(table._dbt):
 700              if table._dbt:
 701                  logfile.write('timestamp: %s\n'
 702                      % datetime.datetime.today().isoformat())
 703                  logfile.write(query + '\n')
 704              if not fake_migrate:
 705                  self.create_sequence_and_triggers(query,table)
 706                  table._db.commit()
 707              if table._dbt:
 708                  tfile = self.file_open(table._dbt, 'w')
 709                  cPickle.dump(sql_fields, tfile)
 710                  self.file_close(tfile)
 711                  if fake_migrate:
 712                      logfile.write('faked!\n')
 713                  else:
 714                      logfile.write('success!\n')
 715          else:
 716              tfile = self.file_open(table._dbt, 'r')
 717              try:
 718                  sql_fields_old = cPickle.load(tfile)
 719              except EOFError:
 720                  self.file_close(tfile)
 721                  self.file_close(logfile)
 722                  raise RuntimeError, 'File %s appears corrupted' % table._dbt
 723              self.file_close(tfile)
 724              if sql_fields != sql_fields_old:
 725                  self.migrate_table(table,
 726                                     sql_fields, sql_fields_old,
 727                                     sql_fields_aux, logfile,
 728                                     fake_migrate=fake_migrate)
 729              self.file_close(logfile)
 730          return query
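A hedged sketch of how the migrate and fake_migrate flags reach create_table through define_table (table and file names are illustrative; use one variant at a time):

>>> db.define_table('person', Field('name','string'))                     # migrations on (default)
>>> db.define_table('person', Field('name','string'), migrate='person.table')  # explicit .table file
>>> db.define_table('person', Field('name','string'), fake_migrate=True)  # rewrite metadata, no DDL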
731
 732 - def migrate_table(
 733          self,
 734          table,
 735          sql_fields,
 736          sql_fields_old,
 737          sql_fields_aux,
 738          logfile,
 739          fake_migrate=False,
 740          ):
 741          tablename = table._tablename
 742          def fix(item):
 743              k,v=item
 744              if not isinstance(v,dict):
 745                  v=dict(type='unknown',sql=v)
 746              return k.lower(),v
 747          ### make sure all field names are lower case to avoid conflicts
 748          sql_fields = dict(map(fix,sql_fields.items()))
 749          sql_fields_old = dict(map(fix,sql_fields_old.items()))
 750          sql_fields_aux = dict(map(fix,sql_fields_aux.items()))
 751
 752          keys = sql_fields.keys()
 753          for key in sql_fields_old:
 754              if not key in keys:
 755                  keys.append(key)
 756          if self.dbengine == 'mssql':
 757              new_add = '; ALTER TABLE %s ADD ' % tablename
 758          else:
 759              new_add = ', ADD '
 760
 761          metadata_change = False
 762          sql_fields_current = copy.copy(sql_fields_old)
 763          for key in keys:
 764              query = None
 765              if not key in sql_fields_old:
 766                  sql_fields_current[key] = sql_fields[key]
 767                  query = ['ALTER TABLE %s ADD %s %s;' % \
 768                      (tablename, key,
 769                       sql_fields_aux[key]['sql'].replace(', ', new_add))]
 770                  metadata_change = True
 771              elif self.dbengine == 'sqlite':
 772                  if key in sql_fields:
 773                      sql_fields_current[key] = sql_fields[key]
 774                  metadata_change = True
 775              elif not key in sql_fields:
 776                  del sql_fields_current[key]
 777                  if not self.dbengine in ('firebird',):
 778                      query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
 779                  else:
 780                      query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
 781                  metadata_change = True
 782              elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
 783                   and not isinstance(table[key].type, SQLCustomType) \
 784                   and not (table[key].type.startswith('reference') and \
 785                       sql_fields[key]['sql'].startswith('INT,') and \
 786                       sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
 787                  sql_fields_current[key] = sql_fields[key]
 788                  t = tablename
 789                  tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
 790                  if not self.dbengine in ('firebird',):
 791                      query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
 792                               'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
 793                               'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
 794                               'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
 795                               'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
 796                               'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
 797                  else:
 798                      query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
 799                               'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
 800                               'ALTER TABLE %s DROP %s;' % (t, key),
 801                               'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
 802                               'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
 803                               'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
 804                  metadata_change = True
 805              elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
 806                  sql_fields_current[key] = sql_fields[key]
 807                  metadata_change = True
 808
 809              if query:
 810                  logfile.write('timestamp: %s\n'
 811                      % datetime.datetime.today().isoformat())
 812                  table._db['_lastsql'] = '\n'.join(query)
 813                  for sub_query in query:
 814                      logfile.write(sub_query + '\n')
 815                      if not fake_migrate:
 816                          self.execute(sub_query)
 817                          # Caveat: mysql, oracle and firebird do not allow multiple alter table
 818                          # in one transaction so we must commit partial transactions and
 819                          # update table._dbt after alter table.
 820                          if table._db._adapter.commit_on_alter_table:
 821                              table._db.commit()
 822                              tfile = self.file_open(table._dbt, 'w')
 823                              cPickle.dump(sql_fields_current, tfile)
 824                              self.file_close(tfile)
 825                              logfile.write('success!\n')
 826                      else:
 827                          logfile.write('faked!\n')
 828              elif metadata_change:
 829                  tfile = self.file_open(table._dbt, 'w')
 830                  cPickle.dump(sql_fields_current, tfile)
 831                  self.file_close(tfile)
 832
 833          if metadata_change and \
 834             not (query and self.dbengine in ('mysql','oracle','firebird')):
 835              table._db.commit()
 836              tfile = self.file_open(table._dbt, 'w')
 837              cPickle.dump(sql_fields_current, tfile)
 838              self.file_close(tfile)
 839
840 - def LOWER(self, first):
841 return 'LOWER(%s)' % self.expand(first)
842
843 - def UPPER(self, first):
844 return 'UPPER(%s)' % self.expand(first)
845
846 - def EXTRACT(self, first, what):
847 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
848
849 - def AGGREGATE(self, first, what):
850 return "%s(%s)" % (what, self.expand(first))
851
852 - def JOIN(self):
853 return 'JOIN'
854
855 - def LEFT_JOIN(self):
856 return 'LEFT JOIN'
857
858 - def RANDOM(self):
859 return 'Random()'
860
861 - def NOT_NULL(self, default, field_type):
862 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
863
864 - def COALESCE(self, first, second):
 865          expressions = [self.expand(first)]+[self.expand(e) for e in second]
 866          return 'COALESCE(%s)' % ','.join(expressions)
867
868 - def COALESCE_ZERO(self, first):
869 return 'COALESCE(%s,0)' % self.expand(first)
870
871 - def RAW(self, first):
872 return first
873
874 - def ALLOW_NULL(self):
875 return ''
876
877 - def SUBSTRING(self, field, parameters):
878 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
879
880 - def PRIMARY_KEY(self, key):
881 return 'PRIMARY KEY(%s)' % key
882
883 - def _drop(self, table, mode):
884 return ['DROP TABLE %s;' % table]
885
886 - def drop(self, table, mode=''):
 887          if table._dbt:
 888              logfile = self.file_open(table._loggername, 'a')
 889          queries = self._drop(table, mode)
 890          for query in queries:
 891              if table._dbt:
 892                  logfile.write(query + '\n')
 893              self.execute(query)
 894          table._db.commit()
 895          del table._db[table._tablename]
 896          del table._db.tables[table._db.tables.index(table._tablename)]
 897          table._db._update_referenced_by(table._tablename)
 898          if table._dbt:
 899              self.file_delete(table._dbt)
 900              logfile.write('success!\n')
901
902 - def _insert(self, table, fields):
 903          keys = ','.join(f.name for f,v in fields)
 904          values = ','.join(self.expand(v,f.type) for f,v in fields)
 905          return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
906
907 - def insert(self, table, fields):
 908          query = self._insert(table,fields)
 909          try:
 910              self.execute(query)
 911          except Exception, e:
 912              if isinstance(e,self.integrity_error_class()):
 913                  return None
 914              raise e
 915          if hasattr(table,'_primarykey'):
 916              return dict([(k[0].name, k[1]) for k in fields \
 917                               if k[0].name in table._primarykey])
 918          id = self.lastrowid(table)
 919          if not isinstance(id,int):
 920              return id
 921          rid = Reference(id)
 922          (rid._table, rid._record) = (table, None)
 923          return rid
924
925 - def bulk_insert(self, table, items):
926 return [self.insert(table,item) for item in items]
927
928 - def NOT(self, first):
929 return '(NOT %s)' % self.expand(first)
930
931 - def AND(self, first, second):
932 return '(%s AND %s)' % (self.expand(first), self.expand(second))
933
934 - def OR(self, first, second):
935 return '(%s OR %s)' % (self.expand(first), self.expand(second))
936
937 - def BELONGS(self, first, second):
 938          if isinstance(second, str):
 939              return '(%s IN (%s))' % (self.expand(first), second[:-1])
 940          elif second==[] or second==():
 941              return '(1=0)'
 942          items = ','.join(self.expand(item, first.type) for item in second)
 943          return '(%s IN (%s))' % (self.expand(first), items)
944
945 - def LIKE(self, first, second):
946 return '(%s LIKE %s)' % (self.expand(first), self.expand(second, 'string'))
947
948 - def STARTSWITH(self, first, second):
949 return '(%s LIKE %s)' % (self.expand(first), self.expand(second+'%', 'string'))
950
951 - def ENDSWITH(self, first, second):
952 return '(%s LIKE %s)' % (self.expand(first), self.expand('%'+second, 'string'))
953
954 - def CONTAINS(self, first, second):
 955          if first.type in ('string', 'text'):
 956              key = '%'+str(second).replace('%','%%')+'%'
 957          elif first.type.startswith('list:'):
 958              key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
 959          return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
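Each of these operator methods returns a SQL fragment; a sketch of the strings produced for the docstring's person table (exact whitespace and quoting vary per adapter):

>>> db(person.name.belongs(['James','Jim']))._select(person.id)
"SELECT  person.id FROM person WHERE (person.name IN ('James','Jim'));"
>>> db(person.name.contains('am'))._select(person.id)
"SELECT  person.id FROM person WHERE (person.name LIKE '%am%');"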
960
961 - def EQ(self, first, second=None):
 962          if second is None:
 963              return '(%s IS NULL)' % self.expand(first)
 964          return '(%s = %s)' % (self.expand(first), self.expand(second, first.type))
965
966 - def NE(self, first, second=None):
 967          if second is None:
 968              return '(%s IS NOT NULL)' % self.expand(first)
 969          return '(%s <> %s)' % (self.expand(first), self.expand(second, first.type))
970
971 - def LT(self,first,second=None):
 972          if second is None:
 973              raise RuntimeError, "Cannot compare %s < None" % first
 974          return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))
975
976 - def LE(self,first,second=None):
 977          if second is None:
 978              raise RuntimeError, "Cannot compare %s <= None" % first
 979          return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))
980
981 - def GT(self,first,second=None):
 982          if second is None:
 983              raise RuntimeError, "Cannot compare %s > None" % first
 984          return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))
985
986 - def GE(self,first,second=None):
 987          if second is None:
 988              raise RuntimeError, "Cannot compare %s >= None" % first
 989          return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))
990
991 - def ADD(self, first, second):
992 return '(%s + %s)' % (self.expand(first), self.expand(second, first.type))
993
994 - def SUB(self, first, second):
995 return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))
996
997 - def MUL(self, first, second):
998 return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))
999
1000 - def DIV(self, first, second):
1001 return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))
1002
1003 - def MOD(self, first, second):
1004 return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))
1005
1006 - def AS(self, first, second):
1007 return '%s AS %s' % (self.expand(first), second)
1008
1009 - def ON(self, first, second):
1010 return '%s ON %s' % (self.expand(first), self.expand(second))
1011
1012 - def INVERT(self, first):
1013 return '%s DESC' % self.expand(first)
1014
1015 - def COMMA(self, first, second):
1016 return '%s, %s' % (self.expand(first), self.expand(second))
1017
1018 - def expand(self, expression, field_type=None):
1019          if isinstance(expression, Field):
1020              return str(expression)
1021          elif isinstance(expression, (Expression, Query)):
1022              if not expression.second is None:
1023                  return expression.op(expression.first, expression.second)
1024              elif not expression.first is None:
1025                  return expression.op(expression.first)
1026              elif not isinstance(expression.op, str):
1027                  return expression.op()
1028              else:
1029                  return '(%s)' % expression.op
1030          elif field_type:
1031              return str(self.represent(expression,field_type))
1032          elif isinstance(expression,(list,tuple)):
1033              return ','.join(self.represent(item,field_type) for item in expression)
1034          else:
1035              return str(expression)
1036
1037 - def alias(self, table, alias):
1038          """
1039          Given a table object, makes a new table object
1040          with alias name.
1041          """
1042          other = copy.copy(table)
1043          other['_ot'] = other._tablename
1044          other['ALL'] = SQLALL(other)
1045          other['_tablename'] = alias
1046          for fieldname in other.fields:
1047              other[fieldname] = copy.copy(other[fieldname])
1048              other[fieldname]._tablename = alias
1049              other[fieldname].tablename = alias
1050              other[fieldname].table = other
1051          table._db[alias] = other
1052          return other
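alias() is normally reached through Table.with_alias; a hedged sketch of a self-join over the docstring's person table (the join condition is illustrative):

>>> father = person.with_alias('father')
>>> rows = db(person.id > 0).select(person.name, father.name,
...     left=father.on(father.id == person.id))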
1053
1054 - def _truncate(self, table, mode=''):
1055          tablename = table._tablename
1056          return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1057
1058 - def truncate(self, table, mode=' '):
1059          # Prepare functions "write_to_logfile" and "close_logfile"
1060          if table._dbt:
1061              logfile = self.file_open(table._loggername, 'a')
1062          else:
1063              class Logfile(object):
1064                  def write(self, value):
1065                      pass
1066                  def close(self):
1067                      pass
1068              logfile = Logfile()
1069
1070          try:
1071              queries = table._db._adapter._truncate(table, mode)
1072              for query in queries:
1073                  logfile.write(query + '\n')
1074                  self.execute(query)
1075              table._db.commit()
1076              logfile.write('success!\n')
1077          finally:
1078              logfile.close()
1079
1080 - def _update(self, tablename, query, fields):
1081          if query:
1082              if not query.ignore_common_filters:
1083                  query = self.common_filter(query, [tablename])
1084              sql_w = ' WHERE ' + self.expand(query)
1085          else:
1086              sql_w = ''
1087          sql_v = ','.join(['%s=%s' % (field.name, self.expand(value, field.type)) \
1088                                for (field, value) in fields])
1089          return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1090
1091 - def update(self, tablename, query, fields):
1092          sql = self._update(tablename, query, fields)
1093          self.execute(sql)
1094          try:
1095              return self.cursor.rowcount
1096          except:
1097              return None
1098
1099 - def _delete(self, tablename, query):
1100          if query:
1101              if not query.ignore_common_filters:
1102                  query = self.common_filter(query, [tablename])
1103              sql_w = ' WHERE ' + self.expand(query)
1104          else:
1105              sql_w = ''
1106          return 'DELETE FROM %s%s;' % (tablename, sql_w)
1107
1108 - def delete(self, tablename, query):
1109          sql = self._delete(tablename, query)
1110          ### special code to handle CASCADE in SQLite
1111          db = self.db
1112          table = db[tablename]
1113          if self.dbengine=='sqlite' and table._referenced_by:
1114              deleted = [x[table._id.name] for x in db(query).select(table._id)]
1115          ### end special code to handle CASCADE in SQLite
1116          self.execute(sql)
1117          try:
1118              counter = self.cursor.rowcount
1119          except:
1120              counter = None
1121          ### special code to handle CASCADE in SQLite
1122          if self.dbengine=='sqlite' and counter:
1123              for tablename,fieldname in table._referenced_by:
1124                  f = db[tablename][fieldname]
1125                  if f.type=='reference '+table._tablename and f.ondelete=='CASCADE':
1126                      db(db[tablename][fieldname].belongs(deleted)).delete()
1127          ### end special code to handle CASCADE in SQLite
1128          return counter
1129
1130 - def get_table(self, query):
1131          tablenames = self.tables(query)
1132          if len(tablenames)==1:
1133              return tablenames[0]
1134          elif len(tablenames)<1:
1135              raise RuntimeError, "No table selected"
1136          else:
1137              raise RuntimeError, "Too many tables selected"
1138
1139 - def _select(self, query, fields, attributes):
1140          for key in set(attributes.keys())-set(('orderby', 'groupby', 'limitby',
1141                                                 'required', 'cache', 'left',
1142                                                 'distinct', 'having', 'join',
1143                                                 'for_update')):
1144              raise SyntaxError, 'invalid select attribute: %s' % key
1145          # ## if no fields specified take them all from the requested tables
1146          new_fields = []
1147          for item in fields:
1148              if isinstance(item,SQLALL):
1149                  new_fields += item.table
1150              else:
1151                  new_fields.append(item)
1152          fields = new_fields
1153          tablenames = self.tables(query)
1154
1155          if query and not query.ignore_common_filters:
1156              query = self.common_filter(query,tablenames)
1157
1158          if not fields:
1159              for table in tablenames:
1160                  for field in self.db[table]:
1161                      fields.append(field)
1162          else:
1163              for field in fields:
1164                  if isinstance(field, basestring) and table_field.match(field):
1165                      tn,fn = field.split('.')
1166                      field = self.db[tn][fn]
1167                  for tablename in self.tables(field):
1168                      if not tablename in tablenames:
1169                          tablenames.append(tablename)
1170          if len(tablenames) < 1:
1171              raise SyntaxError, 'Set: no tables selected'
1172          sql_f = ', '.join(map(self.expand, fields))
1173          self._colnames = [c.strip() for c in sql_f.split(', ')]
1174          if query:
1175              sql_w = ' WHERE ' + self.expand(query)
1176          else:
1177              sql_w = ''
1178          sql_o = ''
1179          sql_s = ''
1180          left = attributes.get('left', False)
1181          inner_join = attributes.get('join', False)
1182          distinct = attributes.get('distinct', False)
1183          groupby = attributes.get('groupby', False)
1184          orderby = attributes.get('orderby', False)
1185          having = attributes.get('having', False)
1186          limitby = attributes.get('limitby', False)
1187          for_update = attributes.get('for_update', False)
1188          if self.can_select_for_update is False and for_update is True:
1189              raise SyntaxError, 'invalid select attribute: for_update'
1190          if distinct is True:
1191              sql_s += 'DISTINCT'
1192          elif distinct:
1193              sql_s += 'DISTINCT ON (%s)' % distinct
1194          if inner_join:
1195              icommand = self.JOIN()
1196              if not isinstance(inner_join, (tuple, list)):
1197                  inner_join = [inner_join]
1198              ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)]
1199              ijoinon = [t for t in inner_join if isinstance(t, Expression)]
1200              ijoinont = [t.first._tablename for t in ijoinon]
1201              iexcluded = [t for t in tablenames if not t in ijoint + ijoinont]
1202          if left:
1203              join = attributes['left']
1204              command = self.LEFT_JOIN()
1205              if not isinstance(join, (tuple, list)):
1206                  join = [join]
1207              joint = [t._tablename for t in join if not isinstance(t, Expression)]
1208              joinon = [t for t in join if isinstance(t, Expression)]
1209              #patch join+left patch (solves problem with ordering in left joins)
1210              tables_to_merge={}
1211              [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
1212              joinont = [t.first._tablename for t in joinon]
1213              [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
1214              important_tablenames = joint + joinont + tables_to_merge.keys()
1215              excluded = [t for t in tablenames if not t in important_tablenames]
1216          def alias(t):
1217              return str(self.db[t])
1218          if inner_join and not left:
1219              sql_t = ', '.join(alias(t) for t in iexcluded)
1220              for t in ijoinon:
1221                  sql_t += ' %s %s' % (icommand, str(t))
1222          elif not inner_join and left:
1223              sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()])
1224              if joint:
1225                  sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
1226              for t in joinon:
1227                  sql_t += ' %s %s' % (command, str(t))
1228          elif inner_join and left:
1229              sql_t = ','.join([alias(t) for t in excluded + \
1230                                    tables_to_merge.keys() if t in iexcluded])
1231              for t in ijoinon:
1232                  sql_t += ' %s %s' % (icommand, str(t))
1233              if joint:
1234                  sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
1235              for t in joinon:
1236                  sql_t += ' %s %s' % (command, str(t))
1237          else:
1238              sql_t = ', '.join(alias(t) for t in tablenames)
1239          if groupby:
1240              if isinstance(groupby, (list, tuple)):
1241                  groupby = xorify(groupby)
1242              sql_o += ' GROUP BY %s' % self.expand(groupby)
1243              if having:
1244                  sql_o += ' HAVING %s' % attributes['having']
1245          if orderby:
1246              if isinstance(orderby, (list, tuple)):
1247                  orderby = xorify(orderby)
1248              if str(orderby) == '<random>':
1249                  sql_o += ' ORDER BY %s' % self.RANDOM()
1250              else:
1251                  sql_o += ' ORDER BY %s' % self.expand(orderby)
1252          if limitby:
1253              if not orderby and tablenames:
1254                  sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
1255          # oracle does not support limitby
1256          sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
1257          if for_update and self.can_select_for_update is True:
1258              sql = sql.rstrip(';') + ' FOR UPDATE;'
1259          return sql
1260
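The generated SQL can be inspected without executing it; a small sketch using the docstring's person table (exact whitespace depends on the adapter):

>>> print db(person.id > 0)._select(person.name)
SELECT  person.name FROM person WHERE (person.id > 0);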
1261 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1262          if limitby:
1263              (lmin, lmax) = limitby
1264              sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
1265          return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
1266
1267 - def select(self, query, fields, attributes):
1268          """
1269          Always returns a Rows object, possibly empty.
1270          """
1271          def response(sql):
1272              self.execute(sql)
1273              return self.cursor.fetchall()
1274          sql = self._select(query, fields, attributes)
1275          if attributes.get('cache', None):
1276              (cache_model, time_expire) = attributes['cache']
1277              del attributes['cache']
1278              key = self.uri + '/' + sql
1279              key = (len(key)<=200) and key or hashlib.md5(key).hexdigest()
1280              rows = cache_model(key, lambda: response(sql), time_expire)
1281          else:
1282              rows = response(sql)
1283          if isinstance(rows,tuple):
1284              rows = list(rows)
1285          limitby = attributes.get('limitby', None) or (0,)
1286          rows = self.rowslice(rows,limitby[0],None)
1287          return self.parse(rows,self._colnames)
1288
1289 - def _count(self, query, distinct=None):
1290          tablenames = self.tables(query)
1291          if query:
1292              if not query.ignore_common_filters:
1293                  query = self.common_filter(query, tablenames)
1294              sql_w = ' WHERE ' + self.expand(query)
1295          else:
1296              sql_w = ''
1297          sql_t = ','.join(tablenames)
1298          if distinct:
1299              if isinstance(distinct,(list, tuple)):
1300                  distinct = xorify(distinct)
1301              sql_d = self.expand(distinct)
1302              return 'SELECT count(DISTINCT %s) FROM %s%s;' % (sql_d, sql_t, sql_w)
1303          return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1304
1305 - def count(self, query, distinct=None):
1306          self.execute(self._count(query, distinct))
1307          return self.cursor.fetchone()[0]
1308
1309 - def tables(self, query):
1310          tables = set()
1311          if isinstance(query, Field):
1312              tables.add(query.tablename)
1313          elif isinstance(query, (Expression, Query)):
1314              if not query.first is None:
1315                  tables = tables.union(self.tables(query.first))
1316              if not query.second is None:
1317                  tables = tables.union(self.tables(query.second))
1318          return list(tables)
1319
1320 - def commit(self):
1321 return self.connection.commit()
1322
1323 - def rollback(self):
1324 return self.connection.rollback()
1325
1326 - def close(self):
1327 return self.connection.close()
1328
1329 - def distributed_transaction_begin(self, key):
1330 return
1331
1332 - def prepare(self, key):
1333 self.connection.prepare()
1334
1335 - def commit_prepared(self, key):
1336 self.connection.commit()
1337
1338 - def rollback_prepared(self, key):
1339 self.connection.rollback()
1340
1341 - def concat_add(self, table):
1342 return ', ADD '
1343
1344 - def constraint_name(self, table, fieldname):
1345 return '%s_%s__constraint' % (table,fieldname)
1346
1347 - def create_sequence_and_triggers(self, query, table, **args):
1348 self.execute(query)
1349
1350 - def log_execute(self, *a, **b):
1351          self.db._lastsql = a[0]
1352          t0 = time.time()
1353          ret = self.cursor.execute(*a, **b)
1354          self.db._timings.append((a[0],time.time()-t0))
1355          del self.db._timings[:-TIMINGSSIZE]
1356          return ret
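Because every statement passes through log_execute, the last SQL string and recent timings are exposed on the DAL instance; an illustrative sketch (the query is an example):

>>> n = db(person.id > 0).count()
>>> db._lastsql
'SELECT count(*) FROM person WHERE (person.id > 0);'
>>> len(db._timings) <= TIMINGSSIZE
True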
1357
1358 - def execute(self, *a, **b):
1359 return self.log_execute(*a, **b)
1360
1361 - def represent(self, obj, fieldtype):
1362          if isinstance(obj, CALLABLETYPES):
1363              obj = obj()
1364          if isinstance(fieldtype, SQLCustomType):
1365              return fieldtype.encoder(obj)
1366          if isinstance(obj, (Expression, Field)):
1367              return str(obj)
1368          if fieldtype.startswith('list:'):
1369              if not obj:
1370                  obj = []
1371              if not isinstance(obj, (list, tuple)):
1372                  obj = [obj]
1373          if isinstance(obj, (list, tuple)):
1374              obj = bar_encode(obj)
1375          if obj is None:
1376              return 'NULL'
1377          if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
1378              return 'NULL'
1379          r = self.represent_exceptions(obj, fieldtype)
1380          if not r is None:
1381              return r
1382          if fieldtype == 'boolean':
1383              if obj and not str(obj)[:1].upper() in ['F', '0']:
1384                  return "'T'"
1385              else:
1386                  return "'F'"
1387          if fieldtype == 'id' or fieldtype == 'integer':
1388              return str(int(obj))
1389          if fieldtype.startswith('decimal'):
1390              return str(obj)
1391          elif fieldtype.startswith('reference'): # reference
1392              if fieldtype.find('.')>0:
1393                  return repr(obj)
1394              elif isinstance(obj, (Row, Reference)):
1395                  return str(obj['id'])
1396              return str(int(obj))
1397          elif fieldtype == 'double':
1398              return repr(float(obj))
1399          if isinstance(obj, unicode):
1400              obj = obj.encode(self.db_codec)
1401          if fieldtype == 'blob':
1402              obj = base64.b64encode(str(obj))
1403          elif fieldtype == 'date':
1404              if isinstance(obj, (datetime.date, datetime.datetime)):
1405                  obj = obj.isoformat()[:10]
1406              else:
1407                  obj = str(obj)
1408          elif fieldtype == 'datetime':
1409              if isinstance(obj, datetime.datetime):
1410                  obj = obj.isoformat()[:19].replace('T',' ')
1411              elif isinstance(obj, datetime.date):
1412                  obj = obj.isoformat()[:10]+' 00:00:00'
1413              else:
1414                  obj = str(obj)
1415          elif fieldtype == 'time':
1416              if isinstance(obj, datetime.time):
1417                  obj = obj.isoformat()[:8]
1418              else:
1419                  obj = str(obj)
1420          if not isinstance(obj,str):
1421              obj = str(obj)
1422          try:
1423              obj.decode(self.db_codec)
1424          except:
1425              obj = obj.decode('latin1').encode(self.db_codec)
1426          return self.adapt(obj)
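A sketch of the literals represent produces for a few field types (via the default adapter; illustrative only):

>>> db._adapter.represent('hello', 'string')
"'hello'"
>>> db._adapter.represent(True, 'boolean')
"'T'"
>>> db._adapter.represent(None, 'integer')
'NULL'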
1427
1428 - def represent_exceptions(self, obj, fieldtype):
1429 return None
1430
1431 - def lastrowid(self, table):
1432 return None
1433
1434 - def integrity_error_class(self):
1435 return type(None)
1436
1437 - def rowslice(self, rows, minimum=0, maximum=None):
1438          """ By default this function does nothing; overload when db does not do slicing. """
1439          return rows
1440
1441 - def parse(self, rows, colnames, blob_decode=True):
1442          db = self.db
1443          virtualtables = []
1444          new_rows = []
1445          for (i,row) in enumerate(rows):
1446              new_row = Row()
1447              for j,colname in enumerate(colnames):
1448                  value = row[j]
1449                  if not table_field.match(colnames[j]):
1450                      if not '_extra' in new_row:
1451                          new_row['_extra'] = Row()
1452                      new_row['_extra'][colnames[j]] = value
1453                      select_as_parser = re.compile("\s+AS\s+(\S+)")
1454                      new_column_name = select_as_parser.search(colnames[j])
1455                      if not new_column_name is None:
1456                          column_name = new_column_name.groups(0)
1457                          setattr(new_row,column_name[0],value)
1458                      continue
1459                  (tablename, fieldname) = colname.split('.')
1460                  table = db[tablename]
1461                  field = table[fieldname]
1462                  field_type = field.type
1463                  if field.type != 'blob' and isinstance(value, str):
1464                      try:
1465                          value = value.decode(db._db_codec)
1466                      except Exception:
1467                          pass
1468                  if isinstance(value, unicode):
1469                      value = value.encode('utf-8')
1470                  if not tablename in new_row:
1471                      colset = new_row[tablename] = Row()
1472                      if tablename not in virtualtables:
1473                          virtualtables.append(tablename)
1474                  else:
1475                      colset = new_row[tablename]
1476
1477                  if isinstance(field_type, SQLCustomType):
1478                      colset[fieldname] = field_type.decoder(value)
1479                      # field_type = field_type.type
1480                  elif not isinstance(field_type, str) or value is None:
1481                      colset[fieldname] = value
1482                  elif isinstance(field_type, str) and \
1483                          field_type.startswith('reference'):
1484                      referee = field_type[10:].strip()
1485                      if not '.' in referee:
1486                          colset[fieldname] = rid = Reference(value)
1487                          (rid._table, rid._record) = (db[referee], None)
1488                      else: ### reference not by id
1489                          colset[fieldname] = value
1490                  elif field_type == 'boolean':
1491                      if value == True or str(value)[:1].lower() == 't':
1492                          colset[fieldname] = True
1493                      else:
1494                          colset[fieldname] = False
1495                  elif field_type == 'date' \
1496                          and (not isinstance(value, datetime.date)\
1497                               or isinstance(value, datetime.datetime)):
1498                      (y, m, d) = map(int, str(value)[:10].strip().split('-'))
1499                      colset[fieldname] = datetime.date(y, m, d)
1500                  elif field_type == 'time' \
1501                          and not isinstance(value, datetime.time):
1502                      time_items = map(int,str(value)[:8].strip().split(':')[:3])
1503                      if len(time_items) == 3:
1504                          (h, mi, s) = time_items
1505                      else:
1506                          (h, mi, s) = time_items + [0]
1507                      colset[fieldname] = datetime.time(h, mi, s)
1508                  elif field_type == 'datetime'\
1509                          and not isinstance(value, datetime.datetime):
1510                      (y, m, d) = map(int,str(value)[:10].strip().split('-'))
1511                      time_items = map(int,str(value)[11:19].strip().split(':')[:3])
1512                      if len(time_items) == 3:
1513                          (h, mi, s) = time_items
1514                      else:
1515                          (h, mi, s) = time_items + [0]
1516                      colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
1517                  elif field_type == 'blob' and blob_decode:
1518                      colset[fieldname] = base64.b64decode(str(value))
1519                  elif field_type.startswith('decimal'):
1520                      decimals = int(field_type[8:-1].split(',')[-1])
1521                      if self.dbengine == 'sqlite':
1522                          value = ('%.' + str(decimals) + 'f') % value
1523                      if not isinstance(value, decimal.Decimal):
1524                          value = decimal.Decimal(str(value))
1525                      colset[fieldname] = value
1526                  elif field_type.startswith('list:integer'):
1527                      if not self.dbengine=='google:datastore':
1528                          colset[fieldname] = bar_decode_integer(value)
1529                      else:
1530                          colset[fieldname] = value
1531                  elif field_type.startswith('list:reference'):
1532                      if not self.dbengine=='google:datastore':
1533                          colset[fieldname] = bar_decode_integer(value)
1534                      else:
1535                          colset[fieldname] = value
1536                  elif field_type.startswith('list:string'):
1537                      if not self.dbengine=='google:datastore':
1538                          colset[fieldname] = bar_decode_string(value)
1539                      else:
1540                          colset[fieldname] = value
1541                  else:
1542                      colset[fieldname] = value
1543                  if field_type == 'id':
1544                      id = colset[field.name]
1545                      colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
1546                      colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
1547                      for (referee_table, referee_name) in \
1548                              table._referenced_by:
1549                          s = db[referee_table][referee_name]
1550                          referee_link = db._referee_name and \
1551                              db._referee_name % dict(table=referee_table,field=referee_name)
1552                          if referee_link and not referee_link in colset:
1553                              colset[referee_link] = Set(db, s == id)
1554                      colset['id'] = id
1555              new_rows.append(new_row)
1556
1557          rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
1558
1559          for tablename in virtualtables:
1560              ### new style virtual fields
1561              table = db[tablename]
1562              fields_virtual = [(f,v) for (f,v) in table.items() if isinstance(v,FieldVirtual)]
1563              fields_lazy = [(f,v) for (f,v) in table.items() if isinstance(v,FieldLazy)]
1564              if fields_virtual or fields_lazy:
1565                  for row in rowsobj.records:
1566                      box = row[tablename]
1567                      for f,v in fields_virtual:
1568                          box[f] = v.f(row)
1569                      for f,v in fields_lazy:
1570                          box[f] = (v.handler or VirtualCommand)(v.f,row)
1571
1572              ### old style virtual fields
1573              for item in table.virtualfields:
1574                  try:
1575                      rowsobj = rowsobj.setvirtualfields(**{tablename:item})
1576                  except KeyError:
1577                      # to avoid breaking virtualfields when partial select
1578                      pass
1579          return rowsobj
1580
1581 - def common_filter(self, query, tablenames):
1582          tenant_fieldname = self.db._request_tenant
1583
1584          for tablename in tablenames:
1585              table = self.db[tablename]
1586
1587              # deal with user provided filters
1588              if table._common_filter != None:
1589                  query = query & table._common_filter(query)
1590
1591              # deal with multi_tenant filters
1592              if tenant_fieldname in table:
1593                  default = table[tenant_fieldname].default
1594                  if not default is None:
1595                      newquery = table[tenant_fieldname] == default
1596                      if query is None:
1597                          query = newquery
1598                      else:
1599                          query = query & newquery
1600          return query
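common_filter implements both per-table filters and multi-tenancy; a hedged sketch of the two hooks it honors (the filter shown is illustrative):

>>> # every select/update/delete on person is implicitly AND-ed with this
>>> db.person._common_filter = lambda query: db.person.name != ''
>>> # a table with a field named as db._request_tenant, and with a non-None
>>> # default, is automatically filtered to rows matching that default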
1601
1602  ###################################################################################
1603  # List of all the available adapters; they all extend BaseAdapter.
1604  ###################################################################################
1605
1606 -class SQLiteAdapter(BaseAdapter):
1607
1608      driver = globals().get('sqlite3', None)
1609      can_select_for_update = None  # support ourselves with BEGIN TRANSACTION
1610
1611 - def EXTRACT(self,field,what):
1612 return "web2py_extract('%s',%s)" % (what, self.expand(field))
1613 1614 @staticmethod
1615 - def web2py_extract(lookup, s):
1616          table = {
1617              'year': (0, 4),
1618              'month': (5, 7),
1619              'day': (8, 10),
1620              'hour': (11, 13),
1621              'minute': (14, 16),
1622              'second': (17, 19),
1623              }
1624          try:
1625              (i, j) = table[lookup]
1626              return int(s[i:j])
1627          except:
1628              return None
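SQLite has no EXTRACT, so the adapter registers this helper as a SQL function (see __init__ below); it slices ISO-formatted strings:

>>> SQLiteAdapter.web2py_extract('year', '2012-03-04 10:20:30')
2012
>>> SQLiteAdapter.web2py_extract('minute', '2012-03-04 10:20:30')
20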
1629
1630 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
1631               credential_decoder=lambda x:x, driver_args={},
1632               adapter_args={}):
1633          if not self.driver:
1634              raise RuntimeError, "Unable to import driver"
1635          self.db = db
1636          self.dbengine = "sqlite"
1637          self.uri = uri
1638          self.pool_size = 0
1639          self.folder = folder
1640          self.db_codec = db_codec
1641          self.find_or_make_work_folder()
1642          path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
1643          if uri.startswith('sqlite:memory'):
1644              dbpath = ':memory:'
1645          else:
1646              dbpath = uri.split('://')[1]
1647              if dbpath[0] != '/':
1648                  dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
1649          if not 'check_same_thread' in driver_args:
1650              driver_args['check_same_thread'] = False
1651          if not 'detect_types' in driver_args:
1652              driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
1653          def connect(dbpath=dbpath, driver_args=driver_args):
1654              return self.driver.Connection(dbpath, **driver_args)
1655          self.pool_connection(connect)
1656          self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
1657
1658 - def _truncate(self, table, mode=''):
1659          tablename = table._tablename
1660          return ['DELETE FROM %s;' % tablename,
1661                  "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
1662
1663 - def lastrowid(self, table):
1664 return self.cursor.lastrowid
1665
1666 - def _select(self, query, fields, attributes):
1667          """
1668          Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
1669          Note that the entire database, rather than one record, is locked
1670          (it will be locked eventually anyway by the following UPDATE).
1671          """
1672          sql = super(SQLiteAdapter, self)._select(query, fields, attributes)
1673          if attributes.get('for_update', False):
1674              sql = 'BEGIN IMMEDIATE TRANSACTION; ' + sql
1675          return sql
1676
1677
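At the DAL level this surfaces as the for_update select attribute; an illustrative sketch (locks the whole SQLite database, as the docstring above notes):

>>> row = db(person.id == 1).select(for_update=True).first()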
1678 -class JDBCSQLiteAdapter(SQLiteAdapter):
1679
1680      driver = globals().get('zxJDBC', None)
1681
1682 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
1683               credential_decoder=lambda x:x, driver_args={},
1684               adapter_args={}):
1685          if not self.driver:
1686              raise RuntimeError, "Unable to import driver"
1687          self.db = db
1688          self.dbengine = "sqlite"
1689          self.uri = uri
1690          self.pool_size = pool_size
1691          self.folder = folder
1692          self.db_codec = db_codec
1693          self.find_or_make_work_folder()
1694          path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
1695          if uri.startswith('sqlite:memory'):
1696              dbpath = ':memory:'
1697          else:
1698              dbpath = uri.split('://')[1]
1699              if dbpath[0] != '/':
1700                  dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
1701          def connect(dbpath=dbpath,driver_args=driver_args):
1702              return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath), **driver_args)
1703          self.pool_connection(connect)
1704          # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
1705          self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
1706
1707 - def execute(self, a):
1708 return self.log_execute(a)
1709 1710
class MySQLAdapter(BaseAdapter):

    driver = globals().get('pymysql',None)
    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        }

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")
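
    # A sketch of the intended two-phase-commit call order (normally driven
    # by DAL.distributed_transaction_commit rather than called directly):
    #   adapter.distributed_transaction_begin(key)   # XA START;
    #   ...execute SQL...
    #   adapter.prepare(key)                         # XA END; XA PREPARE;
    #   adapter.commit_prepared(key)                 # or rollback_prepared(key)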

    def concat_add(self,table):
        return '; ALTER TABLE %s ADD ' % table

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(dict(db=db,
                                user=credential_decoder(user),
                                passwd=credential_decoder(password),
                                host=host,
                                port=port,
                                charset=charset))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
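
    # A URI sketch accepted by the regex above:
    #   mysql://username:password@localhost:3306/testdb?set_encoding=utf8
    # parses to user='username', password='password', host='localhost',
    # port=3306, db='testdb', charset='utf8' (port and charset are optional).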

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])


class PostgreSQLAdapter(BaseAdapter):

    driver = globals().get('psycopg2',None)

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def adapt(self,obj):
        return psycopg2_adapt(obj).getquoted()

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #     % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # note: the space after host='%s' is required or psycopg2 cannot
        # parse the DSN (the two literals are concatenated)
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
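
    # e.g. uri='postgres://user:pw@localhost:5432/test?sslmode=require'
    # produces the DSN:
    #   dbname='test' user='user' host='localhost' port=5432 password='pw' sslmode='require'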

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def LIKE(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))

    def CONTAINS(self,first,second):
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
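
    # e.g. a contains() query on a list:string field escapes '|' and '%'
    # and wraps the key in bars, so db.person.tags.contains('x') renders
    # roughly as: (person.tags ILIKE '%|x|%')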


class JDBCPostgreSQLAdapter(PostgreSQLAdapter):

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")

class OracleAdapter(BaseAdapter):

    driver = globals().get('cx_Oracle',None)

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def sequence_name(self,tablename):
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
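
    # Oracle (before 12c) has no LIMIT/OFFSET, so e.g. limitby=(10, 20)
    # double-wraps the query: the inner SELECT is capped with ROWNUM<=20
    # and the outer WHERE w_row > 10 skips the first ten rows.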

    def constraint_name(self, tablename, fieldname):
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None
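
    # e.g. represent_exceptions(datetime.date(2011, 12, 31), 'date')
    # returns "to_date('2011-12-31','yyyy-mm-dd')"; returning None means
    # "no exception here, fall back to the default representation".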

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded'] = True
        def connect(uri=uri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.pool_connection(connect)
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command):
        args = []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command, args)
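
    # e.g. "INSERT INTO t(b) VALUES (:CLOB('abc'));" is rewritten to
    # "INSERT INTO t(b) VALUES (:1)" with args == ['abc'], so large CLOB
    # literals reach cx_Oracle as bind variables rather than inline SQL.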

    def create_sequence_and_triggers(self, query, table, **args):
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])

class MSSQLAdapter(BaseAdapter):

    driver = globals().get('pyodbc',None)

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'boolean':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}, fake_connect=False):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        uri = uri.split('://')[1]
        if '@' not in uri:
            try:
                m = re.compile('^(?P<dsn>.+)$').match(uri)
                if not m:
                    raise SyntaxError, \
                        'Parsing uri string(%s) has no result' % self.uri
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError, 'DSN required'
            except SyntaxError, e:
                logger.error('NdGpatch error')
                raise e
            cnxn = 'DSN=%s' % dsn
        else:
            m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
            if not m:
                raise SyntaxError, \
                    "Invalid URI string in DAL: %s" % uri
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError, 'User required'
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError, 'Host name required'
            db = m.group('db')
            if not db:
                raise SyntaxError, 'Database name required'
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
            for argmatch in argpattern.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        if not fake_connect:
            self.pool_connection(connect)

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self,rows,minimum=0,maximum=None):
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
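
    # This dialect has TOP but no OFFSET, so limitby=(10, 20) renders as
    # 'SELECT TOP 20 ...' and rowslice() is then used to discard the first
    # 10 rows on the client side.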


class MSSQL2Adapter(MSSQLAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        }

    def represent(self, obj, fieldtype):
        value = BaseAdapter.represent(self, obj, fieldtype)
        if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'":
            value = 'N'+value
        return value
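
    # e.g. represent('x', 'string') returns "N'x'": string literals get the
    # N prefix so they reach the server as Unicode, matching the NVARCHAR/
    # NTEXT column types above.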

    def execute(self,a):
        return self.log_execute(a.decode('utf8'))


class FireBirdAdapter(BaseAdapter):

    driver = globals().get('pyodbc',None)

    commit_on_alter_table = False
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def sequence_name(self,tablename):
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self,table,mode):
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
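
    # e.g. limitby=(10, 20) renders as 'SELECT FIRST 10 SKIP 10 ...':
    # Firebird takes a row count plus an offset instead of LIMIT/OFFSET.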

    def _truncate(self,table,mode=''):
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if adapter_args.has_key('driver_name'):
            if adapter_args['driver_name'] == 'kinterbasdb':
                self.driver = kinterbasdb
            elif adapter_args['driver_name'] == 'firebirdsql':
                self.driver = firebirdsql
        else:
            self.driver = kinterbasdb

        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL: %s" % uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
                                user = credential_decoder(user),
                                password = credential_decoder(password),
                                charset = charset))

        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)

    def create_sequence_and_triggers(self, query, table, **args):
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])


class FireBirdEmbeddedAdapter(FireBirdAdapter):

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):

        if adapter_args.has_key('driver_name'):
            if adapter_args['driver_name'] == 'kinterbasdb':
                self.driver = kinterbasdb
            elif adapter_args['driver_name'] == 'firebirdsql':
                self.driver = firebirdsql
        else:
            self.driver = kinterbasdb

        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError, 'Path required'
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''
        driver_args.update(dict(host=host,
                                database=pathdb,
                                user=credential_decoder(user),
                                password=credential_decoder(password),
                                charset=charset))
        #def connect(driver_args=driver_args):
        #    return kinterbasdb.connect(**driver_args)

        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)

class InformixAdapter(BaseAdapter):

    driver = globals().get('informixdb',None)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(dict(user=user,password=password,autocommit=True))
        def connect(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.pool_connection(connect)

    def execute(self,command):
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # informixdb exposes the last SERIAL value via the cursor's sqlerrd
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        return informixdb.IntegrityError


class DB2Adapter(BaseAdapter):

    driver = globals().get('pyodbc',None)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
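
    # Like the MSSQL adapter, there is no OFFSET here: limitby=(10, 20)
    # appends ' FETCH FIRST 20 ROWS ONLY' and rowslice() below then drops
    # the first 10 rows client-side.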

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)

    def execute(self,command):
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]


class TeradataAdapter(DB2Adapter):

    driver = globals().get('pyodbc',None)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)


INGRES_SEQNAME = 'ii***lineitemsequence' # NOTE invalid database object name
                                         # (ANSI-SQL wants this form of name
                                         # to be a delimited identifier)


class IngresAdapter(BaseAdapter):

    driver = globals().get('ingresdbi',None)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        connstr = self.uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name = connstr # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None) # No tracing
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname = '%s_iisq' % table._tablename
            query = query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        tmp_seqname = '%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        return ingresdbi.IntegrityError


class IngresUnicodeAdapter(IngresAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        }

class SAPDBAdapter(BaseAdapter):

    driver = globals().get('sapdb',None)
    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        }

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        def connect(user=user, password=password, database=db,
                    host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.pool_connection(connect)

    def lastrowid(self,table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

class CubridAdapter(MySQLAdapter):

    driver = globals().get('cubriddb', None)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = int(m.group('port') or '30000')
        charset = m.group('charset') or 'utf8'
        user = credential_decoder(user)
        passwd = credential_decoder(password)
        def connect(host=host,port=port,db=db,
                    user=user,passwd=password,driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.pool_connection(connect)
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")


######## GAE MySQL ##########

class DatabaseStoredFile:

    web2py_filesystem = False

    def escape(self,obj):
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError, "only MySQL can store metadata .table files in database for now"
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            DatabaseStoredFile.web2py_filesystem = True
        self.p = 0
        self.data = ''
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError, "File %s does not exist" % filename

    def read(self, bytes):
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        self.data += data

    def close(self):
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" \
            % self.filename)
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
            % (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()

    @staticmethod
    def exists(db, filename):
        if os.path.exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False


class UseDatabaseStoredFile:

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)

    def file_open(self, filename, mode='rb', lock=True):
        return DatabaseStoredFile(self.db,filename,mode)

    def file_close(self, fileobj, unlock=True):
        fileobj.close()

    def file_delete(self,filename):
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()
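
    # Sketch of intended use (GoogleSQLAdapter below does exactly this):
    # mixing this class into an adapter reroutes the adapter's .table
    # migration-metadata I/O through the web2py_filesystem table, e.g.
    #   class MyAdapter(UseDatabaseStoredFile, MySQLAdapter): pass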

class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1]

        m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):])
        if not m:
            raise SyntaxError, "Invalid URI string in SQLDB: %s" % self.uri
        instance = credential_decoder(m.group('instance'))
        db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connect(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.pool_connection(connect)
        if createdb:
            # self.execute('DROP DATABASE %s' % db)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
            self.execute('USE %s' % db)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

class NoSQLAdapter(BaseAdapter):
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def represent(self, obj, fieldtype):
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError, "non supported on GAE"
        if self.dbengine == 'google:datastore':
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and fieldtype.startswith('list:')
        if is_list:
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te','pa','up']):
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and fieldtype.startswith('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                if obj and not str(obj)[0].upper() == 'F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif is_string and fieldtype.startswith('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj
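
    # e.g. represent('2011-12-31', 'date') returns datetime.date(2011, 12, 31)
    # and represent('T', 'boolean') returns True: values are coerced to
    # native Python objects rather than SQL literals.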

    # the following return human-readable pseudo-SQL description strings
    # (e.g. for db._lastsql); there is no real SQL on the NoSQL backends

    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close(self):
        """
        remember: no transactions on many NoSQL
        """
        pass


    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError, "Not supported"
    def AND(self,first,second): raise SyntaxError, "Not supported"
    def AS(self,first,second): raise SyntaxError, "Not supported"
    def ON(self,first,second): raise SyntaxError, "Not supported"
    def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ADD(self,first,second): raise SyntaxError, "Not supported"
    def SUB(self,first,second): raise SyntaxError, "Not supported"
    def MUL(self,first,second): raise SyntaxError, "Not supported"
    def DIV(self,first,second): raise SyntaxError, "Not supported"
    def LOWER(self,first): raise SyntaxError, "Not supported"
    def UPPER(self,first): raise SyntaxError, "Not supported"
    def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
    def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
    def LEFT_JOIN(self): raise SyntaxError, "Not supported"
    def RANDOM(self): raise SyntaxError, "Not supported"
    def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
    def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
    def LIKE(self,first,second): raise SyntaxError, "Not supported"
    def drop(self,table,mode): raise SyntaxError, "Not supported"
    def alias(self,table,alias): raise SyntaxError, "Not supported"
    def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
    def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
    def prepare(self,key): raise SyntaxError, "Not supported"
    def commit_prepared(self,key): raise SyntaxError, "Not supported"
    def rollback_prepared(self,key): raise SyntaxError, "Not supported"
    def concat_add(self,table): raise SyntaxError, "Not supported"
    def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
    def execute(self,*a,**b): raise SyntaxError, "Not supported"
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
    def lastrowid(self,table): raise SyntaxError, "Not supported"
    def integrity_error_class(self): raise SyntaxError, "Not supported"
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"


class GAEF(object):
    def __init__(self,name,op,value,apply):
        self.name = name=='id' and '__key__' or name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
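
    # e.g. GAEF('name', '=', u'James', lambda a,b: a==b) prints as
    # (name = u'James':<type 'unicode'>); the apply callable lets the
    # condition be re-checked in Python where the datastore cannot
    # evaluate it natively.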
3204
class GoogleDatastoreAdapter(NoSQLAdapter):
    uploads_in_blob = True
    types = {}

    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda: gae.StringProperty(multiline=True)),
                'text': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda: gae.StringListProperty(default=None)),
                'list:integer': (lambda: gae.ListProperty(int,default=None)),
                'list:reference': (lambda: gae.ListProperty(int,default=None)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = 0
        match = re.compile('.*://(?P<namespace>.+)').match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))

    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        myfields = {}
        for k in table.fields:
            if isinstance(polymodel,Table) and k in polymodel.fields():
                continue
            field = table[k]
            attr = {}
            if isinstance(field.type, SQLCustomType):
                ftype = self.types[field.type.native or field.type.type](**attr)
            elif isinstance(field.type, gae.Property):
                ftype = field.type
            elif field.type.startswith('id'):
                continue
            elif field.type.startswith('decimal'):
                precision, scale = field.type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field.type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[10:].strip()
                ftype = self.types[field.type[:9]](table._db[referenced])
            elif field.type.startswith('list:reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[15:].strip()
                ftype = self.types[field.type[:14]](**attr)
            elif field.type.startswith('list:'):
                ftype = self.types[field.type](**attr)
            elif not field.type in self.types\
                 or not self.types[field.type]:
                raise SyntaxError, 'Field: unknown field type: %s' % field.type
            else:
                ftype = self.types[field.type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError, "polymodel must be None, True, a table or a tablename"
        return None

    def expand(self,expression,field_type=None):
        if isinstance(expression,Field):
            if expression.type in ('text','blob'):
                raise SyntaxError, 'AppEngine does not index by: %s' % expression.type
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self,first,second):
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name=='__key__' and a[0].name!='__key__':
            return b+a
        return a+b

    def EQ(self,first,second=None):
        if isinstance(second, Key):
            return [GAEF(first.name,'=',second,lambda a,b:a==b)]
        return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]

    def NE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]

    def LT(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<',second,lambda a,b:a<b)]

    def LE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]

    def GT(self,first,second=None):
        if first.type != 'id' or second==0 or second == '0':
            return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>',second,lambda a,b:a>b)]

    def GE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]

    def INVERT(self,first):
        return '-%s' % first.name

    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))

    def BELONGS(self,first,second=None):
        if not isinstance(second,(list, tuple)):
            raise SyntaxError, "Not supported"
        if first.type != 'id':
            return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
        else:
            second = [Key.from_path(first._tablename, i) for i in second]
            return [GAEF(first.name,'in',second,lambda a,b:a in b)]

    def CONTAINS(self,first,second):
        if not first.type.startswith('list:'):
            raise SyntaxError, "Not supported"
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]

    def NOT(self,first):
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError, "Not supported"
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError, "Not supported %s" % first.op.__name__
        first.op = nop
        return self.expand(first)

    def truncate(self,table,mode):
        self.db(table._id > 0).delete()

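Each comparison above returns a list of GAEF filters, and AND concatenates the
lists, moving any '__key__' filter to the front. An illustrative sketch only
(it assumes a GAE runtime and a person table as in the module docstring, with
adapter = db._adapter); the result would be roughly:

    >>> adapter.expand((person.name == 'James') & (person.id > 0))
    [(__key__ > 0:<type 'int'>), (name = 'James':<type 'str'>)]
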
    def select_raw(self,query,fields=None,attributes=None):
        fields = fields or []
        attributes = attributes or {}
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = fields[0].table._id>0
        else:
            raise SyntaxError, "Unable to determine a tablename"

        if query and not query.ignore_common_filters:
            query = self.common_filter(query,[tablename])

        tableobj = self.db[tablename]._tableobj
        items = tableobj.all()
        filters = self.expand(query)
        for filter in filters:
            if filter.name=='__key__' and filter.op=='>' and filter.value==0:
                continue
            elif filter.name=='__key__' and filter.op=='=':
                if filter.value==0:
                    items = []
                elif isinstance(filter.value, Key):
                    item = tableobj.get(filter.value)
                    items = (item and [item]) or []
                else:
                    item = tableobj.get_by_id(filter.value)
                    items = (item and [item]) or []
            elif isinstance(items,list): # i.e. there is a single record!
                items = [i for i in items if filter.apply(getattr(i,filter.name),
                                                          filter.value)]
            else:
                if filter.name=='__key__': items.order('__key__')
                items = items.filter('%s %s' % (filter.name,filter.op),filter.value)
        if not isinstance(items,list):
            if attributes.get('left', None):
                raise SyntaxError, 'Set: no left join in appengine'
            if attributes.get('groupby', None):
                raise SyntaxError, 'Set: no groupby in appengine'
            orderby = attributes.get('orderby', False)
            if orderby:
                ### THIS REALLY NEEDS IMPROVEMENT !!!
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby,Expression):
                    orderby = self.expand(orderby)
                orders = orderby.split(', ')
                for order in orders:
                    order = {'-id':'-__key__','id':'__key__'}.get(order,order)
                    items = items.order(order)
            if attributes.get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                (limit, offset) = (lmax - lmin, lmin)
                items = items.fetch(limit, offset=offset)
        fields = self.db[tablename].fields
        return (items, tablename, fields)

    def select(self,query,fields,attributes):
        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [
            [t=='id' and (int(item.key().id()) if item.key().id() else
             item.key().name()) or getattr(item, t) for t in fields]
            for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        return self.parse(rows, colnames, False)

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            return len(items)
        except TypeError:
            return items.count(limit=None)

    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because, according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records at once.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            counter = items.count(limit=None)
            leftitems = items.fetch(1000)
            while len(leftitems):
                gae.delete(leftitems)
                leftitems = items.fetch(1000)
        else:
            counter = len(items)
            gae.delete(items)
        return counter

    def update(self,tablename,query,update_fields):
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        logger.info(str(counter))
        return counter

    def insert(self,table,fields):
        dfields = dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record) = (table, None)
        return rid

    def bulk_insert(self,table,items):
        parsed_items = []
        for item in items:
            dfields = dict((f.name,self.represent(v,f.type)) for f,v in item)
            parsed_items.append(table._tableobj(**dfields))
        gae.put(parsed_items)
        return True

def uuid2int(uuidv):
    return uuid.UUID(uuidv).int

def int2uuid(n):
    return str(uuid.UUID(int=n))

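These helpers round-trip a UUID string through its 128-bit integer form (the
CouchDB adapter below derives integer record ids this way):

    >>> n = uuid2int('00000000-0000-0000-0000-0000000004d2')
    >>> int2uuid(n)
    '00000000-0000-0000-0000-0000000004d2'
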
class CouchDBAdapter(NoSQLAdapter):
    uploads_in_blob = True
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass

    def expand(self,expression,field_type=None):
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    def AND(self,first,second):
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        return repr(not isinstance(value,unicode) and value or value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.uri = uri
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = pool_size

        url = 'http://'+uri[10:]
        def connect(url=url,driver_args=driver_args):
            return couchdb.Server(url,**driver_args)
        self.pool_connection(connect,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        def uid(fd):
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn = "function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        return self.parse(rows, colnames, False)

    def delete(self,tablename,query):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)

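A minimal usage sketch, assuming a CouchDB server on 127.0.0.1:5984 and the
couchdb driver installed; records are stored as documents whose _id is the
string form of the generated integer id:

    >>> db = DAL('couchdb://127.0.0.1:5984')
    >>> person = db.define_table('person', Field('name'))
    >>> id = person.insert(name='James')
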
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    if re.compile('[^0-9a-zA-Z_]').findall(text):
        raise SyntaxError, \
            'only [0-9a-zA-Z_] allowed in table and field names, received %s' \
            % text
    return text

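For example:

    >>> cleanup('person_name')
    'person_name'

while cleanup('person name') raises a SyntaxError.
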
class MongoDBAdapter(NoSQLAdapter):
    uploads_in_blob = True
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.uri = uri
        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = pool_size
        # the minimum number of replicas to wait for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # pymongo performs inserts and selects asynchronously by default;
        # here the default is synchronous (safe=True), unless overruled by
        # adapter_args or by a function parameter
        self.defaultsafe = adapter_args.get('safe',True)

        m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self.uri[10:])
        if not m:
            raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri
        host = m.group('host')
        if not host:
            raise SyntaxError, 'mongodb: host name required'
        dbname = m.group('db')
        if not dbname:
            raise SyntaxError, 'mongodb: db name required'
        port = int(m.group('port') or 27017)
        driver_args.update(dict(host=host,port=port))
        def connect(dbname=dbname,driver_args=driver_args):
            return pymongo.Connection(**driver_args)[dbname]
        self.pool_connection(connect,cursor=False)

    def represent(self, obj, fieldtype):
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype == 'date':
            if value == None:
                return value
            # MongoDB has no date type, so dates must be stored as datetime
            # (or string/integer); the time part can be stripped off again
            # based on the fieldtype
            t = datetime.time(0, 0, 0)
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # MongoDB has no time type either; combine with a dummy date
            d = datetime.date(2000, 1, 1)
            return datetime.datetime.combine(d, value)
        elif fieldtype == 'list:string' or fieldtype == 'list:integer' or fieldtype == 'list:reference':
            return value
        return value

    # 'safe' determines whether the request is asynchronous (safe=False)
    # or synchronous (safe=True); for safety, synchronous is the default
    def insert(self,table,fields,safe=True):
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,table[k.name].type)) for k,v in fields)
        ctable.insert(values,safe=safe)
        return int(str(values['_id']), 16)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None, isCapped=False):
        if isCapped:
            raise RuntimeError, "Not implemented"
        else:
            pass

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        # it would probably be faster to use pymongo's cursor.count(),
        # i.e. connection[table].find(query).count(), instead of fetching
        # the whole result set here
        return len(rows)

    def expand(self, expression, field_type=None):
        if isinstance(expression, Query):
            print "in expand and this is a query"
            # any query using 'id':
            #   set name as _id (as per the pymongo/mongodb primary key)
            #   convert the second arg to an ObjectId (if it is not already)
            #   if the second arg is 0, convert to a null ObjectId
            if isinstance(expression.first,Field) and expression.first.type == 'id':
                expression.first.name = '_id'
                if expression.second != 0 and not isinstance(expression.second,pymongo.objectid.ObjectId):
                    if isinstance(expression.second,int):
                        try:
                            # reference fields are stored as integers by
                            # default, for compatibility with the other
                            # databases, so the integer must be convertible
                            # to an ObjectId
                            expression.second = pymongo.objectid.ObjectId(("%X" % expression.second))
                        except:
                            raise SyntaxError, 'second argument must be an integer that can represent an objectid'
                    else:
                        try:
                            # but a direct id is also possible
                            expression.second = pymongo.objectid.ObjectId(expression.second)
                        except:
                            raise SyntaxError, 'second argument must be of type bson.objectid.ObjectId or an objectid representable integer'
                elif expression.second == 0:
                    expression.second = pymongo.objectid.ObjectId('000000000000000000000000')
            return expression.op(expression.first, expression.second)
        if isinstance(expression, Field):
            if expression.type=='id':
                return "_id"
            else:
                return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            elif not isinstance(expression.op, str):
                return expression.op()
            else:
                return expression.op
        elif field_type:
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) for item in expression)
        else:
            return expression

    def _select(self,query,fields,attributes):
        from pymongo import son

        for key in set(attributes.keys())-set(('limitby','orderby')):
            raise SyntaxError, 'invalid select attribute: %s' % key

        new_fields = []
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        #distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                print "in xorify"
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:],-1))
                else:
                    mongosort_list.append((f,1))
            print "mongosort_list = %s" % mongosort_list

        if limitby:
            # a tuple
            limitby_skip, limitby_limit = limitby
        else:
            limitby_skip = 0
            limitby_limit = 0

        mongofields_dict = son.SON()
        mongoqry_dict = {}
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError, "The table name could not be found in the query nor from the select statement."
        fieldnames = [f for f in (fields or self.db[tablename])] # ie table.field
        mongoqry_dict = self.expand(query)
        for f in fieldnames:
            mongofields_dict[f.name] = 1 # ie field=1
        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, limitby_limit, limitby_skip

    # need to define all the 'sql' methods gt,lt etc....

    def select(self,query,fields,attributes):
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip = self._select(query,fields,attributes)
        try:
            print "mongoqry_dict=%s" % mongoqry_dict
        except:
            pass
        print "mongofields_dict=%s" % mongofields_dict
        ctable = self.connection[tablename]
        # a pymongo cursor object
        mongo_list_dicts = ctable.find(mongoqry_dict, mongofields_dict,
                                       skip=limitby_skip, limit=limitby_limit,
                                       sort=mongosort_list)
        print "mongo_list_dicts=%s" % mongo_list_dicts
        rows = mongo_list_dicts
        return self.parse(rows, mongofields_dict.keys(), False, tablename)

    def parse(self, rows, colnames, blob_decode=True, tablename=None):
        import pymongo.objectid
        print "in parse"
        print "colnames=%s" % colnames
        db = self.db
        virtualtables = []
        table_colnames = []
        new_rows = []
        for (i,row) in enumerate(rows):
            print "i,row = %s,%s" % (i,row)
            new_row = Row()
            for j,colname in enumerate(colnames):
                # hack to get past the 'id' KeyError: we need to keep the
                # 'id' key, so derive an id value from mongo's _id
                # (alternatives would be UUIDs or a mongo counter function)
                if colname == 'id':
                    if isinstance(row['_id'],pymongo.objectid.ObjectId):
                        row[colname] = int(str(row['_id']),16)
                    else:
                        # in case of an alternative (user defined) key
                        row[colname] = row['_id']
                print "j = %s" % j
                # blob fields are not implemented; a key:value pair may also
                # be missing from a mongo document
                value = row.get(colname,None)
                colname = "%s.%s" % (tablename, colname) # hack to match the table_field regex
                if i == 0: # only on the first row
                    table_colnames.append(colname)
                if not table_field.match(colname):
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colnames[j]] = value
                    select_as_parser = re.compile("\s+AS\s+(\S+)")
                    new_column_name = select_as_parser.search(colnames[j])
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
                    continue
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                field_type = field.type
                print "field = %s, type = %s" % (field, field_type)
                if field.type != 'blob' and isinstance(value, str):
                    try:
                        value = value.decode(db._db_codec)
                    except Exception:
                        pass
                if isinstance(value, unicode):
                    value = value.encode('utf-8')
                if not tablename in new_row:
                    colset = new_row[tablename] = Row()
                    if tablename not in virtualtables:
                        virtualtables.append(tablename)
                else:
                    colset = new_row[tablename]

                if isinstance(field_type, SQLCustomType):
                    colset[fieldname] = field_type.decoder(value)
                elif not isinstance(field_type, str) or value is None:
                    colset[fieldname] = value
                elif isinstance(field_type, str) and \
                        field_type.startswith('reference'):
                    referee = field_type[10:].strip()
                    if not '.' in referee:
                        colset[fieldname] = rid = Reference(value)
                        (rid._table, rid._record) = (db[referee], None)
                    else: ### reference not by id
                        colset[fieldname] = value
                elif field_type == 'boolean':
                    if value == True or str(value)[:1].lower() == 't':
                        colset[fieldname] = True
                    else:
                        colset[fieldname] = False
                elif field_type == 'date' \
                        and (not isinstance(value, datetime.date)\
                        or isinstance(value, datetime.datetime)):
                    (y, m, d) = map(int, str(value)[:10].strip().split('-'))
                    colset[fieldname] = datetime.date(y, m, d)
                elif field_type == 'time' \
                        and not isinstance(value, datetime.time):
                    # pymongo returns times as datetimes
                    if isinstance(value,datetime.datetime):
                        colset[fieldname] = value.time()
                    else:
                        time_items = map(int,str(value)[:8].strip().split(':')[:3])
                        if len(time_items) == 3:
                            (h, mi, s) = time_items
                        else:
                            (h, mi, s) = time_items + [0]
                        colset[fieldname] = datetime.time(h, mi, s)
                elif field_type == 'datetime'\
                        and not isinstance(value, datetime.datetime):
                    (y, m, d) = map(int,str(value)[:10].strip().split('-'))
                    time_items = map(int,str(value)[11:19].strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
                elif field_type == 'blob' and blob_decode:
                    colset[fieldname] = base64.b64decode(str(value))
                elif field_type.startswith('decimal'):
                    decimals = int(field_type[8:-1].split(',')[-1])
                    if self.dbengine == 'sqlite':
                        value = ('%.' + str(decimals) + 'f') % value
                    if not isinstance(value, decimal.Decimal):
                        value = decimal.Decimal(str(value))
                    colset[fieldname] = value
                # list: types are stored natively on google:datastore and
                # mongodb, so only bar-decode them for the other engines
                elif field_type.startswith('list:integer'):
                    if not self.dbengine=='google:datastore' and not self.dbengine=='mongodb':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:reference'):
                    if not self.dbengine=='google:datastore' and not self.dbengine=='mongodb':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:string'):
                    if not self.dbengine=='google:datastore' and not self.dbengine=='mongodb':
                        colset[fieldname] = bar_decode_string(value)
                    else:
                        colset[fieldname] = value
                else:
                    colset[fieldname] = value
                if field_type == 'id':
                    id = colset[field.name]
                    colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
                    colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
                    for (referee_table, referee_name) in \
                            table._referenced_by:
                        s = db[referee_table][referee_name]
                        referee_link = db._referee_name and \
                            db._referee_name % dict(table=referee_table,field=referee_name)
                        if referee_link and not referee_link in colset:
                            colset[referee_link] = Set(db, s == id)
                    colset['id'] = id
            new_rows.append(new_row)
        print "table_colnames = %s" % table_colnames
        rowsobj = Rows(db, new_rows, table_colnames, rawrows=rows)

        for tablename in virtualtables:
            ### new style virtual fields
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.items() if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.items() if isinstance(v,FieldLazy)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        box[f] = v.f(row)
                    for f,v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f,row)

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except KeyError:
                    # to avoid breaking virtualfields on partial select
                    pass
        return rowsobj

    def INVERT(self,first):
        return '-%s' % self.expand(first)

    def drop(self, table, mode=''):
        ctable = self.connection[table._tablename]
        ctable.drop()

    def truncate(self,table,mode):
        ctable = self.connection[table._tablename]
        ctable.remove(None, safe=True)

    def update(self,tablename,query,fields):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        raise RuntimeError, "Not implemented"

    def bulk_insert(self, table, items):
        return [self.insert(table,item) for item in items]

    # TODO: this will probably not work :(
    def NOT(self, first):
        result = {}
        result["$not"] = self.expand(first)
        return result

    def AND(self,first,second):
        f = self.expand(first)
        s = self.expand(second)
        f.update(s)
        return f

    def OR(self,first,second):
        # pymongo expects: .find( {'$or' : [{'name':'1'}, {'name':'2'}]} )
        result = {}
        f = self.expand(first)
        s = self.expand(second)
        result['$or'] = [f,s]
        return result

    def BELONGS(self, first, second):
        if isinstance(second, str):
            return {self.expand(first) : {"$in" : [second[:-1]]}}
        elif second==[] or second==():
            return {1:0}
        items = [self.expand(item, first.type) for item in second]
        return {self.expand(first) : {"$in" : items}}

    def LIKE(self, first, second):
        # TODO: escaping regex operators?
        return {self.expand(first) : ('%s' % self.expand(second, 'string').replace('%','/'))}

    def STARTSWITH(self, first, second):
        # TODO: escaping regex operators?
        return {self.expand(first) : ('/^%s/' % self.expand(second, 'string'))}

    def ENDSWITH(self, first, second):
        # TODO: escaping regex operators?
        return {self.expand(first) : ('/%s$/' % self.expand(second, 'string'))}

    def CONTAINS(self, first, second):
        # there is a technical difference, but MongoDB does not support
        # it, and the result will be the same
        return {self.expand(first) : ('/%s/' % self.expand(second, 'string'))}

    def EQ(self,first,second):
        result = {}
        result[self.expand(first)] = self.expand(second)
        return result

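Under this adapter a DAL query expands straight into a pymongo filter document.
An illustrative sketch (assuming a person table defined over a mongodb
connection; note the debug print emitted by expand above):

    >>> db._adapter.expand(db.person.name == 'James')
    in expand and this is a query
    {'name': 'James'}
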
    def NE(self, first, second=None):
        print "in NE"
        result = {}
        result[self.expand(first)] = {'$ne': self.expand(second)}
        return result

    def LT(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s < None" % first
        print "in LT"
        result = {}
        result[self.expand(first)] = {'$lt': self.expand(second)}
        return result

    def LE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s <= None" % first
        print "in LE"
        result = {}
        result[self.expand(first)] = {'$lte': self.expand(second)}
        return result

    def GT(self,first,second):
        print "in GT"
        result = {}
        result[self.expand(first)] = {'$gt': self.expand(second)}
        return result

    def GE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s >= None" % first
        print "in GE"
        result = {}
        result[self.expand(first)] = {'$gte': self.expand(second)}
        return result

    def ADD(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '%s + %s' % (self.expand(first), self.expand(second, first.type))

    def SUB(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))

    def MUL(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))

    def DIV(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))

    def MOD(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))

    def AS(self, first, second):
        raise NotImplementedError, "This must yet be replaced with javascript in order to accomplish this. Sorry"
        return '%s AS %s' % (self.expand(first), second)

    # We could implement an option that simulates a full featured SQL
    # database, but such an option should be set explicitly or implemented
    # as a separate library.
    def ON(self, first, second):
        raise NotImplementedError, "This is not possible in NoSQL, but can be simulated with a wrapper."
        return '%s ON %s' % (self.expand(first), self.expand(second))

    def COMMA(self, first, second):
        return '%s, %s' % (self.expand(first), self.expand(second))


########################################################################
# end of adapters
########################################################################

ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    }

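The DAL constructor below selects the adapter by matching the URI scheme
against these keys (via regex_dbname); for example:

    >>> db = DAL('sqlite:memory')
    >>> db._dbname
    'sqlite:memory'
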
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    Makes sure the content of a field is in line with the declared
    fieldtype.
    """
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires = []
    def ff(r,id):
        row = r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type == 'string':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'text':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'password':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'double':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif field.db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in field.db.tables:
        referenced = field.db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(field.db,field)
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif field.db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in field.db.tables:
        referenced = field.db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = r._db(r._id.belongs(ids)).select(r._id)
            return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(field.db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(field.db,field))
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires

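For instance, a notnull string field picks up both a length check and a
non-empty check; a sketch, assuming web2py's validators are importable:

    >>> f = Field('name', 'string', length=32, notnull=True)
    >>> [v.__class__.__name__ for v in sqlhtml_validators(f)]
    ['IS_NOT_EMPTY', 'IS_LENGTH']
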
def bar_escape(item):
    return str(item).replace('|', '||')

def bar_encode(items):
    return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())

def bar_decode_integer(value):
    return [int(x) for x in value.split('|') if x.strip()]

def bar_decode_string(value):
    return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()]

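These implement the bar-encoding used to serialize list: types into plain text
columns; embedded bars are escaped by doubling, and bar_decode_string undoes
the escape via the string_unpack regex defined earlier in the module. A quick
round trip:

    >>> bar_encode([1, 2, 3])
    '|1|2|3|'
    >>> bar_decode_integer('|1|2|3|')
    [1, 2, 3]
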
class Row(dict):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __getitem__(self, key):
        key = str(key)
        m = table_field.match(key)
        if key in self.get('_extra',{}):
            return self._extra[key]
        elif m:
            try:
                return dict.__getitem__(self, m.group(1))[m.group(2)]
            except (KeyError,TypeError):
                key = m.group(2)
        return dict.__getitem__(self, key)

    def __call__(self,key):
        return self.__getitem__(key)

    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key), value)

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __repr__(self):
        return '<Row ' + dict.__repr__(self) + '>'

    def __int__(self):
        return dict.__getitem__(self,'id')

    def __eq__(self,other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self,datetime_to_str=False):
        SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v = d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k] = v.as_dict()
            elif isinstance(v,Reference):
                d[k] = int(v)
            elif isinstance(v,decimal.Decimal):
                d[k] = float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,SERIALIZABLE_TYPES):
                del d[k]
        return d

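For example:

    >>> row = Row(id=1, name='James')
    >>> row.name == row['name'] == row('name')
    True
    >>> int(row)
    1
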
def Row_unpickler(data):
    return Row(cPickle.loads(data))

def Row_pickler(data):
    return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)

copy_reg.pickle(Row, Row_pickler, Row_unpickler)


################################################################################
# Everything below should be independent of the specifics of the
# database and should work for RDBMSs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    def __call__(self):
        return copy.copy(self)

def smart_query(fields,text):
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError, "fields must be a list of fields"
    fields = new_fields
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    re_constants = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
    constants = {}
    i = 0
    while True:
        m = re_constants.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i += 1
    text = re.sub('\s+',' ',text).lower()
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','=='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','=='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','=='),
                (' equal ','=='),
                (' equals ','=='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' is ','==')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError, "Invalid syntax"
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                if not item[1:] in constants:
                    raise RuntimeError, "Invalid syntax"
                value = constants[item[1:]]
            else:
                value = item
                if op == '==': op = 'like'
            if op == '==': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif field.type in ('text','string'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError, "Invalid operation"
            else: raise RuntimeError, "Invalid operation"
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query

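A hedged usage sketch, reusing the person table from the module docstring
(exact Query serialization may vary by adapter):

    >>> q = smart_query([db.person], 'person.name starts with "J"')
    >>> str(q) == str(db.person.name.startswith('J'))
    True
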
class DAL(dict):

    """
    an instance of this class represents a database connection

    Example::

       db = DAL('sqlite://test.db')
       db.define_table('tablename', Field('fieldname1'),
                                    Field('fieldname2'))
    """

    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as the new DAL replaces the old sql.py
        """
        BaseAdapter.set_folder(folder)

    @staticmethod
    def distributed_transaction_begin(*instances):
        if not instances:
            return
        instances = list(enumerate(instances))
        thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
        keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
        for (i, db) in instances:
            if not db._adapter.support_distributed_transaction():
                raise SyntaxError, \
                    'distributed transaction not supported by %s' % db._dbname
        for (i, db) in instances:
            db._adapter.distributed_transaction_begin(keys[i])

    @staticmethod
    def distributed_transaction_commit(*instances):
        if not instances:
            return
        instances = list(enumerate(instances))
        thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
        keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
        for (i, db) in instances:
            if not db._adapter.support_distributed_transaction():
                raise SyntaxError, \
                    'distributed transaction not supported by %s' % db._dbname
        try:
            for (i, db) in instances:
                db._adapter.prepare(keys[i])
        except:
            for (i, db) in instances:
                db._adapter.rollback_prepared(keys[i])
            raise RuntimeError, 'failure to commit distributed transaction'
        else:
            for (i, db) in instances:
                db._adapter.commit_prepared(keys[i])
        return

    def __init__(self, uri='sqlite://dummy.db',
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
           (default: 'sqlite://dummy.db')
        :pool_size: how many open connections to keep in the connection pool
        :folder: where .table migration metadata files (and the default
           SQLite database) are stored; normally set for you by gluon
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
           against sql reserved keywords. (Default None)

        * 'common' List of sql keywords that are common to all database types
          such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
        * '<adaptername>' Checks against the specific adapter's list of
          keywords. (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapter's
          list of nonreserved keywords. (if available)

        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If set to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        """
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = regex_dbname.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname
                        # driver_args or {} (and adapter_args or {}) avoid a
                        # shared mutable {} default argument
                        args = (self,uri,pool_size,folder,
                                db_codec, credential_decoder,
                                driver_args or {}, adapter_args or {})
                        self._adapter = ADAPTERS[self._dbname](*args)
                        connected = True
                        break
                    except SyntaxError:
                        raise
                    except Exception, error:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, tb)
        else:
            args = (self,'None',0,folder,db_codec)
            self._adapter = BaseAdapter(*args)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib.md5(adapter.uri).hexdigest()
        self.tables = SQLCallableList()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import:
            self.import_table_definitions(adapter.folder)

    def import_table_definitions(self,path,migrate=False,fake_migrate=False):
        pattern = os.path.join(path,self._uri_hash+'_*.table')
        for filename in glob.glob(pattern):
            tfile = self._adapter.file_open(filename, 'r')
            try:
                sql_fields = cPickle.load(tfile)
                name = filename[len(pattern)-7:-6]
                mf = [(value['sortable'],Field(key,type=value['type'])) \
                         for key, value in sql_fields.items()]
                mf.sort(lambda a,b: cmp(a[0],b[0]))
                self.define_table(name,*[item[1] for item in mf],
                                  **dict(migrate=migrate,fake_migrate=fake_migrate))
            finally:
                self._adapter.file_close(tfile)

    def check_reserved_keyword(self, name):
        """
        Validates ``name`` against SQL keywords.
        Uses self.check_reserved, which is a list of the keyword lists
        to check against, e.g.:
            self.check_reserved = ['common', 'postgres', 'mysql']
            self.check_reserved = ['all']
        """
        for backend in self.check_reserved:
            if name.upper() in self.RSK[backend]:
                raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())

    def __contains__(self, tablename):
        return self.has_key(tablename)

4803 - def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
4804 """ 4805 EXAMPLE: 4806 4807 db.define_table('person',Field('name'),Field('info')) 4808 db.define_table('pet',Field('owner',db.person),Field('name'),Field('info')) 4809 4810 @request.restful() 4811 def index(): 4812 def GET(*args,**vars): 4813 patterns = [ 4814 "/friends[person]", 4815 "/{friend.name.startswith}", 4816 "/{friend.name}/:field", 4817 "/{friend.name}/pets[pet.owner]", 4818 "/{friend.name}/pet[pet.owner]/{pet.name}", 4819 "/{friend.name}/pet[pet.owner]/{pet.name}/:field" 4820 ] 4821 parser = db.parse_as_rest(patterns,args,vars) 4822 if parser.status == 200: 4823 return dict(content=parser.response) 4824 else: 4825 raise HTTP(parser.status,parser.error) 4826 def POST(table_name,**vars): 4827 if table_name == 'person': 4828 return db.person.validate_and_insert(**vars) 4829 elif table_name == 'pet': 4830 return db.pet.validate_and_insert(**vars) 4831 else: 4832 raise HTTP(400) 4833 return locals() 4834 """ 4835 4836 db = self 4837 re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 4838 re2 = re.compile('^.+\[.+\]$') 4839 4840 def auto_table(table,base='',depth=0): 4841 patterns = [] 4842 for field in db[table].fields: 4843 if base: 4844 tag = '%s/%s' % (base,field.replace('_','-')) 4845 else: 4846 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 4847 f = db[table][field] 4848 if not f.readable: continue 4849 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 4850 tag += '/{%s.%s}' % (table,field) 4851 patterns.append(tag) 4852 patterns.append(tag+'/:field') 4853 elif f.type.startswith('boolean'): 4854 tag += '/{%s.%s}' % (table,field) 4855 patterns.append(tag) 4856 patterns.append(tag+'/:field') 4857 elif f.type.startswith('double') or f.type.startswith('integer'): 4858 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 4859 patterns.append(tag) 4860 patterns.append(tag+'/:field') 4861 elif f.type.startswith('list:'): 4862 tag += '/{%s.%s.contains}' % (table,field) 4863 patterns.append(tag) 4864 patterns.append(tag+'/:field') 4865 elif f.type in ('date','datetime'): 4866 tag+= '/{%s.%s.year}' % (table,field) 4867 patterns.append(tag) 4868 patterns.append(tag+'/:field') 4869 tag+='/{%s.%s.month}' % (table,field) 4870 patterns.append(tag) 4871 patterns.append(tag+'/:field') 4872 tag+='/{%s.%s.day}' % (table,field) 4873 patterns.append(tag) 4874 patterns.append(tag+'/:field') 4875 if f.type in ('datetime','time'): 4876 tag+= '/{%s.%s.hour}' % (table,field) 4877 patterns.append(tag) 4878 patterns.append(tag+'/:field') 4879 tag+='/{%s.%s.minute}' % (table,field) 4880 patterns.append(tag) 4881 patterns.append(tag+'/:field') 4882 tag+='/{%s.%s.second}' % (table,field) 4883 patterns.append(tag) 4884 patterns.append(tag+'/:field') 4885 if depth>0: 4886 for rtable,rfield in db[table]._referenced_by: 4887 tag+='/%s[%s.%s]' % (rtable,rtable,rfield) 4888 patterns.append(tag) 4889 patterns += auto_table(rtable,base=tag,depth=depth-1) 4890 return patterns
4891 
4892          if patterns=='auto':
4893              patterns=[]
4894              for table in db.tables:
4895                  if not table.startswith('auth_'):
4896                      patterns.append('/%s[%s]' % (table,table))
4897                      patterns += auto_table(table,base='',depth=1)
4898          else:
4899              i = 0
4900              while i<len(patterns):
4901                  pattern = patterns[i]
4902                  tokens = pattern.split('/')
4903                  if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
4904                      new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
4905                                                '/'.join(tokens[:-1]))
4906                      patterns = patterns[:i]+new_patterns+patterns[i+1:]
4907                      i += len(new_patterns)
4908                  else:
4909                      i += 1
4910          if '/'.join(args) == 'patterns':
4911              return Row({'status':200,'pattern':'list',
4912                          'error':None,'response':patterns})
4913          for pattern in patterns:
4914              otable=table=None
4915              if not isinstance(queries,dict):
4916                  dbset=db(queries)
4917              i=0
4918              tags = pattern[1:].split('/')
4919              if len(tags)!=len(args):
4920                  continue
4921              for tag in tags:
4922                  if re1.match(tag):
4923                      # print 're1:'+tag
4924                      tokens = tag[1:-1].split('.')
4925                      table, field = tokens[0], tokens[1]
4926                      if not otable or table == otable:
4927                          if len(tokens)==2 or tokens[2]=='eq':
4928                              query = db[table][field]==args[i]
4929                          elif tokens[2]=='ne':
4930                              query = db[table][field]!=args[i]
4931                          elif tokens[2]=='lt':
4932                              query = db[table][field]<args[i]
4933                          elif tokens[2]=='gt':
4934                              query = db[table][field]>args[i]
4935                          elif tokens[2]=='ge':
4936                              query = db[table][field]>=args[i]
4937                          elif tokens[2]=='le':
4938                              query = db[table][field]<=args[i]
4939                          elif tokens[2]=='year':
4940                              query = db[table][field].year()==args[i]
4941                          elif tokens[2]=='month':
4942                              query = db[table][field].month()==args[i]
4943                          elif tokens[2]=='day':
4944                              query = db[table][field].day()==args[i]
4945                          elif tokens[2]=='hour':
4946                              query = db[table][field].hour()==args[i]
4947                          elif tokens[2]=='minute':
4948                              query = db[table][field].minutes()==args[i]
4949                          elif tokens[2]=='second':
4950                              query = db[table][field].seconds()==args[i]
4951                          elif tokens[2]=='startswith':
4952                              query = db[table][field].startswith(args[i])
4953                          elif tokens[2]=='contains':
4954                              query = db[table][field].contains(args[i])
4955                          else:
4956                              raise RuntimeError, "invalid pattern: %s" % pattern
4957                          if len(tokens)==4 and tokens[3]=='not':
4958                              query = ~query
4959                          elif len(tokens)>=4:
4960                              raise RuntimeError, "invalid pattern: %s" % pattern
4961                          if not otable and isinstance(queries,dict):
4962                              dbset = db(queries[table])
4963                          dbset=dbset(query)
4964                      else:
4965                          raise RuntimeError, "missing relation in pattern: %s" % pattern
4966                  elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
4967                      ref = tag[tag.find('[')+1:-1]
4968                      if '.' in ref and otable:
4969                          table,field = ref.split('.')
4970                          # print table,field
4971                          if nested_select:
4972                              try:
4973                                  dbset=db(db[table][field].belongs(dbset._select(db[otable]._id)))
4974                              except ValueError:
4975                                  return Row({'status':400,'pattern':pattern,
4976                                              'error':'invalid path','response':None})
4977                          else:
4978                              items = [item.id for item in dbset.select(db[otable]._id)]
4979                              dbset=db(db[table][field].belongs(items))
4980                      else:
4981                          table = ref
4982                          if not otable and isinstance(queries,dict):
4983                              dbset = db(queries[table])
4984                          dbset=dbset(db[table])
4985                  elif tag==':field' and table:
4986                      # # print 're3:'+tag
4987                      field = args[i]
4988                      if not field in db[table]: break
4989                      try:
4990                          item = dbset.select(db[table][field],limitby=(0,1)).first()
4991                      except ValueError:
4992                          return Row({'status':400,'pattern':pattern,
4993                                      'error':'invalid path','response':None})
4994                      if not item:
4995                          return Row({'status':404,'pattern':pattern,
4996                                      'error':'record not found','response':None})
4997                      else:
4998                          return Row({'status':200,'response':item[field],
4999                                      'pattern':pattern})
5000                  elif tag != args[i]:
5001                      break
5002                  otable = table
5003                  i += 1
5004              if i==len(tags) and table:
5005                  ofields = vars.get('order',db[table]._id.name).split('|')
5006                  try:
5007                      orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
5008                  except KeyError:
5009                      return Row({'status':400,'error':'invalid orderby','response':None})
5010                  fields = [field for field in db[table] if field.readable]
5011                  count = dbset.count()
5012                  try:
5013                      offset = int(vars.get('offset',None) or 0)
5014                      limits = (offset,int(vars.get('limit',None) or 1000)+offset)
5015                  except ValueError:
5016                      return Row({'status':400,'error':'invalid limits','response':None})
5017                  if count > limits[1]-limits[0]:
5018                      return Row({'status':400,'error':'too many records','response':None})
5019                  try:
5020                      response = dbset.select(limitby=limits,orderby=orderby,*fields)
5021                  except ValueError:
5022                      return Row({'status':400,'pattern':pattern,
5023                                  'error':'invalid path','response':None})
5024                  return Row({'status':200,'response':response,'pattern':pattern})
5025          return Row({'status':400,'error':'no matching pattern','response':None})
5026 5027
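Following the docstring example above, a minimal sketch of dispatching one pattern by hand (the two lists stand in for request.args and request.vars):

    parser = db.parse_as_rest(['/{person.name}/pets[pet.owner]'],
                              ['James', 'pets'], {})
    if parser.status == 200:
        pets_of_james = parser.response   # Rows of pet records owned by any 'James'
    else:
        reason = parser.error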
5028 - def define_table( 5029 self, 5030 tablename, 5031 *fields, 5032 **args 5033 ):
5034 5035 for key in args: 5036 if key not in [ 5037 'migrate', 5038 'primarykey', 5039 'fake_migrate', 5040 'format', 5041 'singular', 5042 'plural', 5043 'trigger_name', 5044 'sequence_name', 5045 'common_filter', 5046 'polymodel', 5047 'table_class']: 5048 raise SyntaxError, 'invalid table "%s" attribute: %s' \ 5049 % (tablename, key) 5050 if not isinstance(tablename,str): 5051 raise SyntaxError, "missing table name" 5052 tablename = cleanup(tablename) 5053 migrate = self._migrate_enabled and args.get('migrate', 5054 self._migrate) 5055 fake_migrate = self._fake_migrate_all or args.get('fake_migrate', 5056 self._fake_migrate) 5057 table_class = args.get('table_class',Table) 5058 format = args.get('format',None) 5059 trigger_name = args.get('trigger_name', None) 5060 sequence_name = args.get('sequence_name', None) 5061 primarykey =args.get('primarykey',None) 5062 polymodel = args.get('polymodel',None) 5063 singular = args.get('singular',tablename.replace('_',' ').capitalize()) 5064 plural = args.get('plural',pluralize(singular.lower()).capitalize()) 5065 lowertablename = tablename.lower() 5066 5067 if tablename.startswith('_') or hasattr(self,lowertablename) or \ 5068 regex_python_keywords.match(tablename): 5069 raise SyntaxError, 'invalid table name: %s' % tablename 5070 elif lowertablename in self.tables: 5071 raise SyntaxError, 'table already defined: %s' % tablename 5072 elif self.check_reserved: 5073 self.check_reserved_keyword(tablename) 5074 5075 if self._common_fields: 5076 fields = [f for f in fields] + [f for f in self._common_fields] 5077 5078 common_filter = args.get('common_filter', None) 5079 5080 t = self[tablename] = table_class(self, tablename, *fields, 5081 **dict(primarykey=primarykey, 5082 trigger_name=trigger_name, 5083 sequence_name=sequence_name, 5084 common_filter=common_filter)) 5085 5086 # db magic 5087 if self._uri in (None,'None'): 5088 return t 5089 5090 t._create_references() 5091 5092 if migrate or self._adapter.dbengine=='google:datastore': 5093 try: 5094 sql_locker.acquire() 5095 self._adapter.create_table(t,migrate=migrate, 5096 fake_migrate=fake_migrate, 5097 polymodel=polymodel) 5098 finally: 5099 sql_locker.release() 5100 else: 5101 t._dbt = None 5102 self.tables.append(tablename) 5103 t._format = format 5104 t._singular = singular 5105 t._plural = plural 5106 return t
5107
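A short sketch of the keyword arguments validated above (all names taken from the accepted list):

    db.define_table('invoice',
        Field('total', 'double'),
        migrate=True,                    # create/alter the table as needed
        format='%(id)s: %(total)s',      # representation of references to this table
        singular='Invoice', plural='Invoices')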
5108 - def __iter__(self):
5109 for tablename in self.tables: 5110 yield self[tablename]
5111
5112 - def __getitem__(self, key):
5113 return dict.__getitem__(self, str(key))
5114
5115 - def __setitem__(self, key, value):
5116 dict.__setitem__(self, str(key), value)
5117
5118 - def __getattr__(self, key):
5119 return self[key]
5120
5121 - def __setattr__(self, key, value):
5122 if key[:1]!='_' and key in self: 5123 raise SyntaxError, \ 5124 'Object %s exists and cannot be redefined' % key 5125 self[key] = value
5126
5127 - def __repr__(self):
5128 return '<DAL ' + dict.__repr__(self) + '>'
5129
5130 - def smart_query(self,fields,text):
5131 return Set(self, smart_query(fields,text))
5132
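A hedged sketch of the search grammar accepted by the module-level smart_query helper (roughly: field name, operator, value):

    rows = db.smart_query([db.person.name],
                          'person.name contains am').select()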
5133 - def __call__(self, query=None, ignore_common_filters=None):
5134 if isinstance(query,Table): 5135 query = query._id>0 5136 elif isinstance(query,Field): 5137 query = query!=None 5138 return Set(self, query, ignore_common_filters=ignore_common_filters)
5139
5140 - def commit(self):
5141 self._adapter.commit()
5142
5143 - def rollback(self):
5144 self._adapter.rollback()
5145
5146 - def executesql(self, query, placeholders=None, as_dict=False):
5147 """ 5148 placeholders is optional and will always be None when using DAL. 5149 If using raw SQL with placeholders, placeholders may be 5150 a sequence of values to be substituted in 5151 or, (if supported by the DB driver), a dictionary with keys 5152 matching named placeholders in your SQL. 5153 5154 Added 2009-12-05 "as_dict" optional argument. Will always be 5155 None when using DAL. If using raw SQL can be set to True 5156 and the results cursor returned by the DB driver will be 5157 converted to a sequence of dictionaries keyed with the db 5158 field names. Tested with SQLite but should work with any database 5159 since the cursor.description used to get field names is part of the 5160 Python dbi 2.0 specs. Results returned with as_dict=True are 5161 the same as those returned when applying .to_list() to a DAL query. 5162 5163 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 5164 5165 --bmeredyk 5166 """ 5167 if placeholders: 5168 self._adapter.execute(query, placeholders) 5169 else: 5170 self._adapter.execute(query) 5171 if as_dict: 5172 if not hasattr(self._adapter.cursor,'description'): 5173 raise RuntimeError, "database does not support executesql(...,as_dict=True)" 5174 # Non-DAL legacy db query, converts cursor results to dict. 5175 # sequence of 7-item sequences. each sequence tells about a column. 5176 # first item is always the field name according to Python Database API specs 5177 columns = self._adapter.cursor.description 5178 # reduce the column info down to just the field names 5179 fields = [f[0] for f in columns] 5180 # will hold our finished resultset in a list 5181 data = self._adapter.cursor.fetchall() 5182 # convert the list for each row into a dictionary so it's 5183 # easier to work with. row['field_name'] rather than row[0] 5184 return [dict(zip(fields,row)) for row in data] 5185 # see if any results returned from database 5186 try: 5187 return self._adapter.cursor.fetchall() 5188 except: 5189 return None
5190
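For example (placeholder syntax follows the driver's paramstyle; qmark shown for SQLite):

    rows = db.executesql('SELECT * FROM person WHERE name = ?;',
                         placeholders=('James',), as_dict=True)
    # rows is a list like [{'id': 1, 'name': 'James'}]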
5191 - def _update_referenced_by(self, other):
5192 for tablename in self.tables: 5193 by = self[tablename]._referenced_by 5194 by[:] = [item for item in by if not item[0] == other]
5195
5196 - def export_to_csv_file(self, ofile, *args, **kwargs):
5197          step = int(kwargs.get('max_fetch_rows',500))
5198          write_colnames = kwargs['write_colnames'] = \
5199              kwargs.get("write_colnames", True)
5200          for table in self.tables:
5201              ofile.write('TABLE %s\r\n' % table)
5202              query = self[table]._id > 0
5203              nrows = self(query).count()
5204              kwargs['write_colnames'] = write_colnames
5205              for k in range(0,nrows,step):
5206                  self(query).select(limitby=(k,k+step)).export_to_csv_file(
5207                      ofile, *args, **kwargs)
5208                  kwargs['write_colnames'] = False
5209              ofile.write('\r\n\r\n')
5210          ofile.write('END')
5211
5212 - def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', 5213 unique='uuid', *args, **kwargs):
5214 if id_map is None: id_map={} 5215 for line in ifile: 5216 line = line.strip() 5217 if not line: 5218 continue 5219 elif line == 'END': 5220 return 5221 elif not line.startswith('TABLE ') or not line[6:] in self.tables: 5222 raise SyntaxError, 'invalid file format' 5223 else: 5224 tablename = line[6:] 5225 self[tablename].import_from_csv_file(ifile, id_map, null, 5226 unique, *args, **kwargs)
5227 5228
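A round-trip sketch of the two methods above (other_db is a second, hypothetical connection defining the same tables):

    ofile = open('backup.csv', 'wb')
    db.export_to_csv_file(ofile)
    ofile.close()
    other_db.import_from_csv_file(open('backup.csv', 'rb'))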
5229 -class SQLALL(object):
5230 """ 5231 Helper class providing a comma-separated string having all the field names 5232 (prefixed by table name and '.') 5233 5234 normally only called from within gluon.sql 5235 """ 5236
5237 - def __init__(self, table):
5238 self.table = table
5239
5240 - def __str__(self):
5241 return ', '.join([str(field) for field in self.table])
5242 5243
5244 -class Reference(int):
5245
5246 - def __allocate(self):
5247 if not self._record: 5248 self._record = self._table[int(self)] 5249 if not self._record: 5250 raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self))
5251
5252 - def __getattr__(self, key):
5253 if key == 'id': 5254 return int(self) 5255 self.__allocate() 5256 return self._record.get(key, None)
5257
5258 - def get(self, key):
5259 return self.__getattr__(key)
5260
5261 - def __setattr__(self, key, value):
5262 if key.startswith('_'): 5263 int.__setattr__(self, key, value) 5264 return 5265 self.__allocate() 5266 self._record[key] = value
5267
5268 - def __getitem__(self, key):
5269 if key == 'id': 5270 return int(self) 5271 self.__allocate() 5272 return self._record.get(key, None)
5273
5274 - def __setitem__(self,key,value):
5275 self.__allocate() 5276 self._record[key] = value
5277 5278
5279 -def Reference_unpickler(data):
5280 return marshal.loads(data)
5281
5282 -def Reference_pickler(data):
5283 try: 5284 marshal_dump = marshal.dumps(int(data)) 5285 except AttributeError: 5286 marshal_dump = 'i%s' % struct.pack('<i', int(data)) 5287 return (Reference_unpickler, (marshal_dump,))
5288 5289 copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler) 5290 5291
5292 -class Table(dict):
5293 5294 """ 5295 an instance of this class represents a database table 5296 5297 Example:: 5298 5299 db = DAL(...) 5300 db.define_table('users', Field('name')) 5301 db.users.insert(name='me') # print db.users._insert(...) to see SQL 5302 db.users.drop() 5303 """ 5304
5305 - def __init__( 5306 self, 5307 db, 5308 tablename, 5309 *fields, 5310 **args 5311 ):
5312 """ 5313 Initializes the table and performs checking on the provided fields. 5314 5315 Each table will have automatically an 'id'. 5316 5317 If a field is of type Table, the fields (excluding 'id') from that table 5318 will be used instead. 5319 5320 :raises SyntaxError: when a supplied field is of incorrect type. 5321 """ 5322 self._tablename = tablename 5323 self._sequence_name = args.get('sequence_name',None) or \ 5324 db and db._adapter.sequence_name(tablename) 5325 self._trigger_name = args.get('trigger_name',None) or \ 5326 db and db._adapter.trigger_name(tablename) 5327 self._common_filter = args.get('common_filter', None) 5328 primarykey = args.get('primarykey', None) 5329 fieldnames,newfields=set(),[] 5330 if primarykey: 5331 if not isinstance(primarykey,list): 5332 raise SyntaxError, \ 5333 "primarykey must be a list of fields from table '%s'" \ 5334 % tablename 5335 self._primarykey = primarykey 5336 elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: 5337 field = Field('id', 'id') 5338 newfields.append(field) 5339 fieldnames.add('id') 5340 self._id = field 5341 for field in fields: 5342 if not isinstance(field, (Field, Table)): 5343 raise SyntaxError, \ 5344 'define_table argument is not a Field or Table: %s' % field 5345 elif isinstance(field, Field) and not field.name in fieldnames: 5346 if hasattr(field, '_db'): 5347 field = copy.copy(field) 5348 newfields.append(field) 5349 fieldnames.add(field.name) 5350 if field.type=='id': 5351 self._id = field 5352 elif isinstance(field, Table): 5353 table = field 5354 for field in table: 5355 if not field.name in fieldnames and not field.type=='id': 5356 newfields.append(copy.copy(field)) 5357 fieldnames.add(field.name) 5358 else: 5359 # let's ignore new fields with duplicated names!!! 5360 pass 5361 fields = newfields 5362 self._db = db 5363 tablename = tablename 5364 self.fields = SQLCallableList() 5365 self.virtualfields = [] 5366 fields = list(fields) 5367 5368 if db and self._db._adapter.uploads_in_blob==True: 5369 for field in fields: 5370 if isinstance(field, Field) and field.type == 'upload'\ 5371 and field.uploadfield is True: 5372 tmp = field.uploadfield = '%s_blob' % field.name 5373 fields.append(self._db.Field(tmp, 'blob', default='')) 5374 5375 lower_fieldnames = set() 5376 reserved = dir(Table) + ['fields'] 5377 for field in fields: 5378 if db and db.check_reserved: 5379 db.check_reserved_keyword(field.name) 5380 elif field.name in reserved: 5381 raise SyntaxError, "field name %s not allowed" % field.name 5382 5383 if field.name.lower() in lower_fieldnames: 5384 raise SyntaxError, "duplicate field %s in table %s" \ 5385 % (field.name, tablename) 5386 else: 5387 lower_fieldnames.add(field.name.lower()) 5388 5389 self.fields.append(field.name) 5390 self[field.name] = field 5391 if field.type == 'id': 5392 self['id'] = field 5393 field.tablename = field._tablename = tablename 5394 field.table = field._table = self 5395 field.db = field._db = self._db 5396 if self._db and not field.type in ('text','blob') and \ 5397 self._db._adapter.maxcharlength < field.length: 5398 field.length = self._db._adapter.maxcharlength 5399 if field.requires is DEFAULT: 5400 field.requires = sqlhtml_validators(field) 5401 self.ALL = SQLALL(self) 5402 5403 if hasattr(self,'_primarykey'): 5404 for k in self._primarykey: 5405 if k not in self.fields: 5406 raise SyntaxError, \ 5407 "primarykey must be a list of fields from table '%s " % tablename 5408 else: 5409 self[k].notnull = True
5410
5411 - def update(self,*args,**kwargs):
5412 raise RuntimeError, "Syntax Not Supported"
5413
5414 - def _validate(self,**vars):
5415 errors = Row() 5416 for key,value in vars.items(): 5417 value,error = self[key].validate(value) 5418 if error: 5419 errors[key] = error 5420 return errors
5421
5422 - def _create_references(self):
5423 pr = self._db._pending_references 5424 self._referenced_by = [] 5425 for fieldname in self.fields: 5426 field=self[fieldname] 5427 if isinstance(field.type,str) and field.type[:10] == 'reference ': 5428 ref = field.type[10:].strip() 5429 if not ref.split(): 5430 raise SyntaxError, 'Table: reference to nothing: %s' %ref 5431 refs = ref.split('.') 5432 rtablename = refs[0] 5433 if not rtablename in self._db: 5434 pr[rtablename] = pr.get(rtablename,[]) + [field] 5435 continue 5436 rtable = self._db[rtablename] 5437 if len(refs)==2: 5438 rfieldname = refs[1] 5439 if not hasattr(rtable,'_primarykey'): 5440 raise SyntaxError,\ 5441 'keyed tables can only reference other keyed tables (for now)' 5442 if rfieldname not in rtable.fields: 5443 raise SyntaxError,\ 5444 "invalid field '%s' for referenced table '%s' in table '%s'" \ 5445 % (rfieldname, rtablename, self._tablename) 5446 rtable._referenced_by.append((self._tablename, field.name)) 5447 for referee in pr.get(self._tablename,[]): 5448 self._referenced_by.append((referee._tablename,referee.name))
5449
5450 - def _filter_fields(self, record, id=False):
5451 return dict([(k, v) for (k, v) in record.items() if k 5452 in self.fields and (self[k].type!='id' or id)])
5453
5454 - def _build_query(self,key):
5455 """ for keyed table only """ 5456 query = None 5457 for k,v in key.iteritems(): 5458 if k in self._primarykey: 5459 if query: 5460 query = query & (self[k] == v) 5461 else: 5462 query = (self[k] == v) 5463 else: 5464 raise SyntaxError, \ 5465 'Field %s is not part of the primary key of %s' % \ 5466 (k,self._tablename) 5467 return query
5468
5469 - def __getitem__(self, key):
5470 if not key: 5471 return None 5472 elif isinstance(key, dict): 5473 """ for keyed table """ 5474 query = self._build_query(key) 5475 rows = self._db(query).select() 5476 if rows: 5477 return rows[0] 5478 return None 5479 elif str(key).isdigit(): 5480 return self._db(self._id == key).select(limitby=(0,1)).first() 5481 elif key: 5482 return dict.__getitem__(self, str(key))
5483
5484 - def __call__(self, key=DEFAULT, **kwargs):
5485 for_update = kwargs.get('_for_update',False) 5486 if '_for_update' in kwargs: del kwargs['_for_update'] 5487 if not key is DEFAULT: 5488 if isinstance(key, Query): 5489 record = self._db(key).select( 5490 limitby=(0,1),for_update=for_update).first() 5491 elif not str(key).isdigit(): 5492 record = None 5493 else: 5494 record = self._db(self._id == key).select( 5495 limitby=(0,1),for_update=for_update).first() 5496 if record: 5497 for k,v in kwargs.items(): 5498 if record[k]!=v: return None 5499 return record 5500 elif kwargs: 5501 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()]) 5502 return self._db(query).select(limitby=(0,1),for_update=for_update).first() 5503 else: 5504 return None
5505
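The _for_update flag handled above asks the adapter for a locked read (SELECT ... FOR UPDATE, where the backend supports it); a sketch:

    row = db.person(1, _for_update=True)   # row stays locked until commit/rollback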
5506 - def __setitem__(self, key, value):
5507 if isinstance(key, dict) and isinstance(value, dict): 5508 """ option for keyed table """ 5509 if set(key.keys()) == set(self._primarykey): 5510 value = self._filter_fields(value) 5511 kv = {} 5512 kv.update(value) 5513 kv.update(key) 5514 if not self.insert(**kv): 5515 query = self._build_query(key) 5516 self._db(query).update(**self._filter_fields(value)) 5517 else: 5518 raise SyntaxError,\ 5519 'key must have all fields from primary key: %s'%\ 5520 (self._primarykey) 5521 elif str(key).isdigit(): 5522 if key == 0: 5523 self.insert(**self._filter_fields(value)) 5524 elif not self._db(self._id == key)\ 5525 .update(**self._filter_fields(value)): 5526 raise SyntaxError, 'No such record: %s' % key 5527 else: 5528 if isinstance(key, dict): 5529 raise SyntaxError,\ 5530 'value must be a dictionary: %s' % value 5531 dict.__setitem__(self, str(key), value)
5532
5533 - def __delitem__(self, key):
5534 if isinstance(key, dict): 5535 query = self._build_query(key) 5536 if not self._db(query).delete(): 5537 raise SyntaxError, 'No such record: %s' % key 5538 elif not str(key).isdigit() or not self._db(self._id == key).delete(): 5539 raise SyntaxError, 'No such record: %s' % key
5540
5541 - def __getattr__(self, key):
5542 return self[key]
5543
5544 - def __setattr__(self, key, value):
5545 if key[:1]!='_' and key in self: 5546 raise SyntaxError, 'Object exists and cannot be redefined: %s' % key 5547 self[key] = value
5548
5549 - def __iter__(self):
5550 for fieldname in self.fields: 5551 yield self[fieldname]
5552
5553 - def __repr__(self):
5554 return '<Table ' + dict.__repr__(self) + '>'
5555
5556 - def __str__(self):
5557 if self.get('_ot', None): 5558 return '%s AS %s' % (self._ot, self._tablename) 5559 return self._tablename
5560
5561 - def _drop(self, mode = ''):
5562 return self._db._adapter._drop(self, mode)
5563
5564 - def drop(self, mode = ''):
5565 return self._db._adapter.drop(self,mode)
5566
5567 - def _listify(self,fields,update=False):
5568 new_fields = [] 5569 new_fields_names = [] 5570 for name in fields: 5571 if not name in self.fields: 5572 if name != 'id': 5573 raise SyntaxError, 'Field %s does not belong to the table' % name 5574 else: 5575 new_fields.append((self[name],fields[name])) 5576 new_fields_names.append(name) 5577 for ofield in self: 5578 if not ofield.name in new_fields_names: 5579 if not update and not ofield.default is None: 5580 new_fields.append((ofield,ofield.default)) 5581 elif update and not ofield.update is None: 5582 new_fields.append((ofield,ofield.update)) 5583 for ofield in self: 5584 if not ofield.name in new_fields_names and ofield.compute: 5585 try: 5586 new_fields.append((ofield,ofield.compute(Row(fields)))) 5587 except KeyError: 5588 pass 5589 if not update and ofield.required and not ofield.name in new_fields_names: 5590 raise SyntaxError,'Table: missing required field: %s' % ofield.name 5591 return new_fields
5592
5593 - def _insert(self, **fields):
5594 return self._db._adapter._insert(self,self._listify(fields))
5595
5596 - def insert(self, **fields):
5597 return self._db._adapter.insert(self,self._listify(fields))
5598
5599 - def validate_and_insert(self,**fields):
5600 response = Row() 5601 response.errors = Row() 5602 new_fields = copy.copy(fields) 5603 for key,value in fields.items(): 5604 value,error = self[key].validate(value) 5605 if error: 5606 response.errors[key] = error 5607 else: 5608 new_fields[key] = value 5609 if not response.errors: 5610 response.id = self.insert(**new_fields) 5611 else: 5612 response.id = None 5613 return response
5614
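For example:

    response = db.person.validate_and_insert(name='James')
    new_id, errors = response.id, response.errors   # id is None when validation fails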
5615 - def update_or_insert(self, _key=DEFAULT, **values):
5616 if _key is DEFAULT: 5617 record = self(**values) 5618 else: 5619 record = self(_key) 5620 if record: 5621 record.update_record(**values) 5622 newid = None 5623 else: 5624 newid = self.insert(**values) 5625 return newid
5626
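For example:

    new_id = db.person.update_or_insert(name='James')
    # returns the new id on insert, None when a matching record was updated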
5627 - def bulk_insert(self, items):
5628 """ 5629 here items is a list of dictionaries 5630 """ 5631 items = [self._listify(item) for item in items] 5632 return self._db._adapter.bulk_insert(self,items)
5633
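For example:

    ids = db.person.bulk_insert([{'name': 'Alex'}, {'name': 'Bo'}])
    # on most SQL adapters this amounts to one insert per dictionary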
5634 - def _truncate(self, mode = None):
5635 return self._db._adapter._truncate(self, mode)
5636
5637 - def truncate(self, mode = None):
5638 return self._db._adapter.truncate(self, mode)
5639
5640 - def import_from_csv_file( 5641 self, 5642 csvfile, 5643 id_map=None, 5644 null='<NULL>', 5645 unique='uuid', 5646 *args, **kwargs 5647 ):
5648 """ 5649 import records from csv file. Column headers must have same names as 5650 table fields. field 'id' is ignored. If column names read 'table.file' 5651 the 'table.' prefix is ignored. 5652 'unique' argument is a field which must be unique 5653 (typically a uuid field) 5654 """ 5655 5656 delimiter = kwargs.get('delimiter', ',') 5657 quotechar = kwargs.get('quotechar', '"') 5658 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 5659 5660 reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) 5661 colnames = None 5662 if isinstance(id_map, dict): 5663 if not self._tablename in id_map: 5664 id_map[self._tablename] = {} 5665 id_map_self = id_map[self._tablename] 5666 5667 def fix(field, value, id_map): 5668 list_reference_s='list:reference' 5669 if value == null: 5670 value = None 5671 elif field.type=='blob': 5672 value = base64.b64decode(value) 5673 elif field.type=='double': 5674 if not value.strip(): 5675 value = None 5676 else: 5677 value = float(value) 5678 elif field.type=='integer': 5679 if not value.strip(): 5680 value = None 5681 else: 5682 value = int(value) 5683 elif field.type.startswith('list:string'): 5684 value = bar_decode_string(value) 5685 elif field.type.startswith(list_reference_s): 5686 ref_table = field.type[len(list_reference_s):].strip() 5687 value = [id_map[ref_table][int(v)] \ 5688 for v in bar_decode_string(value)] 5689 elif field.type.startswith('list:'): 5690 value = bar_decode_integer(value) 5691 elif id_map and field.type.startswith('reference'): 5692 try: 5693 value = id_map[field.type[9:].strip()][int(value)] 5694 except KeyError: 5695 pass 5696 return (field.name, value)
5697 5698 def is_id(colname): 5699 if colname in self: 5700 return self[colname].type == 'id' 5701 else: 5702 return False
5703 5704 for line in reader: 5705 if not line: 5706 break 5707 if not colnames: 5708 colnames = [x.split('.',1)[-1] for x in line][:len(line)] 5709 cols, cid = [], [] 5710 for i,colname in enumerate(colnames): 5711 if is_id(colname): 5712 cid = i 5713 else: 5714 cols.append(i) 5715 if colname == unique: 5716 unique_idx = i 5717 else: 5718 items = [fix(self[colnames[i]], line[i], id_map) \ 5719 for i in cols if colnames[i] in self.fields] 5720 # Validation. Check for duplicate of 'unique' &, 5721 # if present, update instead of insert. 5722 if not unique or unique not in colnames: 5723 new_id = self.insert(**dict(items)) 5724 else: 5725 unique_value = line[unique_idx] 5726 query = self._db[self][unique] == unique_value 5727 record = self._db(query).select().first() 5728 if record: 5729 record.update_record(**dict(items)) 5730 new_id = record[self._id.name] 5731 else: 5732 new_id = self.insert(**dict(items)) 5733 if id_map and cid != []: 5734 id_map_self[int(line[cid])] = new_id 5735
5736 - def with_alias(self, alias):
5737 return self._db._adapter.alias(self,alias)
5738
5739 - def on(self, query):
5740 return Expression(self._db,self._db._adapter.ON,self,query)
5741 5742 5743
5744 -class Expression(object):
5745
5746 - def __init__( 5747 self, 5748 db, 5749 op, 5750 first=None, 5751 second=None, 5752 type=None, 5753 ):
5754 5755 self.db = db 5756 self.op = op 5757 self.first = first 5758 self.second = second 5759 ### self._tablename = first._tablename ## CHECK 5760 if not type and first and hasattr(first,'type'): 5761 self.type = first.type 5762 else: 5763 self.type = type
5764
5765 - def sum(self):
5766 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)
5767
5768 - def max(self):
5769 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)
5770
5771 - def min(self):
5772 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)
5773
5774 - def len(self):
5775 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
5776
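Aggregate expressions built by the methods above are selected like fields and read back from the returned row using the same expression; a sketch:

    total = db.person.id.sum()
    value = db(db.person).select(total).first()[total]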
5777 - def lower(self):
5778 return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)
5779
5780 - def upper(self):
5781 return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)
5782
5783 - def year(self):
5784 return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')
5785
5786 - def month(self):
5787 return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')
5788
5789 - def day(self):
5790 return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')
5791
5792 - def hour(self):
5793 return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')
5794
5795 - def minutes(self):
5796 return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')
5797
5798 - def coalesce(self,*others):
5799 return Expression(self.db, self.db._adapter.COALESCE, self, others, self.type)
5800
5801 - def coalesce_zero(self):
5802 return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)
5803
5804 - def seconds(self):
5805 return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')
5806
5807 - def __getslice__(self, start, stop):
5808 if start < 0: 5809 pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) 5810 else: 5811 pos0 = start + 1 5812 5813 if stop < 0: 5814 length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) 5815 elif stop == sys.maxint: 5816 length = self.len() 5817 else: 5818 length = '(%s - %s)' % (stop + 1, pos0) 5819 return Expression(self.db,self.db._adapter.SUBSTRING, 5820 self, (pos0, length), self.type)
5821
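Slicing an expression expands to SQL SUBSTRING; for example:

    query = db.person.name[:2] == 'Ja'   # SUBSTRING(name, 1, 2) = 'Ja'
    rows = db(query).select()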
5822 - def __getitem__(self, i):
5823 return self[i:i + 1]
5824
5825 - def __str__(self):
5826 return self.db._adapter.expand(self,self.type)
5827
5828 - def __or__(self, other): # for use in sortby
5829 return Expression(self.db,self.db._adapter.COMMA,self,other,self.type)
5830
5831 - def __invert__(self):
5832          if hasattr(self,'op') and self.op == self.db._adapter.INVERT:
5833              return self.first
5834          return Expression(self.db,self.db._adapter.INVERT,self,type=self.type)
5835
5836 - def __add__(self, other):
5837 return Expression(self.db,self.db._adapter.ADD,self,other,self.type)
5838
5839 - def __sub__(self, other):
5840 if self.type == 'integer': 5841 result_type = 'integer' 5842 elif self.type in ['date','time','datetime','double']: 5843 result_type = 'double' 5844 else: 5845 raise SyntaxError, "subtraction operation not supported for type" 5846 return Expression(self.db,self.db._adapter.SUB,self,other, 5847 result_type)
5848 - def __mul__(self, other):
5849 return Expression(self.db,self.db._adapter.MUL,self,other,self.type)
5850
5851 - def __div__(self, other):
5852 return Expression(self.db,self.db._adapter.DIV,self,other,self.type)
5853
5854 - def __mod__(self, other):
5855 return Expression(self.db,self.db._adapter.MOD,self,other,self.type)
5856
5857 - def __eq__(self, value):
5858 return Query(self.db, self.db._adapter.EQ, self, value)
5859
5860 - def __ne__(self, value):
5861 return Query(self.db, self.db._adapter.NE, self, value)
5862
5863 - def __lt__(self, value):
5864 return Query(self.db, self.db._adapter.LT, self, value)
5865
5866 - def __le__(self, value):
5867 return Query(self.db, self.db._adapter.LE, self, value)
5868
5869 - def __gt__(self, value):
5870 return Query(self.db, self.db._adapter.GT, self, value)
5871
5872 - def __ge__(self, value):
5873 return Query(self.db, self.db._adapter.GE, self, value)
5874
5875 - def like(self, value):
5876 return Query(self.db, self.db._adapter.LIKE, self, value)
5877
5878 - def belongs(self, value):
5879 return Query(self.db, self.db._adapter.BELONGS, self, value)
5880
5881 - def startswith(self, value):
5882 if not self.type in ('string', 'text'): 5883 raise SyntaxError, "startswith used with incompatible field type" 5884 return Query(self.db, self.db._adapter.STARTSWITH, self, value)
5885
5886 - def endswith(self, value):
5887 if not self.type in ('string', 'text'): 5888 raise SyntaxError, "endswith used with incompatible field type" 5889 return Query(self.db, self.db._adapter.ENDSWITH, self, value)
5890
5891 - def contains(self, value, all=False):
5892 if isinstance(value,(list, tuple)): 5893 subqueries = [self.contains(str(v).strip()) for v in value if str(v).strip()] 5894 return reduce(all and AND or OR, subqueries) 5895 if not self.type in ('string', 'text') and not self.type.startswith('list:'): 5896 raise SyntaxError, "contains used with incompatible field type" 5897 return Query(self.db, self.db._adapter.CONTAINS, self, value)
5898
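When given a list, contains reduces to a conjunction (all=True) or disjunction (the default) of per-value subqueries; for example:

    query = db.person.info.contains(['cat', 'dog'], all=True)   # must contain both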
5899 - def with_alias(self, alias):
5900 return Expression(self.db, self.db._adapter.AS, self, alias, self.type)
5901 5902 # for use in both Query and sortby 5903 5904
5905 -class SQLCustomType(object):
5906 """ 5907 allows defining of custom SQL types 5908 5909 Example:: 5910 5911 decimal = SQLCustomType( 5912 type ='double', 5913 native ='integer', 5914 encoder =(lambda x: int(float(x) * 100)), 5915 decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) 5916 ) 5917 5918 db.define_table( 5919 'example', 5920 Field('value', type=decimal) 5921 ) 5922 5923 :param type: the web2py type (default = 'string') 5924 :param native: the backend type 5925 :param encoder: how to encode the value to store it in the backend 5926 :param decoder: how to decode the value retrieved from the backend 5927 :param validator: what validators to use ( default = None, will use the 5928 default validator for type) 5929 """ 5930
5931 - def __init__( 5932 self, 5933 type='string', 5934 native=None, 5935 encoder=None, 5936 decoder=None, 5937 validator=None, 5938 _class=None, 5939 ):
5940 5941 self.type = type 5942 self.native = native 5943 self.encoder = encoder or (lambda x: x) 5944 self.decoder = decoder or (lambda x: x) 5945 self.validator = validator 5946 self._class = _class or type
5947
5948 - def startswith(self, text=None):
5949 try: 5950 return self.type.startswith(self, text) 5951 except TypeError: 5952 return False
5953
5954 - def __getslice__(self, a=0, b=100):
5955 return None
5956
5957 - def __getitem__(self, i):
5958 return None
5959
5960 - def __str__(self):
5961 return self._class
5962
5963 -class FieldVirtual(object):
5964 - def __init__(self, f):
5965 self.f = f
5966
5967 -class FieldLazy(object):
5968 - def __init__(self, f, handler=None):
5969 self.f = f 5970 self.handler = handler
5971 5972
5973 -class Field(Expression):
5974 
5975      Virtual = FieldVirtual
5976      Lazy = FieldLazy
5977 
5978      """
5979      an instance of this class represents a database field
5980 
5981      example::
5982 
5983          a = Field(name, 'string', length=32, default=None, required=False,
5984              requires=IS_NOT_EMPTY(), ondelete='CASCADE',
5985              notnull=False, unique=False,
5986              widget=None, label=None, comment=None,
5987              uploadfield=True, # True means store on disk,
5988                                # 'a_field_name' means store in this field in db
5989                                # False means file content will be discarded.
5990              writable=True, readable=True, update=None, authorize=None,
5991              autodelete=False, represent=None, uploadfolder=None,
5992              uploadseparate=False # upload to separate directories by the
5993                                   # uuid_key's first 2 characters and tablename.fieldname
5994                                   # False - old behavior
5995                                   # True - put uploaded file in
5996                                   #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
5997                                   #   directory)
5998 
5999      to be used as argument of DAL.define_table
6000 
6001      allowed field types:
6002      string, boolean, integer, double, text, blob,
6003      date, time, datetime, upload, password
6004 
6005      strings have length Adapter.maxcharlength by default (512, or 255 for mysql)
6006      fields should have a default or they will be required in SQLFORMs
6007      the requires argument is used to validate the field input in SQLFORMs
6008 
6009      """
6010 
6011 - def __init__( 6012 self, 6013 fieldname, 6014 type='string', 6015 length=None, 6016 default=DEFAULT, 6017 required=False, 6018 requires=DEFAULT, 6019 ondelete='CASCADE', 6020 notnull=False, 6021 unique=False, 6022 uploadfield=True, 6023 widget=None, 6024 label=DEFAULT, 6025 comment=None, 6026 writable=True, 6027 readable=True, 6028 update=None, 6029 authorize=None, 6030 autodelete=False, 6031 represent=None, 6032 uploadfolder=None, 6033 uploadseparate=False, 6034 compute=None, 6035 custom_store=None, 6036 custom_retrieve=None, 6037 custom_delete=None, 6038 ):
6039 self.db = None 6040 self.op = None 6041 self.first = None 6042 self.second = None 6043 if not isinstance(fieldname,str): 6044 raise SyntaxError, "missing field name" 6045 self.name = fieldname = cleanup(fieldname) 6046 if hasattr(Table,fieldname) or fieldname[0] == '_' or \ 6047 regex_python_keywords.match(fieldname): 6048 raise SyntaxError, 'Field: invalid field name: %s' % fieldname 6049 if isinstance(type, Table): 6050 type = 'reference ' + type._tablename 6051 self.type = type # 'string', 'integer' 6052 self.length = (length is None) and DEFAULTLENGTH.get(type,512) or length 6053 if default is DEFAULT: 6054 self.default = update or None 6055 else: 6056 self.default = default 6057 self.required = required # is this field required 6058 self.ondelete = ondelete.upper() # this is for reference fields only 6059 self.notnull = notnull 6060 self.unique = unique 6061 self.uploadfield = uploadfield 6062 self.uploadfolder = uploadfolder 6063 self.uploadseparate = uploadseparate 6064 self.widget = widget 6065 if label is DEFAULT: 6066 self.label = fieldname.replace('_', ' ').title() 6067 else: 6068 self.label = label or '' 6069 self.comment = comment 6070 self.writable = writable 6071 self.readable = readable 6072 self.update = update 6073 self.authorize = authorize 6074 self.autodelete = autodelete 6075 if not represent and type in ('list:integer','list:string'): 6076 represent=lambda x,r=None: ', '.join(str(y) for y in x or []) 6077 self.represent = represent 6078 self.compute = compute 6079 self.isattachment = True 6080 self.custom_store = custom_store 6081 self.custom_retrieve = custom_retrieve 6082 self.custom_delete = custom_delete 6083 if self.label is None: 6084 self.label = fieldname.replace('_',' ').title() 6085 if requires is None: 6086 self.requires = [] 6087 else: 6088 self.requires = requires
6089
6090 - def store(self, file, filename=None, path=None):
6091 if self.custom_store: 6092 return self.custom_store(file,filename,path) 6093 if isinstance(file, cgi.FieldStorage): 6094 file = file.file 6095 filename = filename or file.filename 6096 elif not filename: 6097 filename = file.name 6098 filename = os.path.basename(filename.replace('/', os.sep)\ 6099 .replace('\\', os.sep)) 6100 m = re.compile('\.(?P<e>\w{1,5})$').search(filename) 6101 extension = m and m.group('e') or 'txt' 6102 uuid_key = web2py_uuid().replace('-', '')[-16:] 6103 encoded_filename = base64.b16encode(filename).lower() 6104 newfilename = '%s.%s.%s.%s' % \ 6105 (self._tablename, self.name, uuid_key, encoded_filename) 6106 newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension 6107 if isinstance(self.uploadfield,Field): 6108 blob_uploadfield_name = self.uploadfield.uploadfield 6109 keys={self.uploadfield.name: newfilename, 6110 blob_uploadfield_name: file.read()} 6111 self.uploadfield.table.insert(**keys) 6112 elif self.uploadfield == True: 6113 if path: 6114 pass 6115 elif self.uploadfolder: 6116 path = self.uploadfolder 6117 elif self.db._adapter.folder: 6118 path = os.path.join(self.db._adapter.folder, '..', 'uploads') 6119 else: 6120 raise RuntimeError, "you must specify a Field(...,uploadfolder=...)" 6121 if self.uploadseparate: 6122 path = os.path.join(path,"%s.%s" % (self._tablename, self.name),uuid_key[:2]) 6123 if not os.path.exists(path): 6124 os.makedirs(path) 6125 pathfilename = os.path.join(path, newfilename) 6126 dest_file = open(pathfilename, 'wb') 6127 try: 6128 shutil.copyfileobj(file, dest_file) 6129 except IOError: 6130 raise IOError, 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename 6131 dest_file.close() 6132 return newfilename
6133
6134 - def retrieve(self, name, path=None):
6135 if self.custom_retrieve: 6136 return self.custom_retrieve(name, path) 6137 import http 6138 if self.authorize or isinstance(self.uploadfield, str): 6139 row = self.db(self == name).select().first() 6140 if not row: 6141 raise http.HTTP(404) 6142 if self.authorize and not self.authorize(row): 6143 raise http.HTTP(403) 6144 try: 6145 m = regex_content.match(name) 6146 if not m or not self.isattachment: 6147 raise TypeError, 'Can\'t retrieve %s' % name 6148 filename = base64.b16decode(m.group('name'), True) 6149 filename = regex_cleanup_fn.sub('_', filename) 6150 except (TypeError, AttributeError): 6151 filename = name 6152 if isinstance(self.uploadfield, str): # ## if file is in DB 6153 return (filename, cStringIO.StringIO(row[self.uploadfield] or '')) 6154 elif isinstance(self.uploadfield,Field): 6155 blob_uploadfield_name = self.uploadfield.uploadfield 6156 query = self.uploadfield == name 6157 data = self.uploadfield.table(query)[blob_uploadfield_name] 6158 return (filename, cStringIO.StringIO(data)) 6159 else: 6160 # ## if file is on filesystem 6161 if path: 6162 pass 6163 elif self.uploadfolder: 6164 path = self.uploadfolder 6165 else: 6166 path = os.path.join(self.db._adapter.folder, '..', 'uploads') 6167 if self.uploadseparate: 6168 t = m.group('table') 6169 f = m.group('field') 6170 u = m.group('uuidkey') 6171 path = os.path.join(path,"%s.%s" % (t,f),u[:2]) 6172 return (filename, open(os.path.join(path, name), 'rb'))
6173
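A sketch of storing and retrieving an upload by hand, assuming a hypothetical upload Field db.person.photo with a configured upload folder:

    stream = open('photo.jpg', 'rb')
    newname = db.person.photo.store(stream, filename='photo.jpg')
    db.person.insert(name='James', photo=newname)
    (filename, fileobj) = db.person.photo.retrieve(newname)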
6174 - def formatter(self, value):
6175 if value is None or not self.requires: 6176 return value 6177 if not isinstance(self.requires, (list, tuple)): 6178 requires = [self.requires] 6179 elif isinstance(self.requires, tuple): 6180 requires = list(self.requires) 6181 else: 6182 requires = copy.copy(self.requires) 6183 requires.reverse() 6184 for item in requires: 6185 if hasattr(item, 'formatter'): 6186 value = item.formatter(value) 6187 return value
6188
6189 - def validate(self, value):
6190 if not self.requires: 6191 return (value, None) 6192 requires = self.requires 6193 if not isinstance(requires, (list, tuple)): 6194 requires = [requires] 6195 for validator in requires: 6196 (value, error) = validator(value) 6197 if error: 6198 return (value, error) 6199 return (value, None)
6200
6201 - def count(self):
6202 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')
6203
6204 - def __nonzero__(self):
6205 return True
6206
6207 - def __str__(self):
6208 try: 6209 return '%s.%s' % (self.tablename, self.name) 6210 except: 6211 return '<no table>.%s' % self.name
6212 6213
6214 -def raw(s): return Expression(None,s)
6215
6216 -class Query(object):
6217 6218 """ 6219 a query object necessary to define a set. 6220 it can be stored or can be passed to DAL.__call__() to obtain a Set 6221 6222 Example:: 6223 6224 query = db.users.name=='Max' 6225 set = db(query) 6226 records = set.select() 6227 6228 """ 6229
6230 - def __init__( 6231 self, 6232 db, 6233 op, 6234 first=None, 6235 second=None, 6236 ignore_common_filters = False, 6237 ):
6238 self.db = self._db = db 6239 self.op = op 6240 self.first = first 6241 self.second = second 6242 self.ignore_common_filters = ignore_common_filters
6243
6244 - def __str__(self):
6245 return self.db._adapter.expand(self)
6246
6247 - def __and__(self, other):
6248 return Query(self.db,self.db._adapter.AND,self,other)
6249
6250 - def __or__(self, other):
6251 return Query(self.db,self.db._adapter.OR,self,other)
6252
6253 - def __invert__(self):
6254 if self.op==self.db._adapter.NOT: 6255 return self.first 6256 return Query(self.db,self.db._adapter.NOT,self)
6257 6258 6259 regex_quotes = re.compile("'[^']*'") 6260 6261
6262 -def xorify(orderby):
6263 if not orderby: 6264 return None 6265 orderby2 = orderby[0] 6266 for item in orderby[1:]: 6267 orderby2 = orderby2 | item 6268 return orderby2
6269 6270
6271 -class Set(object):
6272 6273 """ 6274 a Set represents a set of records in the database, 6275 the records are identified by the query=Query(...) object. 6276 normally the Set is generated by DAL.__call__(Query(...)) 6277 6278 given a set, for example 6279 set = db(db.users.name=='Max') 6280 you can: 6281 set.update(db.users.name='Massimo') 6282 set.delete() # all elements in the set 6283 set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10)) 6284 and take subsets: 6285 subset = set(db.users.id<5) 6286 """ 6287
6288 - def __init__(self, db, query, ignore_common_filters = None):
6289 self.db = db 6290 self._db = db # for backward compatibility 6291 if query and not ignore_common_filters is None and \ 6292 query.ignore_common_filters != ignore_common_filters: 6293 query = copy.copy(query) 6294 query.ignore_common_filters = ignore_common_filters 6295 self.query = query
6296
6297 - def __call__(self, query, ignore_common_filters=False):
6298 if isinstance(query,Table): 6299 query = query._id>0 6300 elif isinstance(query,str): 6301 query = raw(query) 6302 elif isinstance(query,Field): 6303 query = query!=None 6304 if self.query: 6305 return Set(self.db, self.query & query, 6306 ignore_common_filters = ignore_common_filters) 6307 else: 6308 return Set(self.db, query, 6309 ignore_common_filters = ignore_common_filters)
6310
6311 - def _count(self,distinct=None):
6312 return self.db._adapter._count(self.query,distinct)
6313
6314 - def _select(self, *fields, **attributes):
6315 return self.db._adapter._select(self.query,fields,attributes)
6316
6317 - def _delete(self):
6318 tablename=self.db._adapter.get_table(self.query) 6319 return self.db._adapter._delete(tablename,self.query)
6320
6321 - def _update(self, **update_fields):
6322 tablename = self.db._adapter.get_table(self.query) 6323 fields = self.db[tablename]._listify(update_fields,update=True) 6324 return self.db._adapter._update(tablename,self.query,fields)
6325
6326 - def isempty(self):
6327 return not self.select(limitby=(0,1))
6328
6329 - def count(self,distinct=None):
6330 return self.db._adapter.count(self.query,distinct)
6331
6332 - def select(self, *fields, **attributes):
6333 return self.db._adapter.select(self.query,fields,attributes)
6334
6335 - def delete(self):
6336 tablename=self.db._adapter.get_table(self.query) 6337 self.delete_uploaded_files() 6338 return self.db._adapter.delete(tablename,self.query)
6339
6340 - def update(self, **update_fields):
6341 tablename = self.db._adapter.get_table(self.query) 6342 fields = self.db[tablename]._listify(update_fields,update=True) 6343 if not fields: 6344 raise SyntaxError, "No fields to update" 6345 self.delete_uploaded_files(update_fields) 6346 return self.db._adapter.update(tablename,self.query,fields)
6347
6348 - def validate_and_update(self, **update_fields):
6349 tablename = self.db._adapter.get_table(self.query) 6350 response = Row() 6351 response.errors = Row() 6352 new_fields = copy.copy(update_fields) 6353 for key,value in update_fields.items(): 6354 value,error = self.db[tablename][key].validate(value) 6355 if error: 6356 response.errors[key] = error 6357 else: 6358 new_fields[key] = value 6359 fields = self.db[tablename]._listify(new_fields,update=True) 6360 if not fields: 6361 raise SyntaxError, "No fields to update" 6362 if response.errors: 6363 response.updated = None 6364 else: 6365 self.delete_uploaded_files(new_fields) 6366 response.updated = self.db._adapter.update(tablename,self.query,fields) 6367 return response
6368
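For example:

    response = db(db.person.id == 1).validate_and_update(name='Max')
    # response.updated is the row count, or None when response.errors is non-empty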
6369 - def delete_uploaded_files(self, upload_fields=None):
6370 table = self.db[self.db._adapter.tables(self.query)[0]] 6371 # ## mind uploadfield==True means file is not in DB 6372 if upload_fields: 6373 fields = upload_fields.keys() 6374 else: 6375 fields = table.fields 6376 fields = [f for f in fields if table[f].type == 'upload' 6377 and table[f].uploadfield == True 6378 and table[f].autodelete] 6379 if not fields: 6380 return 6381 for record in self.select(*[table[f] for f in fields]): 6382 for fieldname in fields: 6383 field = table[fieldname] 6384 oldname = record.get(fieldname, None) 6385 if not oldname: 6386 continue 6387 if upload_fields and oldname == upload_fields[fieldname]: 6388 continue 6389 if field.custom_delete: 6390 field.custom_delete(oldname) 6391 else: 6392 uploadfolder = field.uploadfolder 6393 if not uploadfolder: 6394 uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads') 6395 if field.uploadseparate: 6396 items = oldname.split('.') 6397 uploadfolder = os.path.join(uploadfolder, 6398 "%s.%s" % (items[0], items[1]), 6399 items[2][:2]) 6400 oldpath = os.path.join(uploadfolder, oldname) 6401 if os.path.exists(oldpath): 6402 os.unlink(oldpath)
6403
6404 -def update_record(pack, a=None):
6405 (colset, table, id) = pack 6406 b = a or dict(colset) 6407 c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id']) 6408 table._db(table._id==id).update(**c) 6409 for (k, v) in c.items(): 6410 colset[k] = v
6411
6412 -class VirtualCommand(object):
6413 - def __init__(self,method,row):
6414 self.method=method 6415 self.row=row
6416 - def __call__(self,*args,**kwargs):
6417 return self.method(self.row,*args,**kwargs)
6418
6419 -def lazy_virtualfield(f):
6420 f.__lazy__ = True 6421 return f
6422
6423 -class Rows(object):
6424 6425 """ 6426 A wrapper for the return value of a select. It basically represents a table. 6427 It has an iterator and each row is represented as a dictionary. 6428 """ 6429 6430 # ## TODO: this class still needs some work to care for ID/OID 6431
6432 - def __init__( 6433 self, 6434 db=None, 6435 records=[], 6436 colnames=[], 6437 compact=True, 6438 rawrows=None 6439 ):
6440 self.db = db 6441 self.records = records 6442 self.colnames = colnames 6443 self.compact = compact 6444 self.response = rawrows
6445
6446 - def setvirtualfields(self,**keyed_virtualfields):
6447 """ 6448 db.define_table('x',Field('number','integer')) 6449 if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] 6450 6451 from gluon.dal import lazy_virtualfield 6452 6453 class MyVirtualFields(object): 6454 # normal virtual field (backward compatible, discouraged) 6455 def normal_shift(self): return self.x.number+1 6456 # lazy virtual field (because of @staticmethod) 6457 @lazy_virtualfield 6458 def lazy_shift(instance,row,delta=4): return row.x.number+delta 6459 db.x.virtualfields.append(MyVirtualFields()) 6460 6461 for row in db(db.x).select(): 6462 print row.number, row.normal_shift, row.lazy_shift(delta=7) 6463 """ 6464 if not keyed_virtualfields: 6465 return self 6466 for row in self.records: 6467 for (tablename,virtualfields) in keyed_virtualfields.items(): 6468 attributes = dir(virtualfields) 6469 if not tablename in row: 6470 box = row[tablename] = Row() 6471 else: 6472 box = row[tablename] 6473 updated = False 6474 for attribute in attributes: 6475 if attribute[0] != '_': 6476 method = getattr(virtualfields,attribute) 6477 if hasattr(method,'__lazy__'): 6478 box[attribute]=VirtualCommand(method,row) 6479 elif type(method)==types.MethodType: 6480 if not updated: 6481 virtualfields.__dict__.update(row) 6482 updated = True 6483 box[attribute]=method() 6484 return self
6485
6486 - def __and__(self,other):
6487 if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects' 6488 records = self.records+other.records 6489 return Rows(self.db,records,self.colnames)
6490
6491 - def __or__(self,other):
6492 if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects' 6493 records = self.records 6494 records += [record for record in other.records \ 6495 if not record in records] 6496 return Rows(self.db,records,self.colnames)
6497
6498 - def __nonzero__(self):
6499 if len(self.records): 6500 return 1 6501 return 0
6502
6503 - def __len__(self):
6504 return len(self.records)
6505
6506 - def __getslice__(self, a, b):
6507 return Rows(self.db,self.records[a:b],self.colnames)
6508
6509 - def __getitem__(self, i):
6510 row = self.records[i] 6511 keys = row.keys() 6512 if self.compact and len(keys) == 1 and keys[0] != '_extra': 6513 return row[row.keys()[0]] 6514 return row
6515
6516 - def __iter__(self):
6517 """ 6518 iterator over records 6519 """ 6520 6521 for i in xrange(len(self)): 6522 yield self[i]
6523
6524 - def __str__(self):
6525 """ 6526 serializes the table into a csv file 6527 """ 6528 6529 s = cStringIO.StringIO() 6530 self.export_to_csv_file(s) 6531 return s.getvalue()
6532
6533 - def first(self):
6534 if not self.records: 6535 return None 6536 return self[0]
6537
6538 - def last(self):
6539 if not self.records: 6540 return None 6541 return self[-1]
6542
6543 - def find(self,f):
6544 """ 6545 returns a new Rows object, a subset of the original object, 6546 filtered by the function f 6547 """ 6548 if not self.records: 6549 return Rows(self.db, [], self.colnames) 6550 records = [] 6551 for i in range(0,len(self)): 6552 row = self[i] 6553 if f(row): 6554 records.append(self.records[i]) 6555 return Rows(self.db, records, self.colnames)
6556
6557 - def exclude(self, f):
6558 """ 6559 removes elements from the calling Rows object, filtered by the function f, 6560 and returns a new Rows object containing the removed elements 6561 """ 6562 if not self.records: 6563 return Rows(self.db, [], self.colnames) 6564 removed = [] 6565 i=0 6566 while i<len(self): 6567 row = self[i] 6568 if f(row): 6569 removed.append(self.records[i]) 6570 del self.records[i] 6571 else: 6572 i += 1 6573 return Rows(self.db, removed, self.colnames)
6574
6575 - def sort(self, f, reverse=False):
6576 """ 6577 returns a list of sorted elements (not sorted in place) 6578 """ 6579 return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)
6580
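A sketch contrasting the three filters above: find and sort return new Rows objects, while exclude mutates the Rows in place and returns what was removed:

    rows = db(db.person).select()
    removed = rows.exclude(lambda row: not row.name)
    by_name = rows.sort(lambda row: row.name, reverse=True)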
6581 - def as_list(self, 6582 compact=True, 6583 storage_to_dict=True, 6584 datetime_to_str=True):
6585 """ 6586 returns the data as a list or dictionary. 6587 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 6588 :param datetime_to_str: convert datetime fields as strings (default True) 6589 """ 6590 (oc, self.compact) = (self.compact, compact) 6591 if storage_to_dict: 6592 items = [item.as_dict(datetime_to_str) for item in self] 6593 else: 6594 items = [item for item in self] 6595 self.compact = compact 6596 return items
6597 6598
6599 - def as_dict(self, 6600 key='id', 6601 compact=True, 6602 storage_to_dict=True, 6603 datetime_to_str=True):
6604 """ 6605 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 6606 6607 :param key: the name of the field to be used as dict key, normally the id 6608 :param compact: ? (default True) 6609 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 6610 :param datetime_to_str: convert datetime fields as strings (default True) 6611 """ 6612 rows = self.as_list(compact, storage_to_dict, datetime_to_str) 6613 if isinstance(key,str) and key.count('.')==1: 6614 (table, field) = key.split('.') 6615 return dict([(r[table][field],r) for r in rows]) 6616 elif isinstance(key,str): 6617 return dict([(r[key],r) for r in rows]) 6618 else: 6619 return dict([(key(r),r) for r in rows])
6620
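For example, keying the dictionary by a field other than id:

    by_name = db(db.person).select().as_dict(key='name')
    # {'James': {...}, 'Max': {...}}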
6621 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
6622 """ 6623 export data to csv, the first line contains the column names 6624 6625 :param ofile: where the csv must be exported to 6626 :param null: how null values must be represented (default '<NULL>') 6627 :param delimiter: delimiter to separate values (default ',') 6628 :param quotechar: character to use to quote string values (default '"') 6629 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 6630 :param represent: use the fields .represent value (default False) 6631 :param colnames: list of column names to use (default self.colnames) 6632 This will only work when exporting rows objects!!!! 6633 DO NOT use this with db.export_to_csv() 6634 """ 6635 delimiter = kwargs.get('delimiter', ',') 6636 quotechar = kwargs.get('quotechar', '"') 6637 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 6638 represent = kwargs.get('represent', False) 6639 writer = csv.writer(ofile, delimiter=delimiter, 6640 quotechar=quotechar, quoting=quoting) 6641 colnames = kwargs.get('colnames', self.colnames) 6642 write_colnames = kwargs.get('write_colnames',True) 6643 # a proper csv starting with the column names 6644 if write_colnames: 6645 writer.writerow(colnames) 6646 6647 def none_exception(value): 6648 """ 6649 returns a cleaned up value that can be used for csv export: 6650 - unicode text is encoded as such 6651 - None values are replaced with the given representation (default <NULL>) 6652 """ 6653 if value is None: 6654 return null 6655 elif isinstance(value, unicode): 6656 return value.encode('utf8') 6657 elif isinstance(value,Reference): 6658 return int(value) 6659 elif hasattr(value, 'isoformat'): 6660 return value.isoformat()[:19].replace('T', ' ') 6661 elif isinstance(value, (list,tuple)): # for type='list:..' 6662 return bar_encode(value) 6663 return value

        for record in self:
            row = []
            for col in colnames:
                if not table_field.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == 'blob' and value is not None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
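    # Illustrative usage (not part of the original source; 'db.person' is a
    # hypothetical table):
    #     import cStringIO
    #     stream = cStringIO.StringIO()
    #     db(db.person).select().export_to_csv_file(stream, represent=True)
    #     print stream.getvalue()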

    def xml(self):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """
        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def json(self, mode='object', default=None):
        """
        serializes the table to a JSON list of objects ('object' mode)
        or a list of lists ('array' mode)
        """
        mode = mode.lower()
        if mode not in ['object', 'array']:
            raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode

        def inner_loop(record, col):
            res = None
            if not table_field.match(col):
                key = col
                res = record._extra[col]
            else:
                # split only for table.field columns; extra columns
                # may not contain a single '.'
                (t, f) = col.split('.')
                key = f
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (key, res)
            else:
                return res

        if mode == 'object':
            items = [dict([inner_loop(record, col) for col in self.colnames])
                     for record in self]
        else:
            items = [[inner_loop(record, col) for col in self.colnames]
                     for record in self]
        if have_serializers:
            return serializers.json(items,
                                    default=default or serializers.custom_json)
        else:
            import simplejson
            return simplejson.dumps(items)
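    # Illustrative usage (not part of the original source; the shown output
    # is hypothetical):
    #     rows = db(db.person).select()
    #     print rows.json()              # e.g. '[{"id": 1, "name": "Max"}]'
    #     print rows.json(mode='array')  # e.g. '[[1, "Max"]]'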

def Rows_unpickler(data):
    return cPickle.loads(data)


def Rows_pickler(data):
    return Rows_unpickler, \
        (cPickle.dumps(data.as_list(storage_to_dict=True,
                                    datetime_to_str=False)),)

copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
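
# Illustrative note (not part of the original source): registering the
# pickler via copy_reg makes pickling a Rows object store its data as a
# plain list of dictionaries, so unpickling yields that list rather than
# a live Rows object:
#     import cPickle
#     data = cPickle.loads(cPickle.dumps(rows))  # list of dicts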


################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
6745 """ 6746 6747 >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") 6748 >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) 6749 >>> tmp = db.define_table('users',\ 6750 Field('stringf', 'string', length=32, required=True),\ 6751 Field('booleanf', 'boolean', default=False),\ 6752 Field('passwordf', 'password', notnull=True),\ 6753 Field('uploadf', 'upload'),\ 6754 Field('blobf', 'blob'),\ 6755 Field('integerf', 'integer', unique=True),\ 6756 Field('doublef', 'double', unique=True,notnull=True),\ 6757 Field('datef', 'date', default=datetime.date.today()),\ 6758 Field('timef', 'time'),\ 6759 Field('datetimef', 'datetime'),\ 6760 migrate='test_user.table') 6761 6762 Insert a field 6763 6764 >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ 6765 uploadf=None, integerf=5, doublef=3.14,\ 6766 datef=datetime.date(2001, 1, 1),\ 6767 timef=datetime.time(12, 30, 15),\ 6768 datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) 6769 1 6770 6771 Drop the table 6772 6773 >>> db.users.drop() 6774 6775 Examples of insert, select, update, delete 6776 6777 >>> tmp = db.define_table('person',\ 6778 Field('name'),\ 6779 Field('birth','date'),\ 6780 migrate='test_person.table') 6781 >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') 6782 >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') 6783 6784 commented len(db().select(db.person.ALL)) 6785 commented 2 6786 6787 >>> me = db(db.person.id==person_id).select()[0] # test select 6788 >>> me.name 6789 'Massimo' 6790 >>> db(db.person.name=='Massimo').update(name='massimo') # test update 6791 1 6792 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 6793 1 6794 6795 Update a single record 6796 6797 >>> me.update_record(name=\"Max\") 6798 >>> me.name 6799 'Max' 6800 6801 Examples of complex search conditions 6802 6803 >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 6804 1 6805 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 6806 1 6807 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 6808 1 6809 >>> me = db(db.person.id==person_id).select(db.person.name)[0] 6810 >>> me.name 6811 'Max' 6812 6813 Examples of search conditions using extract from date/datetime/time 6814 6815 >>> len(db(db.person.birth.month()==12).select()) 6816 1 6817 >>> len(db(db.person.birth.year()>1900).select()) 6818 1 6819 6820 Example of usage of NULL 6821 6822 >>> len(db(db.person.birth==None).select()) ### test NULL 6823 0 6824 >>> len(db(db.person.birth!=None).select()) ### test NULL 6825 1 6826 6827 Examples of search conditions using lower, upper, and like 6828 6829 >>> len(db(db.person.name.upper()=='MAX').select()) 6830 1 6831 >>> len(db(db.person.name.like('%ax')).select()) 6832 1 6833 >>> len(db(db.person.name.upper().like('%AX')).select()) 6834 1 6835 >>> len(db(~db.person.name.upper().like('%AX')).select()) 6836 0 6837 6838 orderby, groupby and limitby 6839 6840 >>> people = db().select(db.person.name, orderby=db.person.name) 6841 >>> order = db.person.name|~db.person.birth 6842 >>> people = db().select(db.person.name, orderby=order) 6843 6844 >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) 6845 6846 >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) 6847 6848 Example of one 2 many relation 6849 6850 >>> tmp = db.define_table('dog',\ 6851 Field('name'),\ 6852 Field('birth','date'),\ 6853 Field('owner',db.person),\ 6854 
migrate='test_dog.table') 6855 >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 6856 1 6857 6858 A simple JOIN 6859 6860 >>> len(db(db.dog.owner==db.person.id).select()) 6861 1 6862 6863 >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 6864 1 6865 6866 Drop tables 6867 6868 >>> db.dog.drop() 6869 >>> db.person.drop() 6870 6871 Example of many 2 many relation and Set 6872 6873 >>> tmp = db.define_table('author', Field('name'),\ 6874 migrate='test_author.table') 6875 >>> tmp = db.define_table('paper', Field('title'),\ 6876 migrate='test_paper.table') 6877 >>> tmp = db.define_table('authorship',\ 6878 Field('author_id', db.author),\ 6879 Field('paper_id', db.paper),\ 6880 migrate='test_authorship.table') 6881 >>> aid = db.author.insert(name='Massimo') 6882 >>> pid = db.paper.insert(title='QCD') 6883 >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) 6884 6885 Define a Set 6886 6887 >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) 6888 >>> rows = authored_papers.select(db.author.name, db.paper.title) 6889 >>> for row in rows: print row.author.name, row.paper.title 6890 Massimo QCD 6891 6892 Example of search condition using belongs 6893 6894 >>> set = (1, 2, 3) 6895 >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) 6896 >>> print rows[0].title 6897 QCD 6898 6899 Example of search condition using nested select 6900 6901 >>> nested_select = db()._select(db.authorship.paper_id) 6902 >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) 6903 >>> print rows[0].title 6904 QCD 6905 6906 Example of expressions 6907 6908 >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) 6909 >>> db(mynumber.id>0).delete() 6910 0 6911 >>> for i in range(10): tmp = mynumber.insert(x=i) 6912 >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum()) 6913 45 6914 6915 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 6916 5 6917 6918 Output in csv 6919 6920 >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() 6921 author.name,paper.title\r 6922 Massimo,QCD 6923 6924 Delete all leftover tables 6925 6926 >>> DAL.distributed_transaction_commit(db) 6927 6928 >>> db.mynumber.drop() 6929 >>> db.authorship.drop() 6930 >>> db.author.drop() 6931 >>> db.paper.drop() 6932 """

################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################

SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field  # was necessary in gluon/globals.py session.connect
DAL.Table = Table  # was necessary in gluon/globals.py session.connect

################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    import doctest
    doctest.testmod()
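
# Illustrative note (not part of the original source): test_all() reads an
# optional command-line argument as the connection string, so the doctests
# can be run against the default sqlite backend or another one, e.g.:
#     python dal.py
#     python dal.py "postgres://user:password@localhost/test"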