# note: dbi-1.15 makes a bunch of DBLINK.PM obsolete (e.g.,
# fetchall_hashref, selectall_*).  this is a reasonable example of late
# binding and handling "flyweight scalars" to encapsulate objects -- and
# use of a DESTROY to clean them up, AUTOLOAD to pass unknown operations
# to a parent.  also has a bit of pod thrown in for good luck :-)
#
# generate-db is a reasonable example of using dbi to execute commands
# for generating a database.  it doesn't have to retrieve or explicitly
# prepare anything since the DDL operations are only run once per
# session.
#
# -- Steven Lembark
#    2930 W. Palmer St.  Chicago, IL 60647
#    lembark@wrkhors.com  800-762-1582
#
# filename="DBlink.pm"

########################################################################
# DBlink.pm
#
# manages the link to the database.  the database handle lives in here
# as a lexical object.  callers pass in sql, get back a flyweight
# scalar w/ an index into a hash of the sql they want run.  they can
# then use the execute method on their object to get records back from
# the database.
#
# constructing the object does not actually prepare the statement
# handle.  all it does is store the sql in a hash and return the
# hash key.  this allows for quick startup and simplifies the init
# logic since everything can easily be done.  when the actual
# execute call is made the statement handle is assigned via ||=,
# so that each statement is only dealt with once.
#
# the destructor deletes the associated hash entry which then daisy-
# chains to destructors of objects stored in the hash.
#
# this can also be used w/ stock method calls, with the caller
# providing the hash key and calling the sub's directly.  once
# all code has been converted to object calls this may go away.
########################################################################

package DBlink;

use strict;

# deal with output during compilation (i.e., when the module is
# use-ed).  later sub calls will get whatever's in effect at the time.
# this only neatens up any verbose logging we do at database
# connection time.
#
# NOTE(review): a file-scope local unwinds when the require of this
# file finishes executing, so these settings really do only cover
# compile/connect-time output -- which is the stated intent.

local $\ = "\n";
local $, = "\n\t";

# CPAN modules

use Carp;
use DBI;
use DBD::Oracle;
use Benchmark;

########################################################################
# globals
########################################################################

# where we cache the sql and statement handles.
# the first queryid we generate ourselves here is 'aaa' == ++'zz'
# (perl's magic string auto-increment).

my %queryz  = ();
my $queryid = 'zz';

# used to make a database connection.
# for now there is only one database connection supported and it is
# configured here.  we make the connection when the module is first
# used.
#
# note: this will normally be done w/in Apache::DBI, which caches
# the database handle externally.  this allows us to keep re-using
# the same handle w/o worrying if the database has gone down in the
# meantime or closed the existing handle.

my $sid  = $ENV{ORACLE_SID}  || croak "Missing ORACLE_SID";
my $host = $ENV{ORACLE_HOST} || croak "Missing ORACLE_HOST";
my $port = $ENV{ORACLE_PORT} || croak "Missing ORACLE_PORT";
my $user = $ENV{ORACLE_USER} || croak "Missing ORACLE_USER";

# the password defaults to the user name.

my $pass = $ENV{ORACLE_PASS} || $user;

# format is sprintf for user, pass, host, port, sid.

my $idformat =
    q{%s/%s@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s))(CONNECT_DATA=(SID=%s)))};

# DBI->connect argument list for the single shared handle.

my @dbhfieldz =
(
    'dbi:Oracle:',
    ( sprintf $idformat, $user, $pass, $host, $port, $sid ),
    '',
    { RaiseError => 1 },
);

# makes it easier to keep $idformat in one place...
# discards any object, returns the connect array.
# build a DBI->connect argument list from named overrides, falling
# back to the ORACLE_* environment.  callable as a sub, class method
# or object method (any leading reference is discarded).

sub connectfieldz
{
    shift if ref $_[0];

    my %valz = @_;

    my @fieldz = ();

    push @fieldz, $valz{user} || $ENV{ORACLE_USER} || croak "Missing ORACLE_USER";

    # FIX(review): the password fallback used to be "|| $valz{pass}",
    # which is circular and always left pass false when neither the
    # argument nor ORACLE_PASS was set.  fall back to the user name,
    # matching the module-level default above.

    push @fieldz, $valz{pass} || $ENV{ORACLE_PASS} || $fieldz[0];

    push @fieldz, $valz{host} || $ENV{ORACLE_HOST} || croak "Missing ORACLE_HOST";
    push @fieldz, $valz{port} || $ENV{ORACLE_PORT} || croak "Missing ORACLE_PORT";
    push @fieldz, $valz{sid}  || $ENV{ORACLE_SID}  || croak "Missing ORACLE_SID";

    @fieldz =
    (
        'dbi:Oracle:',
        ( sprintf $idformat, @fieldz ),
        '',
        { RaiseError => 1 },
    );

    wantarray ? @fieldz : \@fieldz
}

# sql run immediately after the connect, e.g.,
# q{alter session set nls_date_format = 'dd-mon-yyyy-hh24:mi:ss'}

my @initsql = ();

# the shared handle.  begins life defined-but-false; opened on first
# real use via _connect below.

my $dbh = 0;

# open the shared database handle on first use and run any configured
# initialization sql exactly once.
#
# FIX(review): @initsql was collected by import() but never executed
# anywhere; it is now run right after the connect.  this helper is
# also used by fields() and AUTOLOAD, which previously dereferenced
# the false $dbh if called before any execute().

sub _connect
{
    unless( $dbh )
    {
        $dbh = DBI->connect( @dbhfieldz );

        $dbh->do( $_ ) for @initsql;
    }

    $dbh
}

# this is called from the use pragma after the module is required.
# it processes any module arguments on the "use" line -- in this case
# it pushes them onto @initsql after discarding the package name.

sub import
{
    if( $dbh )
    {
        print STDERR "\n\tDatabase handle already open,\n\ttoo late to configure it.\n";
    }
    else
    {
        shift;
        push @initsql, @_ if @_;
    }
}

# slightly more descriptive name for it.  e.g.,
#
#   DBlink->dbh_config( 'sql to add onto the connect init' );
#
# like import, this only works until the database handle has been
# opened.

*dbh_config = \&import;

########################################################################
# methods
########################################################################

# overload the double-quote operator to extract the object's stored
# sql.  using the subroutine name here is faster but can cause
# inheritance oddities -- if anyone cares to derive other objects
# from this one.

sub sql { $queryz{${$_[0]}}->{sql} || '' };

use overload q{""} => 'sql';

# save the sql, leave the real work for later.  removing extraneous
# white space makes it a bit easier to debug.
#
# returns a blessed flyweight scalar (the hash key), or 0 when no sql
# was supplied.

sub prepare
{
    my $class = shift;

    # each query we get has a new id.  this can lead to duplication
    # of queries if people aren't careful, and there will be "holes"
    # in the key set due to transient objects over time.  q.e.d.,
    # mapping the entire keyset as a qc check probably is a waste.

    if( my $sql = shift )
    {
        my %argz = @_ ? @_ : ();

        my $id = ++$queryid;

        $sql =~ s/^\s+//;
        $sql =~ s/\s+$//;
        $sql =~ s/\n\n+/\n/gs;
        $sql =~ s/[ \t]+/ /gs;

        print STDERR "\npreparing: $id => $sql" if $argz{verbose};

        # store the sql, set the statement handle to false.  the
        # query itself is prepared when the object actually tries
        # to execute.  the user can pass in whatever they like; we
        # can examine these for things like RaiseError or AutoCommit
        # when we use the thing.

        $queryz{$id}{sql} = $sql;
        $queryz{$id}{arg} = \%argz;
        $queryz{$id}{sth} = 0;

        # if the first argument was an object use its class, otherwise
        # the first item was a class name string.  this trick allows
        # the use of copy constructors.

        bless \$id, ref $class || $class
    }
    else
    {
        # not much we can do without a query... return false.

        print STDERR 'Bogus call to DBlink::prepare, no sql';

        0
    }
}

# run a stored query (object call) or a one-shot sql string (class
# call).  a class call manufactures a temporary object whose
# statement handle is discarded when $id goes out of scope.
#
# returns the fetched rows as a list or arrayref depending on caller
# context; arg flags nofetch/hashref/onerow/onecol modify the result
# shape as documented inline.

sub execute
{
    my $item = shift;

    my $id = ref $item ? $item : prepare( $item, @_ );

    # FIX(review): prepare returns 0 on missing sql; the old code
    # then died dereferencing it with an unhelpful message.

    croak 'DBlink::execute: no usable query' unless ref $id;

    # if we have just prepared the item then @_ contained only
    # prepare arguments; discard it.

    @_ = () unless ref $item;

    # at this point $queryz{$$id} is guaranteed to exist.

    my $qry = $queryz{$$id};

    my $verbose = $qry->{arg}{verbose} ||= 0;

    # printing the entire SQL each time it's run leads to waaay too
    # much output.  verbose > 1 writes the sql as necessary.

    if( $verbose )
    {
        local $, = "\n\t";

        print STDERR "\nExecuting: $$id";
        print STDERR @_ ? join "\n\t", "\nWith args:", @_ : "\tWithout Args\n";
        print STDERR "\n\n$qry->{sql}" if $verbose > 1;
    }

    # caller gets back an empty array if anything blows up or they've
    # requested "nofetch" mode.

    my $rowz = [];

    eval
    {
        # the database connection is delayed until we really need a
        # statement handle.

        _connect();

        # prepare the query if it isn't already, save the statement
        # handle for future use.

        my $sth = $qry->{sth} ||= $dbh->prepare( $qry->{sql} );

        # FIX(review): "my $t0 = Benchmark->new if $verbose" is the
        # classic my-with-statement-modifier undefined behavior;
        # use an explicit conditional.

        my $t0 = $verbose ? Benchmark->new : undef;

        $sth->execute( @_ );

        print STDERR "\n$$id Time: ",
            +( timestr( timediff( Benchmark->new, $t0 ) ) )
                if $verbose;

        # don't bother to grab the rows if we've been asked not to.
        # this might cause problems if nofetch is used on something
        # that does return rows -- probably should cancel the query
        # if the statement handle shows rows outstanding.

        $rowz = $sth->fetchall_arrayref unless $qry->{arg}{nofetch};
    };

    # since we are not dealing with ad-hoc queries there is no reason
    # any of them should fail.  this should use CGI::Carp in the real
    # version; for now this will suffice.

    croak "Roadkill: $@\n\n$qry->{sql}\n" if $@;

    print STDERR "\n$$id rows:", scalar @$rowz, "\n"
        if $verbose && !$qry->{arg}{nofetch};

    # hashref mode: interleave the field names and values, replacing
    # the array-of-arrays with an array-of-hashes on the way through.
    # beware of running this w/ the query-from-hell: field names tend
    # to be long and this expands quickly in memory...

    if( @$rowz && $qry->{arg}{hashref} )
    {
        my @namz = @{ fields $id };

        # $row is an alias for each item in the $rowz array, so
        # assigning it replaces the array ref in place.

        for my $row ( @$rowz )
        {
            $row = { map { ( $namz[$_], $row->[$_] ) } ( 0 .. $#namz ) };
        }
    }

    # onerow: break a single row's fields out at this level -- saves
    # a level of de-referencing for the caller (loses data if the
    # query returns more than one row; must run after hashref).
    #
    # onecol: collapse each row to its first column.  combined with
    # onerow this extracts a single value, e.g.,
    # my( $foo ) = $a->execute( $b ).
    #
    # the order here is important: dereferencing the first item after
    # running the map for onecol would replace the array ref with a
    # scalar.  if there is no data these hand back an empty array.

    $rowz = $rowz->[0] || [] if $qry->{arg}{onerow};

    @$rowz = map { $_->[0] } @$rowz if $qry->{arg}{onecol};

    # hand back the result as array or reference depending on the
    # calling context.

    wantarray ? @$rowz : $rowz
}

# let's assume that the list of fields in the database won't change
# during normal operation, so caching it won't hurt.  if a query with
# hashref set is used frequently this saves calling the statement
# handle's name lookup a zillion times.

sub fields
{
    my $id = shift;

    my $qry = $queryz{$$id}
        or croak "Bogus query id $$id";

    # FIX(review): make sure the database handle exists before
    # preparing -- execute may not have run yet.

    _connect();

    my $sth = $qry->{sth} ||= $dbh->prepare( $qry->{sql} );

    $qry->{fields} ||= $sth->{NAME_lc};

    wantarray ? @{ $qry->{fields} } : $qry->{fields}
}

# pass anything we don't know how to handle to the statement handle.
# if it doesn't know about the thing then give up (DBI will croak
# via RaiseError).

sub AUTOLOAD
{
    no strict;

    my ( $sub ) = $AUTOLOAD =~ m{.*::(.+)}
        or croak "AUTOLOAD '$AUTOLOAD' with no detectable subroutine";

    my $id = shift;

    my $qry = $queryz{$$id};

    _connect();

    $qry->{sth} ||= $dbh->prepare( $qry->{sql} );

    # FIX(review): this used to read ->$$sub( @_ ), which
    # dereferences the method-name *string* as a symbolic scalar ref
    # and calls an undef method name.  $sub already holds the method
    # name; call it directly.

    $qry->{sth}->$sub( @_ )
}

# since the statement handle isn't actually stored in our object we
# need to explicitly zap the hash entry when the id scalar goes out
# of scope.  destroying the hash entry cascades into whatever
# destructors are pending for objects stored there (e.g., the sth).

sub DESTROY
{
    my $id = shift;

    if( my $qry = $queryz{$$id} )
    {
        print STDERR "DBlink: dropping $$id ($qry->{sql})\n"
            if $qry->{arg}{verbose};

        delete $queryz{$$id};
    }
}

1;

__END__
--------------1D2C51D58BDCB0E9E32CC0A8
Content-Type: text/plain; charset=us-ascii; name="generate-db"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline; filename="generate-db"

#!/usr/local/bin/perl -w

=pod

=head1 Name

generate-db

=head1 Synopsis

process ts_* files to create schemas, tablespaces or their contents.

    generate-db {--schema|--space|--table} [--debug] [--zap]
        [--date=partition-start-date] [--days=partition-calendar-days]
        [--minutes=partition_size] [--filesize=dbf-size-in-MB]

one of --schema, --space or --table is required.

=head1 Description

eval's each input file, which populates the hash reference "$sql" as
a side effect.  the contents of $sql include keys for: drop, make,
cleanup, create and post.  the values for drop/make contain SQL or
shell commands to remove or create the tablespace; values for
cleanup/create are used to remove/add items within the tablespace
(tables, views, indexes, etc); post commands are processed after all
creation is complete and normally contain any inserts required for
reference tables.

switches:

=over 4

=item --schema --space --table

drop and regenerate the schema (userid), tablespace or contents
respectively.  there is no default, one of the items is required.

=item --debug

don't execute commands, just display them.  default is to execute
commands.

=item --zap

don't create anything, just remove it (processes drop, cleanup only).
default is to perform creates also.
=item --date

start date for partitioning.  this can be given in any date that
UnixDate (see Date::Manip) can understand.  YYYYMMDD, YYYY-MM-DD or
dd-Mmm-YYYY being popular ones.  default is the current day.

=item --days

number of calendar days to partition.  default is one.

=item --minutes

number of minutes per partition, must divide evenly into a full day
(i.e., 1440 % minutes = 0).  default is a full day (1440 minutes).

=item --filesize

starting dbf file size.  this is not used by most of the ts_* files
since they specify the size explicitly.  the default is 4 (i.e.,
database files are created with 4MB space, max space of 512MB).

=back

=over 4

=item +

This is designed to be called from esh.  An example .env file for
this would be:

    {
        ORACLE_SID     => 'viewerp',
        ORACLE_HOST    => 'oraview',
        ORACLE_USER    => 'tdrdw',
        ORACLE_HOME    => '/ora0000/app/oracle/product/8.1.6',

        # message tables and the lookups have only primary keys which
        # live in the same tablespace as the data.  call_event has
        # other indexes which live in another single tablespace.

        CALL_TABLE_DIR => '/ora4000/demo /ora5000/demo /ora6000/demo
            /ora7000/demo /ora8000/demo /ora9000/demo /ora10000/demo
            /ora11000/demo',
        CALL_INDEX_DIR => '/ora18000/demo /ora19000/demo /ora20000/demo
            /ora21000/demo',
        MSG_TABLE_DIR  => '/ora12000/demo /ora13000/demo /ora14000/demo
            /ora15000/demo /ora16000/demo /ora17000/demo',
        LKUP_TABLE_DIR => '/ora2000/demo',
    }

(NOTE(review): resolved a CVS merge conflict here in favor of the
newer 1.17 example.)

=over 4

ORACLE_SID, ORACLE_HOST, ORACLE_USER & ORACLE_HOME are used by
generate-db.

=item -

ORACLE_USER is created as necessary if --schema is used.

=item -

ORACLE_HOST should probably be an interface on the local system for
--schema or --space operations due to filesystem commands used to
clean up datafiles.

=item -

ORACLE_SID and ORACLE_HOME have their normal meanings for oracle.

=back

=item +

*_DIR variables in the environment are used by the ts_* files to
split up datafiles.  they aren't used by generate-db per-se but it's
probably helpful to have them in the same host environment file.

=back

=head1 Author

Knightsbridge Solutions.

=head1 Copyright

(C) 2001 Knightsbridge Solutions.  All rights reserved.

=head1 Known Bugs

=head1 Section Notes

=cut

########################################################################
# housekeeping
########################################################################

use strict;

# newline output after each print, use newline-tab as list separator.

$\ = "\n";
$, = "\n\t";

# put everything to stderr, saves extra entries in print.

select STDERR;

# CPAN modules

use Carp;
use DBI;
use DBD::Oracle;
use Getopt::Long;
use Date::Manip;

# homegrown modules

use tdrdw 'dblink';
use DBlink;

########################################################################
# deal with the command line
########################################################################

my @optionz =
qw(
    space!  table!  schema!  zap!  debug!
    minutes=i  days=i  date=s  filedir=s  filesize=i
);

my $cmdline = {};

# NOTE(review): the original usage here-doc was destroyed when this
# file was mangled in transit; the text below is a reconstruction --
# confirm against the repository copy.

unless( GetOptions( $cmdline, @optionz ) )
{
    print STDERR <<'USAGE';

usage: generate-db {--schema|--space|--table} [--debug] [--zap]
    [--date=partition-start-date] [--days=N] [--minutes=N]
    [--filesize=MB] sql-file [...]

USAGE

    exit -1;
}

croak 'Missing --schema, --space or --table'
    unless $cmdline->{space} || $cmdline->{table} || $cmdline->{schema};

# FIX(review): "croak ... unless my $user = ..." buries a my inside a
# statement-modifier condition; use the conventional or-croak form.

my $user = $ENV{ORACLE_USER}
    or croak 'ORACLE_USER missing in environment';

croak 'No sql file arguments on command line!'
    unless @ARGV;

# --schema implies --space implies --table.

$cmdline->{space} ||= $cmdline->{schema};
$cmdline->{table} ||= $cmdline->{space};

my $nocreate = $cmdline->{zap};

# default is a 4MB initial file size for all .dbf's.

my $filesize = $cmdline->{filesize} || 4;
$filesize .= 'M';

my $debugmode = $cmdline->{debug}   || 0;
my $days      = $cmdline->{days}    || 1;
my $date      = $cmdline->{date}    || 'today';
my $min       = $cmdline->{minutes} || 1440;

########################################################################
# globals
########################################################################

# NOTE(review): resolved two CVS conflicts here per revision 1.17 --
# the hard-coded $defdir went away in favor of esh-defined variables.

# database handle used for the work via $dbh->do() in most cases.

my $connect = qq{dbi:Oracle:host=$ENV{ORACLE_HOST};sid=$ENV{ORACLE_SID}};

# database connection as the user for this schema.  this begins life
# defined but false; the connection is made once someone actually
# uses a statement.

my $dbh = 0;

########################################################################
# subroutines
########################################################################

=head3 munge_table

generate sql substituted for PARTLINZ and insert it into the table
generation code.  partitioning creates N+2 partitions, N for the date
broken up into partition-minutes units of time, one for "before",
one for "after".  the default calendar date is today.

partitions are named like:

    partition p%Y%m%d_%H%M values less than ( %s ) tablespace TABLESPACE,

available tablespaces are discovered by querying v$tablespaces for
names like "${user}_${tablespacename}%" in uppercase.  this allows
the code to handle multiple schemas w/o the underlying tablespace
code knowing about it.

=head3 replacing PARTLINZ

the token \bPARTLINZ\b in all lines of the table sql (i.e.,
@{ $table{create} }) are replaced with the joined lines.  the syntax:

    s/\bPARTLINZ\b/join "\n\t", .../eg

treats the replacement as perl code and runs it wherever there is a
match ("e") and handles the replacement globally ("g").  the global is
probably overkill since we will normally only be dealing with a
single statement at one time, but this may change over time as the
program is expanded to create multiple tables.  newline-tabs make it
look nicer on printout and make debugging a bit easier.

=cut

sub munge_table
{
    my $tablespace = shift;
    my $tablesql   = shift;

    if( grep /PARTLINZ/, @$tablesql )
    {
        # we won't generate buckets which don't add up to a full day
        # (e.g., 7 minutes).

        croak "Invalid partition interval: $min" if 1440 % $min;

        my @tspacz =
            DBlink->execute
            (
                qq{select name from v\$tablespace where name like '\U${user}%$tablespace\%\E'},
                onecol  => 1,
                verbose => 1,
            )
                or croak "no tablespaces matching $tablespace";

        # convert the date and offset to internal format.  time
        # begins at the "before" bucket cutoff time and advances by
        # minutes through $after.  the partitioning scheme uses
        # strictly less than, so a final partition of "tomorrow"
        # will contain today's data.
        #
        # NOTE(review): Date::Manip's DateCalc normally takes two
        # date/delta arguments; the single-argument call below may
        # have been intended as ParseDate -- confirm.

        my $time   = DateCalc "$date 00:00:00";
        my $after  = DateCalc $time, "+ $days days";
        my $offset = ParseDateDelta "+ $min min";

        # all of the buckets are less-than comparisons; MAXVALUE is
        # an overflow bucket.  the default of 1440 generates a single
        # data bucket per date (note the use of strictly less than
        # in the bucket comparisons).

        my @linz = ( "\n" );

        my $partformat =
            q{ partition p%Y%m%d_%H%M values less than ( %s ) tablespace TABLESPACE, };

        my $i = 0;

        for
        (
            ;
            Date_Cmp( $time, $after ) < 1 ;
            $time = DateCalc( $time, $offset ), ++$i
        )
        {
            # add a partition line for this cutoff time, cycling
            # through the available tablespaces.

            ( my $fmt = $partformat ) =~ s/TABLESPACE/$tspacz[$i % @tspacz]/eg;

            push @linz, UnixDate $time, $fmt;
        }

        push @linz,
            'partition p9999 values less than ( MAXVALUE ) tablespace '
            . $tspacz[$i % @tspacz];

        print 'Partitions:', @linz;

        # replace PARTLINZ with the accumulated list of partitions.

        s/PARTLINZ/join "\n\t", @linz/eo
            && print "\nUpdated PARTLINZ:\n$_"
                for( @$tablesql );
    }

    # handle hard-coded tablespace entries used for non-partition
    # tables and indexes.

    s/(tablespace\s+)(?!$user)/$1${user}_/oi for @$tablesql;

    # return the reference for daisychaining.

    $tablesql
}

=head3 munge_tablespace

takes an array reference (e.g., $sql->{drop}) and modifies the
contents for the current schema & environment.  tablespaces are
hard-coded in the ts_* files.  step here is to replace tablespaces
that aren't followed by a schema with the schema.  this is where

    s/(tablespace\s+)(?!$user)/$1${user}_/oi
        && print "\nUpdated tablespace: \n$_";

comes from.  trick is to locate all occasions of the word tablespace
followed by white space followed by something other than the username
-- (?!X) is a perl regex operator for not-followed-by -- and add the
matched portion (the word tablespace and whitespace) followed by the
user.
=cut

# NOTE(review): resolved CVS conflicts here -- 1.17 deleted this sub's
# POD (kept above, since it documents the regex) and collapsed the
# loop to the postfix-for form; both sides performed the same
# substitution.

sub munge_tablespace
{
    my $sql = shift;

    s/(tablespace\s+)(?!$user)/$1${user}_/oi
        && print "\nUpdated tablespace: \n$_"
            for( @$sql );

    # pass back the reference for daisy-chaining.

    $sql
}

# drop the schema user (dropping is tolerant of the user not
# existing) and, unless --zap was given, re-create it with dba
# access.  exits the program when --zap was given.

sub douser
{
    my $user = $ENV{ORACLE_USER}
        or croak 'Roadkill: missing ORACLE_USER';

    # NOTE(review): hard-coded admin credentials; these should come
    # from the environment like everything else.

    my @adminconnect = DBlink::connectfieldz qw( user admin pass nimda );

    eval
    {
        my $admin = DBI->connect( @adminconnect );

        $admin->{RaiseError} = 1;

        # step one: get rid of any existing user.  if this fails for
        # any reason other than the user not existing (ORA-01918) we
        # cannot proceed to add the user and we croak.

        print "Dropping user $user...";

        eval { $admin->do( qq{drop user $user cascade} ) };

        !$@ || $@ =~ /ORA-01918/
            or croak "Failed dropping $user: $@";

        # FIX(review): the create was guarded by "if( $nocreate )",
        # which is backwards -- $nocreate (--zap) means drop only.

        unless( $nocreate )
        {
            # attempt to create the userid w/ dba access.

            $admin->do( qq{create user $user identified by $user} );
            $admin->do( qq{grant connect, dba to $user} );
        }
    };

    croak "Roadkill: new user create failed: $@" if $@;

    if( $nocreate )
    {
        print STDERR "Finished removing $user";

        exit 0
    }

    # at this point the new user exists.

    0
}

# process the munged lines to execute the cleanup/create operations.
# oracle's sqlplus syntax allows use of 'host' or '!' to indicate
# that the command should be run by the shell.  so... anything
# beginning with host or ! is pushed through qx, the rest is
# processed via dbi through oracle.
#
# arguments are ( sql => raise, sql => raise, ... ) pairs: each sql
# item (string or arrayref of strings) is followed by the RaiseError
# setting to use while running it.  the eval only does something if
# RaiseError is true; otherwise the caller will probably have to
# check for errors via $dbh -- or most likely ignore them.

sub dosql
{
    while ( @_ )
    {
        my $item = shift;

        $dbh->{RaiseError} = shift;

        # less work to convert the occasional text item into an anon
        # array than to expand potentially large lists multiple
        # times...  could also use ref $item ? @$item : $item, but
        # for now this makes debugging a bit easier.

        my $sqlz = ref $item ? $item : [ $item ];

        SQL:
        for ( @$sqlz )
        {
            # ignore blank lines, though there shouldn't be any at
            # this point...

            unless( $_ )
            {
                print 'dosql: Oddity: blank statement in source SQL';

                next SQL;
            }

            # remove extra newlines -- the blank lines seem to upset
            # oracle's parser.

            s/^\s+//;
            s/\s+$//;
            s/\n+/\n/gs;
            s/\n\t+/\n\t/gs;

            unless( $_ )
            {
                print 'dosql: Oddity: blank statement after whitespace cleanup.';

                next SQL;
            }

            print "Doing:\n$_\n";

            # if we find a ! or "host" at the beginning, pass the
            # rest through the shell -- $1 from the regex is the
            # item to process.  note: avoid anything that runs
            # regexen between the m{} and the use of $1.

            if( m{^(?:host|!)\b(.+)}i )
            {
                # we will survive any of these, regardless of any
                # error setting passed in.
                #
                # FIX(review): 'Shell: $1' was single-quoted and
                # never interpolated; use double quotes.

                print $debugmode ? "Shell: $1" : qx( $1 );
            }
            elsif( $_ )
            {
                # given the value passed in for RaiseError we will
                # either ignore errors or croak on them.

                print $debugmode ? "SQL: $_" : eval { $dbh->do( $_ ) };

                croak "Failed: $@" if $@;
            }
            else
            {
                print 'Oddity: empty line in input sql';
            }
        }
    }

    0
}

########################################################################
# real work begins here
########################################################################

# create a new user if the user asked us to and we aren't zapping an
# existing one.

douser if $cmdline->{schema} && !$cmdline->{zap};

# at this point either $user exists or --schema should have been
# used.

my @connect = DBlink::connectfieldz( user => $user, pass => $user );

$dbh = DBI->connect( @connect )
    or croak "Roadkill: no dbi connection as $user";

# command line arguments are a list of files containing
# per-tablespace code to generate the results.
#
# job here is to walk down the list of files, read each one and
# process the table & -space generation in each.
# tablespaces seem like the best bet here since their contents are
# created together.
#
# if we separate indexes out into separate tablespaces then the sets
# of tablespaces might be housed in one file or kept separate and
# processed sequentially via the command line.

TABLESPACE:
for( @ARGV )
{
    # cycle looks like:
    #
    #   contents of the file are slurped whole.
    #   $sql is set to false.
    #   we eval the file contents.
    #   check $sql for a non-false value.
    #
    # if we are still alive after the check -- i.e., $sql isn't
    # false -- then we have a reasonable chance of running.
    #
    # re-assigning $file is only done to make debugging easier; the
    # file handle is cleaned up either way.

    local $/ = undef;

    my $sql = '';

    # FIX(review): three-arg open; the original two-arg form allows
    # mode injection via the filename.

    open my $file, '<', $_ or croak "$_ : $!";

    # NOTE(review): resolved CVS conflict -- 1.17 slurps into $file
    # and then string-evals it (the older side evaled the readline
    # directly).

    $file = <$file>;

    eval $file;

    croak "no \$sql assigned in $_" unless $sql;

    # drop the tables first -- saves problems to clean them up here
    # where accidental cross-tablespace entries may get cleaned up
    # automatically.

    dosql $sql->{cleanup} => 0 if $sql->{cleanup};

    # if we are creating the space then drop it and then remake it.
    # the --zap switch skips the subsequent creation at each level.
    #
    # note: some items may lack cleanup, create or post sections if
    # they only create a tablespace or don't have any post-processing
    # (e.g., inserting data into lookup tables).

    if( $cmdline->{space} )
    {
        # NOTE(review): resolved CVS conflicts -- "doit" never
        # existed; 1.17 renamed these calls to dosql and
        # parenthesized munge_tablespace so the RaiseError flag is
        # not passed into it.

        dosql ( ( munge_tablespace $sql->{drop} ) => 0 );

        next TABLESPACE if $nocreate;

        # if we get this far then we aren't zapping anything and
        # need to create the tablespace.

        dosql ( ( munge_tablespace $sql->{make} ) => 1 );
    }

    # always run the create and post portions unless we are zapping.
    # post ignores errors to allow simple default values via insert
    # which may cause primary key collisions, and runs after the
    # creates since we can't post-process something that didn't get
    # created...

    dosql
    (
        ( munge_table $sql->{tsname}, $sql->{create} ) => 1,
        $sql->{post} => 0,
    )
        if $sql->{create} || $sql->{post};
}

# keep the shell happy

0

__END__