Perhaps you could use something like this (we use it when batch-updating a table with 20+ million rows and don't want to hog replication).
import sys
import psycopg2
from datetime import datetime
firstid = 0
splitsize = 50 # Size of each batch
# Complicated query
query_complex = """
CREATE TEMP TABLE tmptmp AS
SELECT * FROM schema.massive_table
"""
# Query to be run at intervals
query = """
SELECT * FROM tmptmp WHERE id BETWEEN %(startid)s AND %(endid)s
"""
conn = psycopg2.connect("dbname=database_name user=postgres")
curs = conn.cursor()
# Run complicated query
curs.execute(query_complex)
# Get highest id
curs.execute("SELECT max(id) FROM tmptmp")
maxid = curs.fetchall()[0][0]
print "Max id: %s" % maxid
for startid in range(firstid, maxid, splitsize):
endid = startid + splitsize - 1
print "%s: Running query on range %s to %s" % (datetime.now(), startid, endid)
curs.execute(query, {'startid':startid, 'endid':endid})
print "%s: Affected rows: %s. Total completed: %s%%" % (datetime.now(), curs.rowcount, round((endid * 100) / maxid, 3))
print "Done."
The output looks like this:
Max id: 308
2010-06-18 11:59:11.271000: Running query on range 0 to 49
2010-06-18 11:59:11.271000: Affected rows: 49. Total completed: 15.0%
2010-06-18 11:59:11.271000: Running query on range 50 to 99
2010-06-18 11:59:11.271000: Affected rows: 50. Total completed: 32.0%
2010-06-18 11:59:11.271000: Running query on range 100 to 149
2010-06-18 11:59:11.271000: Affected rows: 50. Total completed: 48.0%
2010-06-18 11:59:11.271000: Running query on range 150 to 199
2010-06-18 11:59:11.271000: Affected rows: 49. Total completed: 64.0%
2010-06-18 11:59:11.271000: Running query on range 200 to 249
2010-06-18 11:59:11.271000: Affected rows: 42. Total completed: 80.0%
2010-06-18 11:59:11.271000: Running query on range 250 to 299
2010-06-18 11:59:11.318000: Affected rows: 3. Total completed: 97.0%
2010-06-18 11:59:11.318000: Running query on range 300 to 349
2010-06-18 11:59:11.318000: Affected rows: 1. Total completed: 113.0%
Done.
// John