I have a Python script that runs at the beginning of each day via Task Scheduler, reads a constantly growing log file (a text file), and inserts the data into a PostgreSQL DB. A new log file is generated each day, and each log is roughly 1 GB.
- Platform: Windows 7 (not my usual preference, but I have to use it this time)
- RAM: 32GB
I searched for advice on tuning PostgreSQL for heavy write I/O, and here is what I modified:

    shared_buffers = 8GB
    work_mem = 100MB
    maintenance_work_mem = 512MB
    checkpoint_segments = 100
    checkpoint_timeout = 1h
    synchronous_commit = off
    full_page_writes = off
    fsync = off
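
To confirm the server actually picked these values up after the restart, they can be checked with SHOW from a quick, separate Python sketch like this (not part of the loader script):

    import psycopg2

    # One-off sanity check: print the value the server is actually using
    # for each of the tuned settings.
    conn = psycopg2.connect("dbname='postgis20' user='postgres' password='' host='localhost'")
    cur = conn.cursor()
    for name in ('shared_buffers', 'work_mem', 'maintenance_work_mem',
                 'checkpoint_segments', 'checkpoint_timeout',
                 'synchronous_commit', 'full_page_writes', 'fsync'):
        cur.execute('SHOW ' + name)      # SHOW takes no bind parameters; names are hard-coded here
        print name, '=', cur.fetchone()[0]
    conn.close()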
The script reads the log file line by line and inserts into the DB:

    import logging
    import psycopg2 as psycopg

    logging.basicConfig(level=logging.INFO)

    try:
        connectStr = "dbname='postgis20' user='postgres' password='' host='localhost'"
        cx = psycopg.connect(connectStr)
        cu = cx.cursor()
        logging.info("connected to DB")
    except psycopg.Error:
        logging.error("could not connect to the database")
    import time

    logfile = open('textfile.log', 'r')   # renamed so it does not shadow the built-in 'file'
    while True:
        where = logfile.tell()
        line = logfile.readline()
        if not line:
            time.sleep(1)           # nothing new yet, wait for more data
            logfile.seek(where)     # rewind to retry from the same position
        else:
            print line,             # line already ends with a newline
            dodecode(line)
    def dodecode(fields):
        from time import strftime, gmtime
        from calendar import timegm
        import os

        msg = fields.split(',')
        part = int(msg[2])       # the field is a plain integer, no need for eval()
        msgnum = int(msg[3])     # message number field
        print "message#:", msgnum
        print fields
        if part == 1:
            if msgnum == 1:
                msg1 = msg_1.decode(bv)
                #print "message1 :", msg1
                Insert(msgnum, time, msg1)
            elif msgnum == 2:
                msg2 = msg_2.decode(bv)
                #print "message2 :", msg2
                Insert(msgnum, time, msg2)
            elif msgnum == 3:
            ....
            ....
            ....
    def Insert(msgnum, time, msg):
        # 'cu' is the module-level cursor; 'text' is set elsewhere in the script
        point = 'POINT(%s %s)' % (float(msg['longitude']), float(msg['latitude']))
        try:
            if msgnum in [1, 2, 3]:
                if msg['type'] in (0, 1):   # both types run the same three statements
                    cu.execute(
                        "INSERT INTO table1 (messageid, timestamp, userid, position, text) "
                        "SELECT %s, %s, %s, ST_GeomFromText(%s), %s "
                        "WHERE NOT EXISTS (SELECT 1 FROM table1 WHERE timestamp = %s AND text = %s)",
                        (msgnum, time, msg['UserID'], point, text, time, text))
                    cu.execute(
                        "INSERT INTO table2 (field1, field2, field3, time_stamp, pos) "
                        "SELECT %s, %s, %s, %s, ST_GeomFromText(%s) "
                        "WHERE NOT EXISTS (SELECT 1 FROM table2 WHERE field1 = %s)",
                        (msg['UserID'], int(msg['UserName']), int(msg['UserIO']), time, point, msg['UserID']))
                    cu.execute(
                        "UPDATE table2 SET field3 = %s, time_stamp = %s, pos = ST_GeomFromText(%s) "
                        "WHERE field1 = %s AND time_stamp < %s",
                        (int(msg['UserIO']), time, point, msg['UserID'], time))
                elif msg['type'] == 2:
            ....
            ....
            ....
The problem is, after 6 hours of running, the script had only inserted about 5 minutes' worth of the file's data! I suspect the data is being written to the log file in chunks rather than in complete lines, but I can't figure out how to handle that so the data lands in the DB in close to real time.
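
To show what I mean by chunks versus lines, here is a rough sketch (a hypothetical follow_lines helper, not something my script currently does) of how incomplete chunks could be buffered until a full newline-terminated line has arrived:

    import time

    def follow_lines(path):
        """Yield only complete lines, buffering any partial chunk at the end of the file."""
        buf = ''
        f = open(path, 'r')
        while True:
            chunk = f.read(8192)          # read whatever has been appended so far
            if not chunk:
                time.sleep(1)             # nothing new yet, wait and retry
                f.seek(f.tell())          # clear EOF so the next read sees newly appended data
                continue
            buf += chunk
            while '\n' in buf:
                line, buf = buf.split('\n', 1)
                yield line + '\n'         # hand back one complete line at a time

    # usage (hypothetical):
    # for line in follow_lines('textfile.log'):
    #     dodecode(line)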