リビジョン | ffa1d14723109168280a26e10532cd95d1789c61 (tree) |
---|---|
日時 | 2010-04-09 11:18:24 |
作者 | (アイコン画像は取得できませんでした) |
コミッター | Kei Funagayama |
Tuning Logging
@@ -1,8 +1,8 @@ | ||
1 | 1 | [loggers] |
2 | -keys=root,pysilhouette,pysilhouette_trace,sqlalchemy.engine | |
2 | +keys=root,pysilhouette,pysilhouette_trace,sqlalchemy.engine,sqlalchemy.pool,sqlalchemy.orm,pysilhouette.asynscheduler,pysilhouette.asynperformer | |
3 | 3 | |
4 | 4 | [handlers] |
5 | -keys=default,pysilhouette,pysilhouette_trace,sqlalchemy.engine | |
5 | +keys=default,pysilhouette,pysilhouette_trace,sqlalchemy.engine,sqlalchemy.pool,sqlalchemy.orm,pysilhouette.asynscheduler,pysilhouette.asynperformer | |
6 | 6 | |
7 | 7 | [formatters] |
8 | 8 | keys=default,common |
@@ -36,24 +36,69 @@ class=handlers.RotatingFileHandler | ||
36 | 36 | formatter=common |
37 | 37 | args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) |
38 | 38 | |
39 | +[handler_sqlalchemy.pool] | |
40 | +class=handlers.RotatingFileHandler | |
41 | +formatter=common | |
42 | +args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) | |
43 | + | |
44 | +[handler_sqlalchemy.orm] | |
45 | +class=handlers.RotatingFileHandler | |
46 | +formatter=common | |
47 | +args=('/var/log/pysilhouette/sql.log', 'a', (5 *1024 *1024), 5) | |
48 | + | |
49 | +[handler_pysilhouette.asynscheduler] | |
50 | +class=handlers.RotatingFileHandler | |
51 | +formatter=common | |
52 | +args=('/var/log/pysilhouette/application.log', 'a', (5 *1024 *1024), 5) | |
53 | + | |
54 | +[handler_pysilhouette.asynperformer] | |
55 | +class=handlers.RotatingFileHandler | |
56 | +formatter=common | |
57 | +args=('/var/log/pysilhouette/application.log', 'a', (5 *1024 *1024), 5) | |
58 | + | |
39 | 59 | [logger_root] |
40 | 60 | level=ERROR |
41 | 61 | handlers=default |
42 | 62 | |
43 | 63 | [logger_pysilhouette] |
44 | -level=DEBUG | |
64 | +level=INFO | |
45 | 65 | handlers=pysilhouette |
46 | 66 | propagate=0 |
47 | 67 | qualname=pysilhouette |
48 | 68 | |
49 | 69 | [logger_pysilhouette_trace] |
50 | -level=DEBUG | |
70 | +level=INFO | |
51 | 71 | handlers=pysilhouette_trace |
52 | 72 | propagate=0 |
53 | 73 | qualname=pysilhouette_trace |
54 | 74 | |
55 | 75 | [logger_sqlalchemy.engine] |
56 | -level=DEBUG | |
76 | +level=WARNING | |
57 | 77 | handlers=sqlalchemy.engine |
58 | 78 | propagate=0 |
59 | 79 | qualname=sqlalchemy.engine |
80 | + | |
81 | +[logger_sqlalchemy.pool] | |
82 | +level=WARNING | |
83 | +handlers=sqlalchemy.pool | |
84 | +propagate=0 | |
85 | +qualname=sqlalchemy.pool | |
86 | + | |
87 | +[logger_sqlalchemy.orm] | |
88 | +level=INFO | |
89 | +handlers=sqlalchemy.orm | |
90 | +propagate=0 | |
91 | +qualname=sqlalchemy.orm | |
92 | + | |
93 | +[logger_pysilhouette.asynscheduler] | |
94 | +level=WARNING | |
95 | +handlers=pysilhouette.asynscheduler | |
96 | +propagate=0 | |
97 | +qualname=pysilhouette.asynscheduler | |
98 | + | |
99 | +[logger_pysilhouette.asynperformer] | |
100 | +level=WARNING | |
101 | +handlers=pysilhouette.asynperformer | |
102 | +propagate=0 | |
103 | +qualname=pysilhouette.asynperformer | |
104 | + |
@@ -71,19 +71,9 @@ class AsynPerformer(ER): | ||
71 | 71 | finally: |
72 | 72 | fp.close() |
73 | 73 | |
74 | - self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
74 | + #self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
75 | 75 | session = self.db.get_session() |
76 | 76 | |
77 | - # TODO:dummy data | |
78 | - #dummy_set_job(self.cf, | |
79 | - # int(self.cf['asynperformer.thread.pool.size']), | |
80 | - # 'echo "aaaaaa"', | |
81 | - # 'echo "bbbbb"', | |
82 | - # 'echo "cccc"', | |
83 | - # #'serial', | |
84 | - # 'parallel', | |
85 | - # self.db) | |
86 | - | |
87 | 77 | # Pending JobGroup search |
88 | 78 | if self.cf['asynperformer.thread.pool.size'] <= tq.now_alive(): |
89 | 79 | continue |
@@ -93,30 +83,27 @@ class AsynPerformer(ER): | ||
93 | 83 | int(self.cf['asynperformer.thread.pool.size']) - tq.now_alive()) |
94 | 84 | |
95 | 85 | session.close() |
96 | - | |
97 | - self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' \ | |
98 | - % len(m_jgs)) | |
99 | - | |
100 | - self.logger.debug('filo code=%s, cf asynperformer.mkfifo.start.code=%s' % (code, self.cf["asynperformer.mkfifo.start.code"])) | |
86 | + #self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' % len(m_jgs)) | |
87 | + #self.logger.debug('filo code=%s, cf asynperformer.mkfifo.start.code=%s' % (code, self.cf["asynperformer.mkfifo.start.code"])) | |
88 | + self.logger.info('Activity Information. - [fifo_code=%s, type=serial, jobgroup_num=%d]' % (code, len(m_jgs))) | |
101 | 89 | if code == self.cf["asynperformer.mkfifo.start.code"]: |
102 | 90 | if 0 < len(m_jgs): |
103 | 91 | for m_jg in m_jgs: |
104 | 92 | try: |
105 | - # thread worker!! start | |
106 | - tq.put(ThreadWorker(self.cf, self.db, m_jg.id)) | |
93 | + tq.put(ThreadWorker(self.cf, self.db, m_jg.id)) # thread worker!! start | |
107 | 94 | except Exception, e: |
108 | - self.logger.info('Failed to perform the job group. Exceptions are not expected. - jobgroup_id=%d : %s' | |
95 | + self.logger.debug('Failed to perform the job group. Exceptions are not expected. - jobgroup_id=%d : %s' | |
109 | 96 | % (m_jg.id, str(e.args))) |
110 | 97 | print >>sys.stderr, traceback.format_exc() |
111 | 98 | t_logger = logging.getLogger('pysilhouette_traceback') |
112 | 99 | t_logger.error(traceback.format_exc()) |
113 | 100 | else: |
114 | - self.logger.info('No Job Group.') | |
101 | + self.logger.debug('No Job Group.') | |
115 | 102 | elif code == self.cf["asynperformer.mkfifo.stop.code"]: |
116 | - self.logger.info('Received stop code from the FIFO file. - code=%s' % code) | |
103 | + self.logger.warning('Received stop code from the FIFO file. - code=%s' % code) | |
117 | 104 | break |
118 | 105 | else: |
119 | - self.logger.info('Received illegal code from the FIFO file. - code=%s' % code) | |
106 | + self.logger.warning('Received illegal code from the FIFO file. - code=%s' % code) | |
120 | 107 | |
121 | 108 | def sigterm_handler(signum, frame): |
122 | 109 | logger = logging.getLogger('pysilhouette.asynperformer') |
@@ -78,9 +78,8 @@ class AsynScheduler(ER): | ||
78 | 78 | |
79 | 79 | def sigterm_handler(signum, frame): |
80 | 80 | logger = logging.getLogger('pysilhouette.asynscheduler.signal') |
81 | - logger.info('Stop the asynschedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
81 | + logger.warning('Stop the asynschedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
82 | 82 | |
83 | - | |
84 | 83 | def main(): |
85 | 84 | (opts, args) = getopts() |
86 | 85 | if chkopts(opts) is True: |
@@ -116,7 +115,7 @@ def main(): | ||
116 | 115 | finally: |
117 | 116 | if opts.daemon is True and os.path.isfile(opts.pidfile): |
118 | 117 | os.unlink(opts.pidfile) |
119 | - logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
118 | + logger.warning('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
120 | 119 | |
121 | 120 | return PROCERROR |
122 | 121 |
@@ -158,6 +158,7 @@ def observer(opts, cf): | ||
158 | 158 | |
159 | 159 | try: |
160 | 160 | while True: |
161 | + simple_log = [] | |
161 | 162 | # Performer |
162 | 163 | if not pf.poll() is None: |
163 | 164 | logger.debug('return code=%d' % pf.returncode) |
@@ -168,7 +169,8 @@ def observer(opts, cf): | ||
168 | 169 | logger.info('performer : [start] - pid=%s, count=%s/%s' |
169 | 170 | % (pf.pid, count, cf['observer.restart.count'])) |
170 | 171 | else: |
171 | - logger.info('performer [running] - pid=%s, count=%s/%s' | |
172 | + simple_log.append('performer (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
173 | + logger.debug('performer [running] - pid=%s, count=%s/%s' | |
172 | 174 | % (pf.pid, count, cf['observer.restart.count'])) |
173 | 175 | |
174 | 176 | # Scheduler |
@@ -181,7 +183,8 @@ def observer(opts, cf): | ||
181 | 183 | logger.info('scheduler : [start] - pid=%s, count=%s/%s' |
182 | 184 | % (sd.pid, count, cf['observer.restart.count'])) |
183 | 185 | else: |
184 | - logger.info('scheduler [running] - pid=%s, count=%s/%s' | |
186 | + simple_log.append('scheduler (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
187 | + logger.debug('scheduler [running] - pid=%s, count=%s/%s' | |
185 | 188 | % (sd.pid, count, cf['observer.restart.count'])) |
186 | 189 | |
187 | 190 | # AsynPerformer |
@@ -194,7 +197,8 @@ def observer(opts, cf): | ||
194 | 197 | logger.info('asynperformer : [start] - pid=%s, count=%s/%s' |
195 | 198 | % (asynpf.pid, count, cf['observer.restart.count'])) |
196 | 199 | else: |
197 | - logger.info('asynperformer [running] - pid=%s, count=%s/%s' | |
200 | + simple_log.append('asynperformer (running) - count=%s/%s' % (count, cf['observer.restart.count'])) | |
201 | + logger.debug('asynperformer [running] - pid=%s, count=%s/%s' | |
198 | 202 | % (asynpf.pid, count, cf['observer.restart.count'])) |
199 | 203 | |
200 | 204 | # AsynScheduler |
@@ -207,9 +211,12 @@ def observer(opts, cf): | ||
207 | 211 | logger.info('asynscheduler : [start] - pid=%s, count=%s/%s' |
208 | 212 | % (asynsd.pid, count, cf['observer.restart.count'])) |
209 | 213 | else: |
210 | - logger.info('asynscheduler [running] - pid=%s, count=%s/%s' | |
214 | + simple_log.append('asynscheduler (running) - count=%s/%s' % ( count, cf['observer.restart.count'])) | |
215 | + logger.debug('asynscheduler [running] - pid=%s, count=%s/%s' | |
211 | 216 | % (asynsd.pid, count, cf['observer.restart.count'])) |
212 | 217 | |
218 | + logger.info(str(simple_log)[1:-1]) | |
219 | + | |
213 | 220 | # status output |
214 | 221 | status(count, status_count, default_count, False) |
215 | 222 |
@@ -53,52 +53,23 @@ def create_database(cf): | ||
53 | 53 | db = Database(cf['database.url'], |
54 | 54 | encoding="utf-8", |
55 | 55 | convert_unicode=True, |
56 | - #assert_unicode='warn', # DEBUG | |
57 | - #echo = opts.verbose, | |
58 | - #echo_pool = opts.verbose, | |
59 | - echo=True, # TODO | |
60 | - echo_pool=True # TODO | |
61 | 56 | ) |
62 | 57 | else: |
63 | 58 | if int(cf['database.pool.status']) == 1: |
64 | 59 | db = Database(cf['database.url'], |
65 | 60 | encoding="utf-8", |
66 | 61 | convert_unicode=True, |
67 | - #assert_unicode='warn', # DEBUG | |
68 | 62 | poolclass=QueuePool, |
69 | 63 | pool_size=int(cf['database.pool.size']), |
70 | 64 | max_overflow=int(cf['database.pool.max.overflow']), |
71 | - #echo = opts.verbose, | |
72 | - #echo_pool = opts.verbose, | |
73 | - echo=True, # TODO | |
74 | - echo_pool=True # TODO | |
75 | 65 | ) |
76 | 66 | else: |
77 | 67 | db = Database(cf['database.url'], |
78 | 68 | encoding="utf-8", |
79 | 69 | convert_unicode=True, |
80 | - #assert_unicode='warn', # DEBUG | |
81 | 70 | poolclass=SingletonThreadPool, |
82 | 71 | pool_size=int(cf['database.pool.size']), |
83 | - #echo = opts.verbose, | |
84 | - #echo_pool = opts.verbose, | |
85 | - echo=True, # TODO | |
86 | - echo_pool=True # TODO | |
87 | 72 | ) |
88 | - """ | |
89 | - db = Database(cf['database.url'], | |
90 | - encoding="utf-8", | |
91 | - convert_unicode=True, | |
92 | - #assert_unicode='warn', # DEBUG | |
93 | - poolclass=SingletonThreadPool, | |
94 | - pool_size=int(cf['database.pool.size']), | |
95 | - #echo = opts.verbose, | |
96 | - #echo_pool = opts.verbose, | |
97 | - echo=True, # TODO | |
98 | - echo_pool=True, # TODO | |
99 | - #strategy='threadlocal' | |
100 | - ) | |
101 | - """ | |
102 | 73 | if db is None: |
103 | 74 | raise SilhouetteDBException('Initializing a database error - "Database" failed to create.') |
104 | 75 | reload_mappers(db.get_metadata()) |
@@ -61,14 +61,12 @@ class Performer(ER): | ||
61 | 61 | finally: |
62 | 62 | fp.close() |
63 | 63 | |
64 | - self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
64 | + #self.logger.info('Received code from the FIFO file. - code=%s' % code) | |
65 | 65 | session = self.db.get_session() |
66 | 66 | m_jgs = jobgroup_findbytype_status(session, JOBGROUP_TYPE['SERIAL']) |
67 | 67 | session.close() |
68 | - | |
69 | - self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' \ | |
70 | - % len(m_jgs)) | |
71 | - | |
68 | + #self.logger.info('Queued the Job Group from the database. - Number of JobGroup=%d' % len(m_jgs)) | |
69 | + self.logger.info('Activity Information. - [fifo_code=%s, type=serial, jobgroup_num=%d]' % (code, len(m_jgs))) | |
72 | 70 | if code == self.cf["performer.mkfifo.start.code"]: |
73 | 71 | if 0 < len(m_jgs): |
74 | 72 | for m_jg in m_jgs: |
@@ -93,12 +91,13 @@ class Performer(ER): | ||
93 | 91 | t_logger.error(traceback.format_exc()) |
94 | 92 | |
95 | 93 | else: |
96 | - self.logger.info('No Job Group.') | |
94 | + #self.logger.info('No Job Group.') | |
95 | + pass | |
97 | 96 | elif code == self.cf["performer.mkfifo.stop.code"]: |
98 | - self.logger.info('Received stop code from the FIFO file. - code=%s' % code) | |
97 | + self.logger.warning('Received stop code from the FIFO file. - code=%s' % code) | |
99 | 98 | return PROCSUCCESS |
100 | 99 | else: |
101 | - self.logger.info('Received illegal code from the FIFO file. - code=%s' % code) | |
100 | + self.logger.warning('Received illegal code from the FIFO file. - code=%s' % code) | |
102 | 101 | |
103 | 102 | # -- |
104 | 103 | def sigterm_handler(signum, frame): |
@@ -62,8 +62,8 @@ class Scheduler(ER): | ||
62 | 62 | fp = open(self.cf["%s.mkfifo.path" % entity], 'w') |
63 | 63 | try: |
64 | 64 | fp.write(self.cf['%s.mkfifo.start.code' % entity]) |
65 | - self.logger.info('Start code was written. - file=%s : code=%s' | |
66 | - % (self.cf["%s.mkfifo.path" % entity], self.cf['%s.mkfifo.start.code' % entity])) | |
65 | + #self.logger.info('Start code was written. - file=%s : code=%s' % (self.cf["%s.mkfifo.path" % entity], self.cf['%s.mkfifo.start.code' % entity])) | |
66 | + self.logger.info('Activity Information. - [fifo code=%s]' % (self.cf['%s.mkfifo.start.code' % entity])) | |
67 | 67 | finally: |
68 | 68 | fp.close() |
69 | 69 |
@@ -78,7 +78,7 @@ class Scheduler(ER): | ||
78 | 78 | |
79 | 79 | def sigterm_handler(signum, frame): |
80 | 80 | logger = logging.getLogger('pysilhouette.scheduler.signal') |
81 | - logger.info('Stop the schedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
81 | + logger.warning('Stop the schedulerd with signal- pid=%s, signal=%s' % (os.getpid(), signum)) | |
82 | 82 | |
83 | 83 | |
84 | 84 | def main(): |
@@ -116,7 +116,7 @@ def main(): | ||
116 | 116 | finally: |
117 | 117 | if opts.daemon is True and os.path.isfile(opts.pidfile): |
118 | 118 | os.unlink(opts.pidfile) |
119 | - logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
119 | + logger.warning('Process file has been deleted.. - pidfile=%s' % opts.pidfile) | |
120 | 120 | |
121 | 121 | return PROCERROR |
122 | 122 |
@@ -189,7 +189,7 @@ class SimpleWorker(Worker): | ||
189 | 189 | self._cf = cf |
190 | 190 | self._db = db |
191 | 191 | self._jobgroup_id = jobgroup_id |
192 | - self.logger = logging.getLogger('pysilhouette.performer.simpleworker') | |
192 | + self.logger = logging.getLogger('pysilhouette.worker.simpleworker') | |
193 | 193 | |
194 | 194 | def _action(self, session, m_jobs): |
195 | 195 | ret = True |
@@ -361,7 +361,7 @@ class ThreadWorker(threading.Thread, SimpleWorker): | ||
361 | 361 | self._db = db |
362 | 362 | |
363 | 363 | def run(self): |
364 | - self.logger = logging.getLogger('pysilhouette.performer.threadworker') | |
364 | + self.logger = logging.getLogger('pysilhouette.worker.threadworker') | |
365 | 365 | try: |
366 | 366 | self.process() |
367 | 367 | except Exception, e: |