# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013  Luke Dashjr <luke-jr+eloipool@utopios.org>
# Portions written by Peter Leurs <kinlo@triplemining.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
argparser = argparse.ArgumentParser()
argparser.add_argument('-c', '--config', help='Config name to load from config_<ARG>.py')
args = argparser.parse_args()

if args.config is not None:
	configmod = 'config_%s' % (args.config,)

config = importlib.import_module(configmod)

if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

if not hasattr(config, 'ShareTarget'):
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
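	# Note on the default above: the value is 2**224 - 1, i.e. roughly "pool difficulty 1";
	# a config may override ShareTarget with a smaller (harder) target.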
import logging.handlers

rootlogger = logging.getLogger(None)
logformat = getattr(config, 'LogFormat', '%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
logformatter = logging.Formatter(logformat)
if len(rootlogger.handlers) == 0:
		'Waker for JSONRPCServer',
		'Waker for StratumServer',
		logging.getLogger(infoOnly).setLevel(logging.INFO)
if getattr(config, 'LogToSysLog', False):
	sysloghandler = logging.handlers.SysLogHandler(address = '/dev/log')
	rootlogger.addHandler(sysloghandler)
if hasattr(config, 'LogFile'):
	if isinstance(config.LogFile, str):
		filehandler = logging.FileHandler(config.LogFile)
		filehandler = logging.handlers.TimedRotatingFileHandler(**config.LogFile)
	filehandler.setFormatter(logformatter)
	rootlogger.addHandler(filehandler)
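# config.LogFile may be either a plain filename (handled by logging.FileHandler above) or a
# dict of keyword arguments for logging.handlers.TimedRotatingFileHandler. A minimal sketch of
# the dict form, with hypothetical values (not from any shipped config):
#   config.LogFile = {
#       'filename': '/var/log/eloipool.log',
#       'when': 'midnight',
#       'backupCount': 7,
#   }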
def RaiseRedFlags(reason):
	logging.getLogger('redflag').critical(reason)

from bitcoin.node import BitcoinLink, BitcoinNode
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'
bcnode.newBlock = lambda blkhash: MM.updateMerkleTree()

import jsonrpc.authproxy
jsonrpc.authproxy.USER_AGENT = 'Eloipool/0.1'

from bitcoin.script import BitcoinScript
from bitcoin.txn import Txn
from base58 import b58decode
from binascii import b2a_hex
from struct import pack
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True, prevBlockHex = None):

	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			cmd = cmd.replace('%p', prevBlockHex or '""')
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			coinbaseValue -= coinbased

	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)

	# TODO: red flag on dupe coinbase
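	# The CoinbaserCmd protocol implemented above: the command (with %d replaced by the total
	# coinbase value in base units and %p by the previous-block hex, if any) prints the number
	# of extra outputs on its first line, then for each output an amount line followed by an
	# address line. Whatever is left after those outputs is paid to config.TrackerAddr.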
import jsonrpc_getwork
from util import Bits2Target

	server.wakeLongpoll()
	stratumsrv.updateJob()

	global MM, networkTarget, server
	bits = MM.currentBlock[2]
		networkTarget = Bits2Target(bits)
	if MM.lastBlock != (None, None, None):
		jsonrpc_getwork._CheckForDupesHACK = {}
	server.wakeLongpoll(wantClear=True)
	stratumsrv.updateJob(wantClear=True)
from time import sleep, time

def _WorkLogPruner_I(wl):
		userwork = wl[username]
		for wli in tuple(userwork.keys()):
			if now > userwork[wli][1] + 120:
	WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))

def WorkLogPruner(wl):
			WorkLogPruner.logger.error(traceback.format_exc())
WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
from merklemaker import merkleMaker
MM.__dict__.update(config.__dict__)
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks

from binascii import b2a_hex
from copy import deepcopy
from math import ceil, log
from merklemaker import MakeBlockHeader
from struct import pack, unpack
from time import time
from util import PendingUpstream, RejectedShare, bdiff1target, dblsha, LEhash2int, swap32, target2bdiff, target2pdiff
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DelayLogForUpstream'):
	config.DelayLogForUpstream = False

if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
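	# DynamicTargetGoal is interpreted here as shares per minute; the line above rescales it
	# to shares per DynamicTargetWindow. For example, a goal of 3 shares/minute with the
	# default 120-second window becomes a goal of 6 shares per window.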
def submitGotwork(info):
		gotwork.gotwork(info)
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())

if not hasattr(config, 'GotWorkTarget'):
	config.GotWorkTarget = 0
def clampTarget(target, DTMode):
	# ShareTarget is the minimum
	if target is None or target > config.ShareTarget:
		target = config.ShareTarget

	# Never target above upstream(s), as we'd lose blocks
	target = max(target, networkTarget, config.GotWorkTarget)

		# Ceil target to a power of two :)
		truebits = log(target, 2)
		if target <= 2**int(truebits):
			# Workaround for bug in Python's math.log function
			truebits = int(truebits)
		target = 2**ceil(truebits) - 1

		# Round target to multiple of bdiff 1
		target = bdiff1target // int(round(target2bdiff(target)))

	# Return None for ShareTarget to save memory
	if target == config.ShareTarget:
def getTarget(username, now, DTMode = None, RequestedTarget = None):
		DTMode = config.DynamicTargetting
	if username in userStatus:
		status = userStatus[username]
		# No record, use default target
		RequestedTarget = clampTarget(RequestedTarget, DTMode)
		userStatus[username] = [RequestedTarget, now, 0]
		return RequestedTarget
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			# No reason to change it just yet
			return clampTarget(targetIn, DTMode)
			# No shares received, reset to minimum
				getTarget.logger.debug("No shares from %s, resetting to minimum target" % (repr(username),))
				userStatus[username] = [None, now, 0]
			return clampTarget(None, DTMode)

	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	target = clampTarget(target, DTMode)
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
getTarget.logger = logging.getLogger('getTarget')
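# Rough sketch of the retargetting arithmetic above: the new target scales the old one by
# (DynamicTargetGoal * deltaSec / DynamicTargetWindow) / work. E.g. with a goal of 6 shares
# per 120-second window, a user who submitted 12 target-weighted shares in 120 seconds gets
# their target halved (difficulty doubled), while one who submitted 3 gets it doubled,
# capped at ShareTarget by clampTarget.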
def TopTargets(n = 0x10):
	tmp = list(k for k, v in userStatus.items() if v[0] is not None)
	tmp.sort(key=lambda k: -userStatus[k][0])
			tmp2[t] = target2pdiff(t)
		tgt = userStatus[k][0]
		print('%-34s %064x %3d' % (k, tgt, t2d(tgt)))
def RegisterWork(username, wli, wld, RequestedTarget = None):
	target = getTarget(username, now, RequestedTarget=RequestedTarget)
	wld = tuple(wld) + (target,)
	workLog.setdefault(username, {})[wli] = (wld, now)
	return target or config.ShareTarget

def getBlockHeader(username):
	hdr = MakeBlockHeader(MRD)
	workLog.setdefault(username, {})[merkleRoot] = (MRD, time())
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)

def getBlockTemplate(username, p_magic = None, RequestedTarget = None):
	if server.tls.wantClear:
	elif p_magic and username not in workLog:
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	wliPos = coinbase[0] + 2
	wliLen = coinbase[wliPos - 1]
	wli = coinbase[wliPos:wliPos+wliLen]
	target = RegisterWork(username, wli, MC, RequestedTarget=RequestedTarget)
	return (MC, workLog[username][wli], target)
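# Note on the work-log index (wli) above: the indexing assumes the coinbase scriptSig starts
# with the BIP 34 height push (coinbase[0] being its length byte), immediately followed by a
# second push whose length byte sits at wliPos - 1; those pushed bytes are the per-template
# identifier used as the workLog key.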
def getStratumJob(jobid, wantClear = False):
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	workLog.setdefault(None, {})[jobid] = (MC, now)
	return (MC, workLog[None][jobid])

def getExistingStratumJob(jobid):
	wld = workLog[None][jobid]

from bitcoin.varlen import varlenEncode, varlenDecode
from merklemaker import assembleBlock

if not hasattr(config, 'BlockSubmissions'):
	config.BlockSubmissions = None
def blockSubmissionThread(payload, blkhash, share):
	if config.BlockSubmissions is None:
		servers = list(a for b in MM.TemplateSources for a in b)
		servers = list(config.BlockSubmissions)

	if hasattr(share['merkletree'], 'source_uri'):
			'access': jsonrpc.ServiceProxy(share['merkletree'].source_uri),
			'name': share['merkletree'].source,
		servers = list(a for b in MM.TemplateSources for a in b)

	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
		UpstreamBitcoindJSONRPC = TS['access']
			# BIP 22 standard submitblock
			reason = UpstreamBitcoindJSONRPC.submitblock(payload)
		except BaseException as gbterr:
			gbterr_fmt = traceback.format_exc()
					# bitcoind 0.5/0.6 getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload)
					# Old BIP 22 draft getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload, {})
				elif reason is False:
			except BaseException as gmperr:
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(gbterr_fmt)
				if MM.currentBlock[0] not in myblock and tries > len(servers):
					RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
					RaiseRedFlags('Giving up on submitting block to upstream \'%s\'' % (TS['name'],))
					if share['upstreamRejectReason'] is PendingUpstream:
						share['upstreamRejectReason'] = 'GAVE UP'
						share['upstreamResult'] = False

		# At this point, we have a reason back
			# FIXME: The returned value could be a list of multiple responses
			msg = 'Upstream \'%s\' block submission failed: %s' % (TS['name'], reason,)
			if success and reason in ('stale-prevblk', 'bad-prevblk', 'orphan', 'duplicate'):
				blockSubmissionThread.logger.debug(msg)
				RBFs.append( (('upstream reject', reason, time()), payload, blkhash, share) )
			blockSubmissionThread.logger.debug('Upstream \'%s\' accepted block' % (TS['name'],))
		if share['upstreamRejectReason'] is PendingUpstream:
			share['upstreamRejectReason'] = reason
			share['upstreamResult'] = not reason
blockSubmissionThread.logger = logging.getLogger('blockSubmission')
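# Submission order implemented above, per upstream server: try BIP 22 submitblock first, then
# fall back to bitcoind 0.5/0.6-style getmemorypool(data), then the old BIP 22 draft form
# getmemorypool(data, {}). If all of them raise, the failure is red-flagged; once no untried
# server remains and our block is no longer the current one, the share is marked 'GAVE UP'.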
def checkData(share):
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')

	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')

	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
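# checkData above works on the fixed 80-byte header layout: bytes 0-3 version (little-endian),
# 4-35 previous block hash, 36-67 merkle root, 68-71 ntime, 72-75 nbits, 76-79 nonce. The
# slices data[4:36] and data[72:76] therefore correspond to the prevblock and nbits fields.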
def buildStratumData(share, merkleroot):
	(prevBlock, height, bits) = MM.currentBlock
	data += share['ntime'][::-1]
	data += share['nonce'][::-1]

def IsJobValid(wli, wluser = None):
	if wluser not in workLog:
	if wli not in workLog[wluser]:
	(wld, issueT) = workLog[wluser][wli]
	if time() < issueT - 120:
def checkShare(share):
	shareTime = share['time'] = time()

	username = share['username']
		if username not in workLog:
			raise RejectedShare('unknown-user')
		MWL = workLog[username]

		shareMerkleRoot = data[36:68]
		if 'blkdata' in share:
			pl = share['blkdata']
			(txncount, pl) = varlenDecode(pl)
			cbtxn = bitcoin.txn.Txn(pl)
			othertxndata = cbtxn.disassemble(retExtra=True)
			coinbase = cbtxn.getCoinbase()
			wliPos = coinbase[0] + 2
			wliLen = coinbase[wliPos - 1]
			wli = coinbase[wliPos:wliPos+wliLen]
			wli = shareMerkleRoot
		buildStratumData(share, b'\0' * 32)

		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]

	share['issuetime'] = issueT

	(workMerkleTree, workCoinbase) = wld[1:3]
	share['merkletree'] = workMerkleTree
		cbtxn = deepcopy(workMerkleTree.data[0])
		coinbase = workCoinbase + share['extranonce1'] + share['extranonce2']
		cbtxn.setCoinbase(coinbase)
		data = buildStratumData(share, workMerkleTree.withFirst(cbtxn))
		shareMerkleRoot = data[36:68]

	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None

	blkhash = dblsha(data)
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = LEhash2int(blkhash)

	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))

	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn.setCoinbase(coinbase or workCoinbase)

	if blkhashn <= networkTarget:
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
			payload = assembleBlock(data, txlist)
			payload = share['data']
			if len(othertxndata):
				payload += share['blkdata']
				payload += assembleBlock(data, txlist)[80:]
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		bcnode.submitBlock(payload)
		if config.DelayLogForUpstream:
			share['upstreamRejectReason'] = PendingUpstream
			share['upstreamRejectReason'] = None
			share['upstreamResult'] = True
		MM.updateBlock(blkhash)

	if gotwork and blkhashn <= config.GotWorkTarget:
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			coinbaseMrkl += b"\0\0\0\0"
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			checkShare.logger.warning('Failed to build gotwork request')
	if 'target' in share:
		workTarget = share['target']

	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)

	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] can only be missing immediately after a restart
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
			userStatus[username][2] += float(target) / workTarget
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')

		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')

		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')

		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')

		if len(othertxndata):
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
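# Summary of RejectedShare reasons raised by checkData/checkShare above: stale-prevblk,
# bad-prevblk, bad-diffbits, bad-version, unknown-user, unknown-work, duplicate, H-not-zero,
# high-hash, stale-work, time-too-old, time-too-new, and (for submissions carrying full
# transaction data) bad-cb-prefix, bad-cb-flag, bad-cb-length, bad-txnmrklroot, bad-txns.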
	if '_origdata' in share:
		share['solution'] = share['_origdata']
		share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
	for i in loggersShare:

def checkAuthentication(username, password):
	# HTTPServer uses bytes, and StratumServer uses str
	if hasattr(username, 'decode'): username = username.decode('utf8')
	if hasattr(password, 'decode'): password = password.decode('utf8')

	for i in authenticators:
		if i.checkAuthentication(username, password):
def receiveShare(share):
	# TODO: username => userid
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
	except BaseException as e:
		share['rejectReason'] = 'ERROR'
		if share.get('upstreamRejectReason', None) is not PendingUpstream:

def newBlockNotification():
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?

def newBlockNotificationSIGNAL(signum, frame):
	# Use a new thread, in case the signal handler is called with locks held
	thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))

from signal import signal, SIGUSR1
signal(SIGUSR1, newBlockNotificationSIGNAL)
from time import sleep

if getattr(config, 'SaveStateFilename', None) is None:
	config.SaveStateFilename = 'eloipool.worklog'

	logger = logging.getLogger('stopServers')

	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
	stopServers.already = True

	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server, stratumsrv)
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
				sl.append(s.__class__.__name__)
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
		for fd in s._fd.keys():

	for i in loggersShare:
		if hasattr(i, 'stop'):
def saveState(SAVE_STATE_FILENAME, t = None):
	logger = logging.getLogger('saveState')

	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
			with open(SAVE_STATE_FILENAME, 'wb') as f:
				pickle.dump(DupeShareHACK, f)
				pickle.dump(workLog, f)
				logger.error('Failed to save work\n' + traceback.format_exc())
					os.unlink(SAVE_STATE_FILENAME)
					logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
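# The pickle stream written above contains, in order: the shutdown timestamp t, DupeShareHACK,
# and workLog; restoreState() below reads them back in the same order and skips the workLog if
# the saved state is more than 120 seconds old.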
	saveState(config.SaveStateFilename, t=t)
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)

	saveState(config.SaveStateFilename, t=t)
	logging.getLogger('restart').info('Restarting...')
		os.execv(sys.argv[0], sys.argv)
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
def restoreState(SAVE_STATE_FILENAME):
	if not os.path.exists(SAVE_STATE_FILENAME):

	global workLog, DupeShareHACK

	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
		with open(SAVE_STATE_FILENAME, 'rb') as f:
					# Future formats, not supported here
				# Old format, from 2012-02-02 to 2012-02-03
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)

				if t + 120 >= time():
					workLog = pickle.load(f)
					logger.debug('Skipping restore of expired workLog')
		logger.error('Failed to restore state\n' + traceback.format_exc())
	logger.info('State restored successfully')
		logger.info('Total downtime: %g seconds' % (time() - t,))
from jsonrpcserver import JSONRPCListener, JSONRPCServer
import interactivemode
from networkserver import NetworkListener
import authentication
from stratumserver import StratumServer
if __name__ == "__main__":
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
					'engine': 'postgres',
					'dbopts': parameters,
			elif name == 'logfile':
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo)
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name, sys.exc_info())
	if not hasattr(config, 'Authentication'):
		config.Authentication = ({'module': 'allowall'},)

	for i in config.Authentication:
			fp, pathname, description = imp.find_module(name, authentication.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			authenticators.append(lo)
			logging.getLogger('authentication').error("Error setting up authentication module %s: %s", name, sys.exc_info())

	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))

	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)

	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux

	server = JSONRPCServer()
	server.tls = threading.local()
	server.tls.wantClear = False
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)

	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	server.checkAuthentication = checkAuthentication

	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName

	stratumsrv = StratumServer()
	stratumsrv.getStratumJob = getStratumJob
	stratumsrv.getExistingStratumJob = getExistingStratumJob
	stratumsrv.receiveShare = receiveShare
	stratumsrv.RaiseRedFlags = RaiseRedFlags
	stratumsrv.getTarget = getTarget
	stratumsrv.defaultTarget = config.ShareTarget
	stratumsrv.IsJobValid = IsJobValid
	stratumsrv.checkAuthentication = checkAuthentication
	if not hasattr(config, 'StratumAddresses'):
		config.StratumAddresses = ()
	for a in config.StratumAddresses:
		NetworkListener(stratumsrv, a)
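	# The listener address lists (JSONRPCAddresses, StratumAddresses, BitcoinNodeAddresses) are
	# assumed here to hold (host, port) tuples as accepted by the respective Listener classes;
	# e.g. a hypothetical config might use StratumAddresses = (('', 3334),) to listen on all
	# interfaces.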
	restoreState(config.SaveStateFilename)

	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True

	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True

	stratum_thr = threading.Thread(target=stratumsrv.serve_forever)
	stratum_thr.daemon = True

	server.serve_forever()
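	# Thread layout at startup: the work-log pruner, the bitcoin P2P node and the stratum
	# server each run in their own daemon thread, while the JSON-RPC server occupies the main
	# thread via serve_forever().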