# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013  Luke Dashjr <luke-jr+eloipool@utopios.org>
# Portions written by Peter Leurs <kinlo@triplemining.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import argparse
import importlib

argparser = argparse.ArgumentParser()
argparser.add_argument('-c', '--config', help='Config name to load from config_<ARG>.py')
args = argparser.parse_args()
configmod = 'config'
if args.config is not None:
	configmod = 'config_%s' % (args.config,)

config = importlib.import_module(configmod)
if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

if not hasattr(config, 'ShareTarget'):
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
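# ShareTarget is the easiest (numerically largest) hash value a submitted share may
# have and still be accepted. The default above is the pool-difficulty-1 ("pdiff 1")
# target: a share's block hash must be at or below it, i.e. show 32 leading zero bits
# when rendered big-endian. As a hypothetical example, a pool that wants roughly
# difficulty-4 shares from every miner would configure about ShareTarget // 4.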
import logging
import logging.handlers

rootlogger = logging.getLogger(None)
logformat = getattr(config, 'LogFormat', '%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
logformatter = logging.Formatter(logformat)
if len(rootlogger.handlers) == 0:
	logging.basicConfig(format=logformat, level=logging.DEBUG)
	for infoOnly in (
		'Waker for JSONRPCServer',
		'Waker for StratumServer',
	):
		logging.getLogger(infoOnly).setLevel(logging.INFO)
if getattr(config, 'LogToSysLog', False):
	sysloghandler = logging.handlers.SysLogHandler(address = '/dev/log')
	rootlogger.addHandler(sysloghandler)
if hasattr(config, 'LogFile'):
	if isinstance(config.LogFile, str):
		filehandler = logging.FileHandler(config.LogFile)
	else:
		filehandler = logging.handlers.TimedRotatingFileHandler(**config.LogFile)
	filehandler.setFormatter(logformatter)
	rootlogger.addHandler(filehandler)
def RaiseRedFlags(reason):
	logging.getLogger('redflag').critical(reason)
from bitcoin.node import BitcoinLink, BitcoinNode
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'
bcnode.newBlock = lambda blkhash: MM.updateMerkleTree()

import jsonrpc.authproxy
jsonrpc.authproxy.USER_AGENT = 'Eloipool/0.1'
from bitcoin.script import BitcoinScript
from bitcoin.txn import Txn
from base58 import b58decode
from binascii import b2a_hex
from struct import pack
import subprocess
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True, prevBlockHex = None):
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			cmd = cmd.replace('%p', prevBlockHex or '""')
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except:
			# Treat any coinbaser failure as if it claimed more than the full reward
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO: red flag on dupe coinbase
	
	return txn
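# Coinbaser protocol assumed by makeCoinbaseTxn above: CoinbaserCmd is run through the
# shell with %d replaced by the total coinbase value and %p by the previous block hash.
# Its stdout must give the number of extra outputs, then an amount line followed by an
# address line for each output (amounts in the same units as coinbaseValue).
# A hypothetical session:
#
#   $ ./coinbaser.sh 5000000000 <prevblockhex>
#   2
#   100000000
#   <address 1>
#   50000000
#   <address 2>
#
# Whatever remains after those outputs is paid to config.TrackerAddr.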
import jsonrpc_getwork
from util import Bits2Target

workLog = {}
userStatus = {}
networkTarget = None
DupeShareHACK = {}

def updateBlocks():
	server.wakeLongpoll()
	stratumsrv.updateJob()

def blockChanged():
	global MM, networkTarget, server, DupeShareHACK
	bits = MM.currentBlock[2]
	networkTarget = Bits2Target(bits)
	if MM.lastBlock != (None, None, None):
		DupeShareHACK = {}
		jsonrpc_getwork._CheckForDupesHACK = {}
		workLog.clear()
	server.wakeLongpoll(wantClear=True)
	stratumsrv.updateJob(wantClear=True)
from time import sleep, time
import traceback

def _WorkLogPruner_I(wl):
	now = time()
	pruned = 0
	for username in wl:
		userwork = wl[username]
		for wli in tuple(userwork.keys()):
			if now > userwork[wli][1] + 120:
				del userwork[wli]
				pruned += 1
	WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))

def WorkLogPruner(wl):
	while True:
		try:
			sleep(60)
			_WorkLogPruner_I(wl)
		except:
			WorkLogPruner.logger.error(traceback.format_exc())
WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
from merklemaker import merkleMaker
MM = merkleMaker()
MM.__dict__.update(config.__dict__)
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
from binascii import b2a_hex
from copy import deepcopy
from math import ceil, log
from merklemaker import MakeBlockHeader
from struct import pack, unpack
import threading
from time import time
from util import PendingUpstream, RejectedShare, bdiff1target, dblsha, LEhash2int, swap32, target2bdiff, target2pdiff
gotwork = None
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DelayLogForUpstream'):
	config.DelayLogForUpstream = False
if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
else:
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
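# DynamicTargetGoal is configured as a desired share rate per minute; the line above
# rescales it to shares expected per DynamicTargetWindow. As a hypothetical example,
# DynamicTargetGoal = 20 shares/minute with a 120-second window becomes
# 20 * (120 / 60) = 40 shares per window.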
def submitGotwork(info):
	try:
		gotwork.gotwork(info)
	except:
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
if not hasattr(config, 'GotWorkTarget'):
	config.GotWorkTarget = 0
def clampTarget(target, DTMode):
	# ShareTarget is the minimum
	if target is None or target > config.ShareTarget:
		target = config.ShareTarget
	
	# Never target above upstream(s), as we'd lose blocks
	target = max(target, networkTarget, config.GotWorkTarget)
	
	if DTMode == 2:
		# Ceil target to a power of two :)
		truebits = log(target, 2)
		if target <= 2**int(truebits):
			# Workaround for bug in Python's math.log function
			truebits = int(truebits)
		target = 2**ceil(truebits) - 1
	elif DTMode == 3:
		# Round target to multiple of bdiff 1
		target = bdiff1target // int(round(target2bdiff(target)))
	
	# Return None for ShareTarget to save memory
	if target == config.ShareTarget:
		return None
	return target
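# Worked example for the power-of-two mode above (hypothetical numbers): a clamped
# target of 3 * 2**218 gives log2 of about 219.58, which ceils to 220, so the issued
# target becomes 2**220 - 1. A target that is already an exact power of two, say
# 2**220, also maps to 2**220 - 1; the int() workaround keeps math.log's floating-point
# overshoot from bumping such a value a whole power higher.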
def getTarget(username, now, DTMode = None, RequestedTarget = None):
	if DTMode is None:
		DTMode = config.DynamicTargetting
	if not DTMode:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# No record, use default target
		RequestedTarget = clampTarget(RequestedTarget, DTMode)
		userStatus[username] = [RequestedTarget, now, 0]
		return RequestedTarget
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			# No reason to change it just yet
			return clampTarget(targetIn, DTMode)
		if not work:
			# No shares received, reset to minimum
			if targetIn:
				getTarget.logger.debug("No shares from %s, resetting to minimum target" % (repr(username),))
				userStatus[username] = [None, now, 0]
			return clampTarget(None, DTMode)
	
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	target = clampTarget(target, DTMode)
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
		userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
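# Retargetting arithmetic, as a hypothetical example: with DynamicTargetWindow = 120
# and a window-scaled DynamicTargetGoal of 40, a miner who returned work = 80 weighted
# shares over deltaSec = 120 gets
#   newTarget = oldTarget * 40 * 120 / 120 / 80 = oldTarget / 2
# i.e. the issued difficulty doubles so the share rate falls back toward the goal.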
def TopTargets(n = 0x10):
	tmp = list(k for k, v in userStatus.items() if not v[0] is None)
	tmp.sort(key=lambda k: -userStatus[k][0])
	tmp2 = {}
	def t2d(t):
		if t not in tmp2:
			tmp2[t] = target2pdiff(t)
		return tmp2[t]
	for k in tmp[-n:]:
		tgt = userStatus[k][0]
		print('%-34s %064x %3d' % (k, tgt, t2d(tgt)))
def RegisterWork(username, wli, wld, RequestedTarget = None):
	now = time()
	target = getTarget(username, now, RequestedTarget=RequestedTarget)
	wld = tuple(wld) + (target,)
	workLog.setdefault(username, {})[wli] = (wld, now)
	return target or config.ShareTarget
def getBlockHeader(username):
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	workLog.setdefault(username, {})[merkleRoot] = (MRD, time())
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
def getBlockTemplate(username, p_magic = None, RequestedTarget = None):
	if server.tls.wantClear:
		wantClear = True
	elif p_magic and username not in workLog:
		wantClear = True
		p_magic[0] = True
	else:
		wantClear = False
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	wliPos = coinbase[0] + 2
	wliLen = coinbase[wliPos - 1]
	wli = coinbase[wliPos:wliPos+wliLen]
	target = RegisterWork(username, wli, MC, RequestedTarget=RequestedTarget)
	return (MC, workLog[username][wli], target)
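# How the work-log identifier (wli) is recovered above: the coinbase scriptSig is
# assumed to start with the BIP 34 height push, whose first byte is the push length,
# so the next data push's length byte sits at coinbase[0] + 1 and its payload (the
# wli) begins at wliPos = coinbase[0] + 2, with wliLen = coinbase[wliPos - 1].
# Sketch of the assumed layout: [len][height...][len][wli...][miner extranonce...].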
def getStratumJob(jobid, wantClear = False):
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	now = time()
	workLog.setdefault(None, {})[jobid] = (MC, now)
	return (MC, workLog[None][jobid])

def getExistingStratumJob(jobid):
	wld = workLog[None][jobid]
	return (wld[0], wld)
loggersShare = []
authenticators = []
RBDs = []
RBFs = []

from bitcoin.varlen import varlenEncode, varlenDecode
import bitcoin.txn
from merklemaker import assembleBlock

if not hasattr(config, 'BlockSubmissions'):
	config.BlockSubmissions = None
def blockSubmissionThread(payload, blkhash, share):
	if config.BlockSubmissions is None:
		servers = list(a for b in MM.TemplateSources for a in b)
	else:
		servers = list(config.BlockSubmissions)
	
	if hasattr(share['merkletree'], 'source_uri'):
		servers.insert(0, {
			'access': jsonrpc.ServiceProxy(share['merkletree'].source_uri),
			'name': share['merkletree'].source,
		})
	elif not servers:
		servers = list(a for b in MM.TemplateSources for a in b)
	
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	tries = 0
	success = False
	while len(servers):
		tries += 1
		TS = servers.pop(0)
		UpstreamBitcoindJSONRPC = TS['access']
		try:
			# BIP 22 standard submitblock
			reason = UpstreamBitcoindJSONRPC.submitblock(payload)
		except BaseException as gbterr:
			gbterr_fmt = traceback.format_exc()
			try:
				try:
					# bitcoind 0.5/0.6 getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				except:
					# Old BIP 22 draft getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload, {})
				if reason is True:
					reason = None
				elif reason is False:
					reason = 'rejected'
			except BaseException as gmperr:
				now = time()
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(gbterr_fmt)
				if MM.currentBlock[0] not in myblock and tries > len(servers):
					RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
					RaiseRedFlags('Giving up on submitting block to upstream \'%s\'' % (TS['name'],))
					if share['upstreamRejectReason'] is PendingUpstream:
						share['upstreamRejectReason'] = 'GAVE UP'
						share['upstreamResult'] = False
						logShare(share)
					return
				
				servers.append(TS)
				continue
		
		# At this point, we have a reason back
		if reason:
			# FIXME: The returned value could be a list of multiple responses
			msg = 'Upstream \'%s\' block submission failed: %s' % (TS['name'], reason,)
			if success and reason in ('stale-prevblk', 'bad-prevblk', 'orphan', 'duplicate'):
				# no big deal
				blockSubmissionThread.logger.debug(msg)
			else:
				RBFs.append( (('upstream reject', reason, time()), payload, blkhash, share) )
				RaiseRedFlags(msg)
		else:
			blockSubmissionThread.logger.debug('Upstream \'%s\' accepted block' % (TS['name'],))
			success = True
		if share['upstreamRejectReason'] is PendingUpstream:
			share['upstreamRejectReason'] = reason
			share['upstreamResult'] = not reason
			logShare(share)
blockSubmissionThread.logger = logging.getLogger('blockSubmission')
def checkData(share):
	data = share['data']
	data = data[:80]
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
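# Byte layout of the 80-byte block header being sliced above (little-endian fields):
#   data[0:4]   version       data[4:36]  previous block hash
#   data[36:68] merkle root   data[68:72] ntime
#   data[72:76] nbits         data[76:80] nonce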
def buildStratumData(share, merkleroot):
	(prevBlock, height, bits) = MM.currentBlock
	
	data = b'\x02\0\0\0'
	data += prevBlock
	data += merkleroot
	data += share['ntime'][::-1]
	data += bits
	data += share['nonce'][::-1]
	
	share['data'] = data
	return data
def IsJobValid(wli, wluser = None):
	if wluser not in workLog:
		return False
	if wli not in workLog[wluser]:
		return False
	(wld, issueT) = workLog[wluser][wli]
	if time() < issueT - 120:
		return False
	return True
def checkShare(share):
	shareTime = share['time'] = time()
	
	username = share['username']
	if 'data' in share:
		# getwork/getblocktemplate submission
		checkData(share)
		data = share['data']
		
		if username not in workLog:
			raise RejectedShare('unknown-user')
		MWL = workLog[username]
		
		shareMerkleRoot = data[36:68]
		if 'blkdata' in share:
			pl = share['blkdata']
			(txncount, pl) = varlenDecode(pl)
			cbtxn = bitcoin.txn.Txn(pl)
			othertxndata = cbtxn.disassemble(retExtra=True)
			coinbase = cbtxn.getCoinbase()
			wliPos = coinbase[0] + 2
			wliLen = coinbase[wliPos - 1]
			wli = coinbase[wliPos:wliPos+wliLen]
			mode = 'MC'
			moden = 1
		else:
			wli = shareMerkleRoot
			mode = 'MRD'
			moden = 0
			coinbase = None
		isstratum = False
	else:
		# Stratum submission
		isstratum = True
		wli = share['jobid']
		buildStratumData(share, b'\0' * 32)
		mode = 'MC'
		moden = 1
		othertxndata = b''
		if None not in workLog:
			# We haven't yet sent any stratum work for this block
			raise RejectedShare('unknown-work')
		MWL = workLog[None]
	
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	share['issuetime'] = issueT
	
	(workMerkleTree, workCoinbase) = wld[1:3]
	share['merkletree'] = workMerkleTree
	if isstratum:
		cbtxn = deepcopy(workMerkleTree.data[0])
		coinbase = workCoinbase + share['extranonce1'] + share['extranonce2']
		cbtxn.setCoinbase(coinbase)
		cbtxn.assemble()
		data = buildStratumData(share, workMerkleTree.withFirst(cbtxn))
		shareMerkleRoot = data[36:68]
	
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	blkhash = dblsha(data)
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = LEhash2int(blkhash)
	
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
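	# Proof-of-work check: the double-SHA256 of the header, read as a little-endian
	# 256-bit integer, must not exceed the target. The quick test above only requires
	# the last four hash bytes to be zero (32 leading zero bits once displayed
	# big-endian), which any share of difficulty 1 or more satisfies.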
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(coinbase or workCoinbase)
	cbtxn.assemble()
	
	if blkhashn <= networkTarget:
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data']
			if len(othertxndata):
				payload += share['blkdata']
			else:
				payload += assembleBlock(data, txlist)[80:]
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		bcnode.submitBlock(payload)
		if config.DelayLogForUpstream:
			share['upstreamRejectReason'] = PendingUpstream
		else:
			share['upstreamRejectReason'] = None
			share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	if 'target' in share:
		workTarget = share['target']
	else:
		# RegisterWork appends the issued target to the work-log entry
		workTarget = wld[6] if len(wld) > 6 else None
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	if moden:
		cbpre = workCoinbase
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
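		# The find() test only rejects a flag appearing *outside* the pool-supplied
		# prefix: an index at or below cbpreLen - len(ff) means the flag was already
		# part of workCoinbase, while any later match means the miner added it and
		# could falsely signal protocol support on the pool's behalf.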
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
		
		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')
		
		if len(othertxndata):
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			userStatus[username][2] += float(target) / workTarget
		if isstratum and userStatus[username][2] > config.DynamicTargetGoal * 2:
			stratumsrv.quickDifficultyUpdate(username)
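	# Share accounting note: a share solved at the user's current target counts as 1,
	# otherwise it is weighted by target/workTarget, so the counter always measures
	# work in units of the user's present difficulty. As a hypothetical example, a
	# share found at a workTarget numerically half the recorded target (i.e. twice as
	# hard) counts as 2.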
checkShare.logger = logging.getLogger('checkShare')

def logShare(share):
	if '_origdata' in share:
		share['solution'] = share['_origdata']
	else:
		share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
	for i in loggersShare:
		i.logShare(share)
def checkAuthentication(username, password):
	# HTTPServer uses bytes, and StratumServer uses str
	if hasattr(username, 'decode'): username = username.decode('utf8')
	if hasattr(password, 'decode'): password = password.decode('utf8')
	
	for i in authenticators:
		if i.checkAuthentication(username, password):
			return True
	return False
def receiveShare(share):
	# TODO: username => userid
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	except BaseException as e:
		share['rejectReason'] = 'ERROR'
		raise
	finally:
		if not share.get('upstreamRejectReason', None) is PendingUpstream:
			logShare(share)
def newBlockNotification():
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?

def newBlockNotificationSIGNAL(signum, frame):
	# Use a new thread, in case the signal handler is called with locks held
	thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
	thr.daemon = True
	thr.start()

from signal import signal, SIGUSR1
signal(SIGUSR1, newBlockNotificationSIGNAL)
import os
import pickle
import signal
import sys
from time import sleep

if getattr(config, 'SaveStateFilename', None) is None:
	config.SaveStateFilename = 'eloipool.worklog'
def stopServers():
	logger = logging.getLogger('stopServers')
	
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server, stratumsrv)
	for s in servers:
		s.keepgoing = False
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)
	
	for i in loggersShare:
		if hasattr(i, 'stop'):
			i.stop()
def saveState(SAVE_STATE_FILENAME, t = None):
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	try:
		with open(SAVE_STATE_FILENAME, 'wb') as f:
			pickle.dump(t, f)
			pickle.dump(DupeShareHACK, f)
			pickle.dump(workLog, f)
	except:
		logger.error('Failed to save work\n' + traceback.format_exc())
		try:
			os.unlink(SAVE_STATE_FILENAME)
		except:
			logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
def exit():
	t = time()
	stopServers()
	saveState(config.SaveStateFilename, t=t)
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)

def restart():
	t = time()
	stopServers()
	saveState(config.SaveStateFilename, t=t)
	logging.getLogger('restart').info('Restarting...')
	try:
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
def restoreState(SAVE_STATE_FILENAME):
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					ver = t[3]
					# TODO
				
				# Old format, from 2012-02-02 to 2012-02-03
				workLog = t[0]
				DupeShareHACK = t[1]
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
					
					if t + 120 >= time():
						workLog = pickle.load(f)
					else:
						logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
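# Assumed on-disk layout (matching saveState above): a single pickle stream holding
# the shutdown timestamp t, then DupeShareHACK, then workLog; state files that begin
# with a tuple or a bare dict are the older 2012-02 formats handled above. A quick way
# to inspect a state file by hand (hypothetical path):
#
#   with open('eloipool.worklog', 'rb') as f:
#       t = pickle.load(f); dupes = pickle.load(f); work = pickle.load(f)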
from jsonrpcserver import JSONRPCListener, JSONRPCServer
import interactivemode
from networkserver import NetworkListener
import sharelogging
import authentication
from stratumserver import StratumServer
import imp
if __name__ == "__main__":
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'name': 'postgres',
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			name, parameters = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				name = 'database'
				parameters = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
		else:
			name = i['name']
			parameters = i
		
		try:
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name, sys.exc_info())
	if not hasattr(config, 'Authentication'):
		config.Authentication = ({'module': 'allowall'},)
	
	for i in config.Authentication:
		name = i['module']
		parameters = i
		try:
			fp, pathname, description = imp.find_module(name, authentication.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			authenticators.append(lo)
		except:
			logging.getLogger('authentication').error("Error setting up authentication module %s: %s", name, sys.exc_info())
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	server = JSONRPCServer()
	server.tls = threading.local()
	server.tls.wantClear = False
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	server.checkAuthentication = checkAuthentication
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	stratumsrv = StratumServer()
	stratumsrv.getStratumJob = getStratumJob
	stratumsrv.getExistingStratumJob = getExistingStratumJob
	stratumsrv.receiveShare = receiveShare
	stratumsrv.RaiseRedFlags = RaiseRedFlags
	stratumsrv.getTarget = getTarget
	stratumsrv.defaultTarget = config.ShareTarget
	stratumsrv.IsJobValid = IsJobValid
	stratumsrv.checkAuthentication = checkAuthentication
	if not hasattr(config, 'StratumAddresses'):
		config.StratumAddresses = ()
	for a in config.StratumAddresses:
		NetworkListener(stratumsrv, a)
	MM.start()
	
	restoreState(config.SaveStateFilename)
	
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	stratum_thr = threading.Thread(target=stratumsrv.serve_forever)
	stratum_thr.daemon = True
	stratum_thr.start()
	
	server.serve_forever()