diff --git a/dlr/dlr.py b/dlr/dlr.py
new file mode 100644
index 0000000..40e2f3d
--- /dev/null
+++ b/dlr/dlr.py
@@ -0,0 +1,96 @@
+# combat simulator for Warlords 3 Darklords Rising
+import random
+
+# before combat
+# add up curse of each side, 4% chance per curse per opponent to remove their bless (apply -1 str) and medals
+# add up poison of each side, 4% chance per poison per opponent to apply -2 str (min 1)
+# add up disease of each side, 4% chance per disease per opponent to apply -1 hits (min 1)
+# add up paralysis of each side ... no effect on combat results, just on movement afterwards
+
+# calculate stack bonuses
+# add your best morale bonus from unit, item, and spell - cap at +5, subtract enemy fear, cap at -1
+# repeat for fortify vs siege
+# repeat for leadership vs chaos
+# cap total between -3 and +5
+
+# calculate unit bonuses
+# base strength, cap at 9
+# add stack bonus, cap at 14
+# add item/spell to hero, cap at 15
+# add banding bonuses, cap at 15
+
+# special attacks
+# calculate net ward (unit's warding plus up to 3 group warding)
+# attack with acid, 10% per (acid minus opponent's acid+ward) to halve opponent strength, rounded down
+# if acid missed, attack with lightning, 10% per (lightning minus opponent's lightning+ward) to halve opponent hits, rounded down
+# (it is arguably a bug that acid hits prevent lightning from happening)
+# attack with assassin, 10% per (assassin minus opponent's assassin+ward) to instant kill
+# make N missile attacks per combat. hits are instant kills, +3 str against fliers, 4-hit targets are immune if not flying
+
class Unit:
    """A combat unit: base strength, hit points, and earned medals.

    Previously ``medals`` existed only as a shared class attribute and could
    not be set at construction; it is now an init parameter (default 0, so
    existing ``Unit(strength, hits)`` callers are unaffected).
    """

    def __init__(self, strength, hits, medals=0):
        self.strength = strength  # attack/defense strength (d20 success threshold)
        self.hits = hits          # hit points remaining
        self.medals = medals      # medals grant a re-roll advantage in simulate_fight
+
# normal combat
def calculate_fight(a, b, die_size=20):
    """Exact probability that unit *a* wins a normal-combat fight against *b*.

    Each round both sides roll a d(die_size); a roll of <= strength succeeds.
    A hit lands only when exactly one side succeeds — double successes and
    double failures deal no damage, which is why the per-round odds below are
    conditioned on the round being decisive.

    Raises ValueError for degenerate matchups where no hit can ever land
    (previously this died with an opaque ZeroDivisionError).
    """
    # TODO medals

    # raw per-roll odds: you hit when your roll succeeds and theirs fails
    p_a = (a.strength / die_size) * (1 - b.strength / die_size)
    p_b = (b.strength / die_size) * (1 - a.strength / die_size)
    total = p_a + p_b
    if total == 0:
        # both sides always succeed (or always fail): the fight never resolves
        raise ValueError('degenerate matchup: neither side can ever land a hit')
    # per-round hit odds, conditioned on the round being decisive
    p_a_hits = p_a / total
    p_b_hits = p_b / total
    assert 0.999 < p_a_hits + p_b_hits < 1.001

    def _win(h_a, h_b):
        # probability that a wins from the state (a has h_a hits, b has h_b)
        if h_a == 0:
            return 0
        if h_b == 0:
            return 1
        return p_a_hits * _win(h_a, h_b - 1) + p_b_hits * _win(h_a - 1, h_b)

    return _win(a.hits, b.hits)
+
def recurse_hits(p_a, p_b, h_a, h_b):
    """Probability that side A wins, from per-round hit odds and hit points.

    p_a / p_b: probability a decisive round goes to A / B (should sum to ~1).
    h_a / h_b: remaining hit points of A and B.

    Memoized: the original naive recursion was exponential in h_a + h_b;
    this evaluates each (hits, hits) state once, O(h_a * h_b) total.
    """
    memo = {}

    def _win(ha, hb):
        # base-case order matches the original: the (0, 0) state counts as a loss
        if ha == 0:
            return 0
        if hb == 0:
            return 1
        if (ha, hb) not in memo:
            memo[(ha, hb)] = p_a * _win(ha, hb - 1) + p_b * _win(ha - 1, hb)
        return memo[(ha, hb)]

    return _win(h_a, h_b)
+
+
def simulate_fight(a, b, die_size=20):
    """Simulate one full fight; return 1 if *a* wins, 0 if *b* wins.

    ``die_size`` defaults to 20 and exists for consistency with
    calculate_fight; a roll of <= strength is a success.
    """
    a_hits = a.hits
    b_hits = b.hits

    while a_hits > 0 and b_hits > 0:
        # attempt some rolls
        a_roll = random.randint(1, die_size)
        if a.medals > 0:
            # medals grant a second roll, keeping the better (lower) result;
            # NOTE(review): the 34 - 4*medals die looks d20-specific — confirm
            a_roll = min(a_roll, random.randint(1, 34 - 4 * a.medals))

        b_roll = random.randint(1, die_size)
        if b.medals > 0:
            b_roll = min(b_roll, random.randint(1, 34 - 4 * b.medals))

        # damage lands only when exactly one side succeeds; double
        # successes and double failures are effectively re-rolled
        if a_roll <= a.strength and not b_roll <= b.strength:
            b_hits = b_hits - 1
            # check here for trample and slayer
        elif b_roll <= b.strength and not a_roll <= a.strength:
            a_hits = a_hits - 1
            # check here for trample and slayer

    if a_hits <= 0:
        return 0
    else:
        return 1
+
# Quick self-check: a Monte-Carlo estimate should agree with the exact calculation.
a = Unit(2, 2)
b = Unit(2, 1)

a_wins = 0
for i in range(10000):
    a_wins = a_wins + simulate_fight(a, b)
calc_odds = calculate_fight(a, b)
print(f'{a.strength}/{a.hits} vs {b.strength}/{b.hits}')
# 10000 trials, so wins/100 is already a percentage
print(f'monte carlo: {a_wins/100}%')
# BUG FIX: this line is the exact calculation, but was labelled 'simulated'
print(f'calculated: {calc_odds*100}%')
diff --git a/stat/stat.py b/stat/stat.py
new file mode 100644
index 0000000..540ade8
--- /dev/null
+++ b/stat/stat.py
@@ -0,0 +1,37 @@
+from math import sqrt
+
+"""
+Motivation: sorting items by rating (simple up/down votes)
+
+I first encountered this here:
+-> https://www.evanmiller.org/how-not-to-sort-by-average-rating.html
+
+Naively subtracting up/down is obviously incorrect (e.g. +1000/-900 is sorted above +50/-0).
+
+Comparing raw percentages is more subtly incorrect, but it comes down to the fact that the
+sample sizes are different - 100% positive out of 1 vote can't be meaningfully compared directly
+to 98% positive out of 100 votes.
+-> c.f. Matt Parker's video on why you can't simply subtract percentages
+ (debunking a claim made about 2020 US election results)
+ https://www.youtube.com/watch?v=aokNwKx7gM8
+
+And so, the thing to do is statistics! Using the scores we have, we can construct a confidence
+interval - a range that we're 95% sure contains the "true" rating we'd get if we managed to get
+a vote from everybody. To sort, though, we want a single value - the lower bound of the confidence
+interval is a good choice, since it goes up both when the average goes up but also when we have
+a larger sample size (tighter standard deviation).
+
+The math here comes from a 1927 paper by Edwin B. Wilson.
+-> https://www.jstor.org/stable/2276774 (public domain!)
+
+Two 1998 papers end up recommending the Wilson 'score' interval because it is easy to compute,
+while still approximating the exact confidence interval well on average and not being *too* pessimistic.
+-> https://doi.org/10.2307/2685469
+-> https://doi.org/10.1002/(SICI)1097-0258(19980430)17:8<857::AID-SIM777>3.0.CO;2-E
+"""
def score(up, down, z=2):
    """Wilson 'score' lower confidence bound for an up/down vote tally.

    up, down: non-negative vote counts.
    z: normal quantile for the confidence level; the default z=2 gives a
       ~95.5% interval and reproduces the original hard-coded arithmetic.

    Returns a sortable estimate in [0, 1] (0.1 sentinel for the no-vote case).
    """
    n = up + down
    if n == 0: return 0.1  # arbitrary. this is above +1/-1, but below +1/-0 or +2/-2
    # Wilson lower bound (p̂ + z²/2n − z·sqrt(p̂(1−p̂)/n + z²/4n²)) / (1 + z²/n),
    # multiplied through by n; note z²/2 = 2 and z²/4 = 1 when z=2.
    half_z2 = z * z / 2
    return (up + half_z2) / (n + 2 * half_z2) - (z * sqrt(half_z2 / 2 + (up * down) / n)) / (n + 2 * half_z2)
+
|