#!/meta/h/habs/usr/bin/python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import bs4
import requests
import re
import urllib.parse
import mysql.connector
import json
import pprint
import string
import time
import markovify
import geocoder
import itertools
from lrc_private import * # passwords
CALL = 'TrackBot!'  # prefix a forum-post line must start with to invoke the bot
headers = {'User-Agent': 'TrackStatter 1.0'} # don't use 'bot' keyword
# Post archive / reply-log database; DB_PASS is supplied by lrc_private.
con = mysql.connector.connect(
host='ma.sdf.org',
user='habs',
password=DB_PASS,
database='habs')
cur = con.cursor()
pp = pprint.PrettyPrinter(indent=2)  # debugging helper
# Athlete names the tilastopaja name search cannot resolve by itself,
# mapped to an explicit (athlete ID, sex code) pair.  Keys are lowercase.
disambig = {
'mohamed farah':('31850','1'),
'mo farah':('31850','1'),
'jennifer simpson':('29937','2'),
'gabriel jennings':('14196','1'),
'ryan hall':('38693','1'),
'abdihakem abdirahman':('212','1'),
'saif saaeed shaheen':('41748','1'),
'ezekiel kemboi':('13062','1'),
'mary decker slaney':('13762','2'),
'john walker':('21802','1'),
'kevin sullivan':('24611','1'),
'david kimani':('9960','1'),
'carl lewis':('6473','1'),
'michael johnson':('30379','1'),
'andrew hunter':('197421','1'),
'alberto juantorena':('985','1'),
'tim montgomery':('43767','1'),
'carlos lopes':('6560','1'),
'alene reta':('1756','1'),
'valeriy borzov':('45353','1'),
'ben johnson':('4774','1'),
'dan o\'brien':('9306','1'),
'daniel o\'brien':('9306','1'),
'david johnson':('9626','1'),
'dave johnson':('9626','1'),
'kim smith':('10284','2'),
'german fernandez':('104543','1'),
'ryan hill':('115521','1'),
'liam adams':('72112','1'),
'jorge torres':('22140','1'),
'abdalaati iguider':('61484','1'),
'alfred kipketer':('144056','1'),
'johnny crain':('107475','1'),
'steve jones':('41941','1'),
'rob de castilla':('37380','1'),
'mo aman':('110549','1'),
'mohammed aman':('110549','1'),
'marc davis':('28103','1'),
'willie ritola':('114561','1'),
'duane solomon':('50683','1'),
'thorsten k\xc3hn':('43610','1'),
'natalia pryshchepa':('77786','2'),
'arzamasova marina':('75074','2'),
'jeffery eggleston':('89891','1'),
'eluid kipchoge':('12282','1'),
'ron clarke':('38241','1'),
'kip keino':('24912','1'),
}
# Doping records missing from tilastopaja, keyed by the same
# (athlete ID, sex code) pair that get_athid() returns.  Values are the
# preformatted text the 'dope' command appends verbatim.
dopes = {
('1756','1'): # Alene Reta
"""Doping offense:
Test: IC, Test Date: Jun 9, 2001
Substance: Nandrolone, Punishment: 2 years ineligibility
Ban start: 2001, Ban end: 2003""",
('13762','2'): # Mary Decker Slaney
"""Doping offense:
Test: IC, Test Date: June 1996
Substance: Testosterone, Punishment: 2 years 3 months ineligibility
Ban start: June 1997, Ban end: September 1999""",
}
import os
# Environment for the NIRCA-Database helper scripts run via subprocess:
# same as ours, plus the checkout on PYTHONPATH.
nirca_env = {**os.environ,'PYTHONPATH':'/meta/h/habs/letsrun/NIRCA-Database:'}
def most_common(L):
    """Return the most frequent element of L.

    Ties are broken in favor of the element whose first occurrence in L
    is earliest.  Elements must be orderable (they are sorted so that
    itertools.groupby can bucket equal values).
    """
    # Pair each element with its index so each group also knows the
    # earliest position of its value.
    SL = sorted((x, i) for i, x in enumerate(L))
    # Bug fix: the original used operator.itemgetter(0) but `operator`
    # is never imported in this file, so every call raised NameError.
    groups = itertools.groupby(SL, key=lambda pair: pair[0])
    def _auxfun(g):
        # Score a (value, pairs) group: larger count wins; for equal
        # counts, the smaller first-occurrence index wins (hence the
        # negation, so max() prefers it).
        item, iterable = g
        count = 0
        min_index = len(L)
        for _, where in iterable:
            count += 1
            min_index = min(min_index, where)
        return count, -min_index
    return max(groups, key=_auxfun)[0]
def get_tilas(name):
    """Fetch the tilastopaja athlete page for `name` as parsed HTML.

    Names present in the `disambig` table are resolved to a specific
    (ID, Sex) pair and fetched directly; all other names go through the
    by-name search endpoint.  Returns a BeautifulSoup document.
    """
    key = name.lower()
    if key in disambig:
        ath_id, sex = disambig[key]
        page = requests.get('http://www.tilastopaja.net/db/at.php',
                            params={'ID': ath_id, 'Sex': sex},
                            cookies=tilastopaja_cookies)
    else:
        page = requests.get('http://www.tilastopaja.net/db/ats.php',
                            params={'Name': name},
                            cookies=tilastopaja_cookies)
    return bs4.BeautifulSoup(page.text, 'html.parser')
def get_athid(name):
    """Resolve an athlete name to a (tilastopaja ID, sex code) tuple.

    Checks the `disambig` override table first; otherwise performs a
    name search and reads ID/Sex back out of the query string of the
    URL the search redirected to.  Raises KeyError if the search did
    not land on a single athlete page.
    """
    key = name.lower()
    if key in disambig:
        return disambig[key]
    final_url = requests.get('http://www.tilastopaja.net/db/ats.php',
                             params={'Name': name},
                             cookies=tilastopaja_cookies).url
    query = urllib.parse.parse_qs(urllib.parse.urlparse(final_url).query)
    return (query['ID'][0], query['Sex'][0])
def update_nirca():
    """Refresh the local NIRCA-Database checkout, then re-run 2to3 on it.

    The checkout is hard-reset before pulling because 2to3 rewrites the
    (Python 2) sources in place on every update.
    """
    import subprocess
    repo = '/meta/h/habs/letsrun/NIRCA-Database/'
    for git_args in (['reset', '--hard'], ['pull']):
        subprocess.check_output(['/usr/pkg/bin/git'] + git_args, cwd=repo)
    subprocess.check_output(['/meta/h/habs/usr/bin/2to3',
                             '--output-dir=/meta/h/habs/letsrun/NIRCA-Database',
                             '-W', '-n',
                             '/meta/h/habs/letsrun/NIRCA-Database'])
def normalize_distance(distunit):
    """Normalize a user-supplied distance into a (distance, unit) pair.

    `distunit` is either two tokens [<dist>, <unit>] or one combined
    token such as '5k', '1600m', 'mile', 'marathon'.  Returns the
    distance as a string and the unit as 'km' or 'mi'; meter distances
    are converted to kilometers.  A unit that cannot be recognized is
    returned as a BBCode error marker so the caller's reply shows it.
    """
    if distunit[0].lower() in ['marathon','mar']:
        dist = '42.195'
        unit = 'km'
    elif distunit[0].lower() in ['hm','half']:
        dist = '21.0975'
        unit = 'km'
    else:
        if len(distunit) == 2:
            dist = distunit[0]
            # Bug fix: lowercase the separate unit token too, so
            # '5 KM' parses the same as '5km' (the single-token branch
            # below already lowercased).
            unit = distunit[1].lower()
        else:
            # Single token like '5k': digits/dots -> distance, rest -> unit.
            dist = ''.join([c for c in distunit[0] if c.isdigit() or c == '.'])
            unit = ''.join([c for c in distunit[0] if not c.isdigit() and not c == '.']).lower()
        if dist == '': dist = '1'  # bare unit ('mile') means one of it
        if unit in ['km','k','kilometer','kilometers','kilometre','kilometres']:
            unit = 'km'
        elif unit in ['mi','miles','mile']:
            unit = 'mi'
        elif unit in ['m','meter','meters','metre','metres']:
            unit = 'km'
            dist = str(float(dist)/1000.0)
        else:
            unit = "[i]could not determine unit[/i]"
    return (dist,unit)
def miles_to_km(dist):
    """Convert a distance in miles (string) to kilometers (string)."""
    kilometers = float(dist) * 1.609344
    return str(kilometers)
# Track event name -> tilastopaja event id (used by the 'states' command).
event_to_id = {
'55': '20',
'60': '30',
'200': '50',
'300': '60',
'400': '70',
'500': '78',
'600': '80',
'800': '90',
'1000': '100',
'1500': '110',
'mile': '120',
'2000': '130',
'3000': '140',
'2m': '150',
'5000': '160',
'55h': '240',
'60h': '250',
'400h': '300',
'hj': '310',
'pv': '320',
'lj': '330',
'tj': '340',
'sp': '350',
'wt': '370',
'hep': '400',
'3000w': '420',
'5000w': '430',
'4x4': '580',
}
def parse(args, times = 2):
    """Dispatch one TrackBot command and return a BBCode reply string.

    `args` is the whitespace-split command line, command word first;
    `times` is the number of sentences the 'simulate' command emits.
    Returns '' for an empty command; unknown commands produce only the
    trailing blank-line separator.  Error replies end in a blank line.
    """
    resp = ''
    if len(args) == 0: return resp
    cmd = args[0].lower()
    # Drop a trailing period so "TrackBot! pbs mo farah." still matches.
    if args[-1].endswith('.') and len(args[-1]) > 1:
        args[-1] = args[-1][:len(args[-1])-1]
    if cmd == 'pbs' or cmd == 'prs':
        # Personal bests, scraped from the athlete's tilastopaja PB table.
        athname = ' '.join(args[1:])
        try:
            pbsoup = get_tilas(athname).find('div',id='pbDiv').table
        except:
            return '[i]Could not find / disambiguate %s[/i]\r\n\r\n' % athname
        resp += '[b]PRs for %s:[/b]\r\n\r\n' % athname
        for event in pbsoup.find_all('font',size='2'):
            climate = event.find_next('span',onclick=lambda x:x and 'PB' in x)
            mark = climate.find_next('td')
            date = mark.find_next('td',align='right')
            resp += '%s (%s): %s on %s\r\n' % (event.text,climate.text,mark.text.strip(),date.text)
            # Also report the companion indoor/outdoor best when present.
            odoor = climate.find_next('span',onclick=lambda x:x and ' '.join(climate['onclick'].split()[:2]) in x)
            if odoor is not None:
                mark = odoor.find_next('td')
                date = mark.find_next('td',align='right')
                resp += '%s (%s): %s on %s\r\n' % (event.text,odoor.text,mark.text.strip(),date.text)
    elif cmd == 'dope':
        # Doping history: local `dopes` table first, then tilastopaja.
        athname = ' '.join(args[1:])
        try:
            athinfo = get_athid(athname)
        except:
            return '[i]Could not find / disambiguate %s[/i]\r\n\r\n' % athname
        if athinfo in dopes:
            resp += '[i]Doping offense found for %s:[/i]\r\n\r\n' % athname
            resp += dopes[athinfo]
        else:
            dope = get_tilas(athname).find(string='Doping offence: ')
            if dope is None:
                resp += '[i]No doping offenses found for %s[/i]\r\n' % athname
            else:
                resp += '[i]Doping offense found for %s:[/i]\r\n\r\n' % athname
                resp += '\r\n'.join(dope.find_parent('td').strings) + '\r\n'
    elif cmd == 'compare':
        # Head-to-head record between two athletes ("A vs B").
        for i in range(len(args)):
            if args[i].lower() in ['vs','vs.','versus','and','with']: args[i] = 'vs'
        aths = ' '.join(args[1:]).split('vs')
        athn1 = aths[0].strip()
        athn2 = aths[1].strip()
        try:
            ath1,gen1 = get_athid(athn1)
        except:
            return '[i]Could not find / disambiguate %s[/i]\r\n\r\n' % athn1
        try:
            ath2,gen2 = get_athid(athn2)
        except:
            return '[i]Could not find / disambiguate %s[/i]\r\n\r\n' % athn2
        if gen1 != gen2:
            return '[i]Athletes must be of the same gender to do a head-to-head matchup[/i]\r\n\r\n'
        if ath1 == ath2:
            return '[i]Nice try, but athletes must be different to compare[/i]\r\n\r\n'
        if gen1 == '1': phpfile = 'test2.php'
        else: phpfile = 'test2w.php'
        csoup = bs4.BeautifulSoup(requests.get('http://www.tilastopaja.net/db/'+phpfile,params={'at1':ath1,'at2':ath2,'season':'0'},cookies=tilastopaja_cookies).text,'html.parser')
        rows = csoup.find_all('tr')
        if len(rows) <= 2:
            return 'No head-to-head matchups found between %s and %s\r\n\r\n' % (athn1,athn2)
        resp += 'Head-to-head record between [b]%s (left)[/b] and [b]%s (right)[/b]:\r\n\r\n' % (athn1,athn2)
        for match in rows[1:len(rows)-1]:
            event = match.find_next('font',size='2')
            venue = event.find_next('font',size='2')
            at1res = venue.find_next('font',size='2')
            at2res = at1res.find_next('font',size='2')
            resp += '[i]%s: %s:[/i] %s [i]VS[/i] %s\r\n' % (venue.text.strip(),event.text.strip(),at1res.text.strip(),at2res.text.strip())
        athtots = rows[-1].find_all('font',size='4')[1:]
        ath1wins = int(athtots[0].text)
        ath2wins = int(athtots[1].text)
        resp += '\r\n[b]%s [i]"%s"[/i] %s[/b] total wins: [b]%s[/b]\r\n' % (athn1.split()[0],'MF' if ath1wins > ath2wins else 'DEVASTATED',' '.join(athn1.split()[1:]),athtots[0].text)
        resp += '[b]%s [i]"%s"[/i] %s[/b] total wins: [b]%s[/b]\r\n' % (athn2.split()[0],'MF' if ath2wins > ath1wins else 'DEVASTATED',' '.join(athn2.split()[1:]),athtots[1].text)
    elif cmd == 'vdot':
        # VDOT + equivalent race times via runsmartproject's calculator.
        time = args[1]  # NOTE: shadows the `time` module within this branch
        if time[0] == '0' and len(time) > 1: time = time[1:]
        # Zero-pad each component, then fill out to hh:mm:ss.
        # NOTE(review): a 2-digit component becomes 3 digits here
        # ('4:30' -> '00:04:030'); the API appears to tolerate it -- confirm.
        time = ':'.join(['0'+sep for sep in time.split(':')])
        if time.count(':') == 0:
            time = '00:00:' + time
        elif time.count(':') == 1:
            time = '00:' + time
        dist, unit = normalize_distance(args[2:])
        rspfp = requests.post('https://runsmartproject.com/vdot/app/api/find_paces',data={'distance':dist,'unit':unit,'time':time,'pace':'empty','punit':'mi','temp':'','tunit':'F','alt':'','aunit':'ft','advtype':'temperature','predict':'true'})
        rspfp = rspfp.json()
        if 'paces' not in rspfp or 'vdot' not in rspfp: return 'Error in VDOT calculation\r\n\r\n'
        resp += '[b]VDOT for %s %s%s: %s[/b]\r\n' % (args[1],dist,unit,rspfp['vdot'])
        equivs = rspfp['paces']['equivs']
        resp += 'Equivalent race times based on VDOT:\r\n\r\n'
        for equiv in equivs:
            resp += '%s: %s\r\n' % (equiv['distance'],equiv['time'])
    elif cmd == 'predictions' or cmd == 'prediction':
        # A user's LetsRun Olympic-contest picks.
        user = ' '.join(args[1:])
        picks = False
        picksoup = bs4.BeautifulSoup(requests.get('https://www.letsrun.com/letsruncontest/scoringindivcontest.php',params={'login':user},headers=headers).text,'html.parser')
        resp += '[b]LRC Olmypic predictions for user %s:[/b]\r\n' % user
        for row in picksoup.find_all('tr')[4:]:
            evname = row.find_next('td')
            gold = evname.find_next('td')
            silver = gold.find_next('td')
            bronze = silver.find_next('td')
            if gold.text.strip() != '' or silver.text.strip() != '' or bronze.text.strip() != '':
                picks = True
                resp += '[i]%s:[/i] ' % evname.text
                if gold.text.strip() != '': resp += '1. %s ' % gold.text.strip()
                if silver.text.strip() != '': resp += '2. %s ' % silver.text.strip()
                if bronze.text.strip() != '': resp += '3. %s' % bronze.text.strip()
                resp += '\r\n'
        if picks == False:
            return '[i]No picks found for user %s[/i]\r\n\r\n' % user
    elif cmd == 'team':
        # TFRRS season bests for a college team; gender word is optional.
        import lrc_tfrrs as tfrrs
        female = False
        if args[-1].lower() in ['women','girls','f','female']:
            female = True
            teamname = args[1:-1]
        elif args[-1].lower() in ['men','male','boys','m']:
            female = False
            teamname = args[1:-1]
        else:
            female = False
            teamname = args[1:]
        teamname = ' '.join(teamname)
        if female:
            if teamname.lower() in tfrrs.tfrrs_f:
                teamid = tfrrs.tfrrs_f[teamname.lower()]
            else:
                return '[i]team not found[/i]\r\n\r\n'
        else:
            if teamname.lower() in tfrrs.tfrrs_m:
                teamid = tfrrs.tfrrs_m[teamname.lower()]
            else:
                return '[i]team not found[/i]\r\n\r\n'
        teamsoup = bs4.BeautifulSoup(requests.get('https://www.tfrrs.org/teams/%s.html' % teamid).text,'html.parser')
        resp += 'Season bests for [b]%s %s[/b]:\r\n' % (teamname,'Women' if female else 'Men')
        for row in teamsoup.find_all('div',class_='data',limit=3)[2].find_next('table').find_all('tr'):
            cols = row.find_all('td')
            cols = [col.text.strip().replace('\n','') for col in cols]
            if len(cols) == 4:
                event,name,year,mark = cols
                resp += '[i]%s: %s (%s):[/i] %s\r\n' % (event,name,year,mark)
            else:
                # Relay rows have no class-year column.
                event,names,mark = cols
                resp += '[i]%s: %s:[/i] %s\r\n' % (event,names,mark)
    elif cmd == 'nirca':
        # NIRCA speed rating for one athlete (or delegate to nircateam).
        if args[1].lower() == 'team':
            # Bug fix: list.insert() returns None, so the original passed
            # None to parse(); build the delegated argument list instead.
            return parse(['nircateam'] + args[2:])
        athname = ' '.join(args[1:])
        import subprocess
        update_nirca()
        rating = subprocess.check_output(['/meta/h/habs/usr/bin/python3','/meta/h/habs/letsrun/NIRCA-Database/Examples/example3.py',athname],cwd='/meta/h/habs/letsrun/NIRCA-Database',env=nirca_env)
        rating = str(rating).replace('\\n','\r\n').splitlines(True)
        resp += 'NIRCA speed rating for athlete [b]%s[/b]:\r\n' % athname
        ratingstr = ''
        for lineno in range(len(rating)):
            if 'Best Match' in rating[lineno]:
                ratingstr = ''.join(rating[lineno:-2])
        if ratingstr.strip() == '':
            return '[i]athlete %s not found (or has no results this season)[/i]\r\n\r\n' % athname
        resp += ratingstr
    elif cmd == 'nircateam':
        # NIRCA speed ratings for a whole team; title-case every word
        # except 'of' to match the database's team naming.
        teamname = ''
        for teamword in args[1:]:
            if teamword.lower() == 'of':
                teamname += ' of'
            else:
                teamname += ' ' + teamword.title()
        teamname = teamname[1:]
        import subprocess
        update_nirca()
        rating = subprocess.check_output(['/meta/h/habs/usr/bin/python3','/meta/h/habs/letsrun/NIRCA-Database/Examples/example2.py','-t',teamname],cwd='/meta/h/habs/letsrun/NIRCA-Database',env=nirca_env)
        rating = str(rating)[2:-3].replace('\\n','\r\n').splitlines(True)
        resp += 'NIRCA speed ratings for team [b]%s[/b]:\r\n' % teamname
        resp += ''.join(rating)
    elif cmd == 'optimize':
        # Even-pace "optimized" finish time computed from lap splits.
        if args[1].isdigit():
            distunit = (args[1],args[2])
            laptimes = args[3:]
        else:
            distunit = [args[1]]
            laptimes = args[2:]
        dist, unit = normalize_distance(distunit)
        # The optimizer service wants the distance in meters.
        if unit == 'mi':
            unit = 'm'
            dist = str(float(miles_to_km(dist))*1000)
        elif unit == 'km':
            unit = 'm'
            dist = str(float(dist)*1000)
        newlaptimes = []
        for lap in laptimes:
            # Convert h:m:s / m:s lap notation to plain seconds.
            if lap.count(':') == 2:
                lap = str(int(lap.split(':')[0])*60*60+int(lap.split(':')[1])*60+int(lap.split(':')[2]))
            elif lap.count(':') == 1:
                lap = str(int(lap.split(':')[0])*60+int(lap.split(':')[1]))
            newlaptimes.append(lap)
        laptimes = ' '.join(newlaptimes)
        optim = requests.post("http://timescalculator.appspot.com/optim",data={'laptimes':laptimes,'distance':dist}).json()
        if 'optimizedTime' not in optim:
            resp += '[b]Error calculating optimized time[/b]\r\n'
        else:
            resp += '[b]Optimized time for %s %s%s based on even pacing:[/b]\r\n' % (optim['inputTime'],dist,unit)
            resp += '[b][i]%s[/i][/b]\r\n' % (optim['optimizedTime'])
    elif cmd == 'topposters':
        # WIP: tally the most frequent poster in the archive (prints only).
        cur.execute("select author from letsrun_posts")
        results = cur.fetchall()
        print(len(results))
        print(most_common(results))
        pass
    elif cmd == 'simulaten':
        # 'simulaten N user': run 'simulate' N times, capped at 100.
        times = int(args[1])
        if times > 100: times = 100
        # Bug fix: pass the capped value, not the raw user-supplied count.
        return parse(['simulate'] + args[2:], times)
    elif cmd == 'simulate':
        # Markov-chain pastiche built from a user's archived posts.
        user = ' '.join(args[1:])
        cur.execute("select body from letsrun_posts where author=%s",
                    (user,))
        results = cur.fetchall()
        if len(results) < 2:
            return '[i]Not enough posts from user %s in the database (since August 2016)[/i]\r\n\r\n' % (user)
        corpus = '\n'.join([ re.sub('<.*?>',' ',t[0]) for t in results ])
        model = markovify.Text(corpus, state_size = 2)
        tbody = ''
        bs = 0
        while bs < times:
            count = 0
            body = None
            # make_sentence() can return None; retry up to 10 times.
            while body is None and count < 10:
                body = model.make_sentence()
                count += 1
            if body is None:
                return '[i]Error generating Markov chain for user %s[/i]\r\n\r\n' % (user)
            body = str(body.encode('ascii','ignore'))[2:-1]
            tbody += body + '\r\n\r\n'
            bs += 1
        resp += '[i]TrackBot simulation sentence for user [b]%s[/b]:[/i]\r\n\r\n' % (user)
        resp += tbody + '\r\n'
        times = 10
    elif cmd == 'states':
        # Per-US-state bests for one event/gender from tilastopaja.
        if len(args) != 3: return '[i]Error: Wrong number of arguments (expected event then gender)[/i]\r\n\r\n'
        if args[1].lower() not in event_to_id: return '[i]Error: Event %s not found[/i]\r\n\r\n' % (args[1])
        resp += '[i]State bests for the [b]%s[/b] for [b]%s[/b]:[/i]\r\n' % (args[1],args[2])
        eventid = event_to_id[args[1].lower()]
        gender = '0' if args[2] == 'women' else '1'
        for indoors in ['0','1']:
            statesdict = {}
            resp += '\r\n[b]%s:[/b]\r\n' % ('Outdoors' if indoors == '0' else 'Indoors')
            soup = bs4.BeautifulSoup(requests.get('http://www.tilastopaja.net/db/alltfull.php',params={'Ind':indoors,'Event':eventid,'Sex':gender},cookies=tilastopaja_cookies).text,'html.parser')
            rows = [ [ r.a.text if len(r.find_all('a')) != 0 else r.text for r in row.find_all('td') ] for row in soup.find_all('table')[-1].find_all('tr') ]
            rows = [ row for row in rows if len(row) > 10 ]
            # Column 9 holds the venue; a trailing 2-letter word is read
            # as the US state abbreviation, anything else bins to 'Other'.
            states = [ row[9].split()[-1] if len(row[9].split()) > 1 and len(row[9].split()[-1]) == 2 and '.' not in row[9].split()[-1] else 'Other' for row in rows ]
            for idx, state in enumerate(states):
                if state not in statesdict:
                    statesdict[state] = rows[idx]
            for key in statesdict:
                resp += '%s: %s by %s on %s at %s\r\n' % (key, statesdict[key][1], statesdict[key][4], statesdict[key][10], statesdict[key][9])
    elif cmd == 'workout': # WIP
        # Bug fix: the original called .strip() on the list returned by
        # split(','); strip each comma-separated segment instead.
        segments = [seg.strip() for seg in ' '.join(args[1:]).split(',')]
        for seg in segments:
            if 'x' in seg:
                for mseg in [seg.split('x')[1]] * int(seg.split('x')[0]):
                    pass
    # Uniform trailing blank line, separating this reply from the output
    # of any further commands in the same post.
    resp += '\r\n\r\n'
    return resp
def force_force_read_post(postid):
    # Forget that we already replied to `postid`, then force a re-read,
    # so the bot answers the post again even though it was handled before.
    cur.execute("delete from letsrun_calls where id = %s",(str(postid),))
    force_read_post(postid)
def force_read_post(postid):
    # Drop `postid` from the archive so read_post() processes it afresh
    # (read_post skips ids already present in letsrun_posts).
    cur.execute("delete from letsrun_posts where id = %s",(str(postid),))
    read_post(postid)
def reply_body(postid,threadid,subject,body):
    """Post `body` plus the bot signature as a forum reply.

    Records the reply in letsrun_calls first so a crash mid-post cannot
    cause a duplicate answer on the next run.
    """
    full_body = body + '[i]I am a bot. Info: habs.sdf.org/trackbot[/i]'
    cur.execute("insert into letsrun_calls (id,thread) values (%s,%s)",(str(postid),str(threadid)))
    con.commit()
    post_data = {'board':'1','parent':str(postid),'thread':str(threadid),'hide':'','author':'TrackBot','ppass':PASS,'email':'','subject':subject,'body':full_body}
    r = requests.post('https://www.letsrun.com/forum/process_post.php',data=post_data,headers=headers)
def reply_cmd(postid,threadid,subject,cmd):
    """Parse the command string `cmd` and post its output as a reply."""
    tokens = cmd.split()
    reply_body(postid,threadid,subject,parse(tokens))
def read_post(postid):
    """Fetch one forum post, archive it, and answer any TrackBot commands.

    Posts already present in letsrun_posts are skipped.  Posts in the
    dedicated Cleverbot thread get a conversational reply; everywhere
    else each post line starting with the CALL prefix is parsed as a
    bot command (at most 10 commands answered per post).
    """
    print("reading "+str(postid))
    # Skip anything we have already archived.
    cur.execute("select id from letsrun_posts where id = %s",(str(postid),))
    if len(cur.fetchall()) >= 1: return
    time.sleep(0.1)  # be gentle with the forum server
    reply = bs4.BeautifulSoup(requests.get('https://www.letsrun.com/forum/post.php?reply='+str(postid),headers=headers).text,'html.parser')
    authsoup, subsoup, rsoup = reply.find_all('div',class_='original_message_content',limit=3)[0:3]
    # Strip quoted text so the bot only reacts to the poster's own words.
    [q.extract() for q in rsoup.find_all('blockquote')]
    rtext = ''.join([str(i) for i in rsoup.contents])
    subject = subsoup.string #urllib.parse.quote_plus(subsoup.string)
    author = authsoup.string
    thread = reply.find('input',attrs={'name':'thread'})['value']
    cur.execute("insert into letsrun_posts (id,thread,title,body,author) values (%s,%s,%s,%s,%s)",(str(postid),str(thread),str(subject).strip(),str(rtext).strip(),str(author).strip()))
    con.commit()
    body = ''
    cmds = 0
    if thread == '8130337' and author != 'TrackBot':
        # Cleverbot conversation thread: conversation state lives in cs.txt.
        with open('/meta/h/habs/letsrun/cs.txt','r') as f:
            cs = f.read()
        body = '[quote][B]'+author+' wrote:[/b]\r\n\r\n'+rtext+'[/quote]\r\n\r\n'
        cbot = requests.get("https://www.cleverbot.com/getreply",params={
            'key': CLEVERBOT,
            'input': rtext,
            'cs': cs,
        }).json()
        body += cbot['output']
        with open('/meta/h/habs/letsrun/cs.txt','w') as f:
            f.write(cbot['cs'])
        body += '\r\n\r\n'
        reply_body(postid,thread,subject,body)
    else:
        for line in rtext.split('\n'):
            print(line.lower()+"="+CALL.lower())
            if line.lower().startswith(CALL.lower()):
                print("match")
                cmds += 1
                args = line[len(CALL):].strip().split()
                if cmds <= 10:  # cap answered commands per post
                    body += parse(args)
                cmd = args[0]
        if body.strip() != '':
            print(postid, thread, subject, body)
            reply_body(postid,thread,subject,body)
if __name__ == '__main__':
    # Locate the most recently active thread on board 1, jump to its last
    # page to learn the newest post id, then sweep the 300 ids leading up
    # to it (read_post skips anything already archived).
    index = bs4.BeautifulSoup(requests.get('https://www.letsrun.com/forum/forum.php?board=1',headers=headers).text,'html.parser')
    recent_thread = index.find_all('span',class_='post_title',limit=2)[1].find_next('a')['href']
    rtsoup = bs4.BeautifulSoup(requests.get(recent_thread,headers=headers).text,'html.parser')
    pager = rtsoup.find('ul',class_='pagination')
    if pager is not None:
        # Multi-page thread: follow the link to the final page.
        recent_thread = pager.find_all('li')[-2].a['href']
        rtsoup = bs4.BeautifulSoup(requests.get(recent_thread,headers=headers).text,'html.parser')
    # Post anchors carry the post id in their (non-blank) name attribute.
    newestid = int(rtsoup.find('ul',class_='thread').find_all('a',attrs={'name':re.compile('^(?!\s*$).+')})[-1]['name'])
    for postid in range(newestid-299,newestid+1):
        read_post(postid)
def get_tfrrs_teams():
    """Scrape TFRRS for every team id, split by gender, and print both maps.

    Maintenance helper used to regenerate the lrc_tfrrs module; not
    called by the bot at runtime.  Searching for every letter of the
    alphabet as the team name makes the site list all teams.
    """
    search = requests.post('https://www.tfrrs.org/site_search.html',
                           data={'athlete':'ATHLETE NAME',
                                 'team':' '.join(list(string.ascii_lowercase)),
                                 'meet':'MEET NAME'})
    listsoup = bs4.BeautifulSoup(search.text,'html.parser')
    male_teams = {}
    female_teams = {}
    for cell in listsoup.find_all('a',href=lambda x:x.startswith('//www.tfrrs.org/teams/')):
        teamid = ''.join([dig for dig in cell['href'] if dig.isdigit()])
        gender = cell.text.split()[-1]
        teamname = ' '.join(cell.text.split()[:-1])
        if gender == '(F)':
            female_teams[teamname] = teamid
        else:
            male_teams[teamname] = teamid
    print(female_teams)
    print(male_teams)