import pandas as pd
import json
import random
import re
import hextorgb
import openpyxl
# Removes emoji data from the JSON; this is needed to avoid instability. Previously jsonread.py.
def remove_emoji_from_json(input_file_path, output_file_path):
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    with open(input_file_path, 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    for key, value in data.items():
        if isinstance(value, str):
            data[key] = emoji_pattern.sub(r'', value)
    with open(output_file_path, 'w') as json_file:
        json.dump(data, json_file)
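# Only top-level string values are cleaned; nested structures such as data["pack"] are left
# untouched. For example (hypothetical key): {"mapName": "MyWorld 😀"} becomes {"mapName": "MyWorld "}.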
# Adds a unique colour to every barony; used by the cells generation method.
def colorRandom(input_file, output_file):
    with open(input_file) as f:
        data = json.load(f)
    for feature in data['features']:
        feature['properties']['color'] = hextorgb.color_gen()
    with open(output_file, 'w') as f:
        json.dump(data, f)
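# hextorgb is imported at the top of this file and is assumed to provide color_gen(), returning a
# random "#rrggbb" colour string (the format hex_to_rgb further below expects). A minimal stand-in,
# only an assumption and not the project's implementation, could look like:
#
#     def color_gen():
#         return "#{:06x}".format(random.randint(0, 0xFFFFFF))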
# Exports the emoji-stripped JSON to a spreadsheet. Previously jsontoxlsprovicnes.py.
def json_to_sheet(input_file_path, output_file_path):
    # Load the JSON data from the file
    with open(input_file_path) as file:
        data = json.load(file)
    # Extract the "states", "provinces", "cultures", "religions" and "burgs" packs from the data
    states = data["pack"]["states"]
    provinces = data["pack"]["provinces"]
    culture = data["pack"]["cultures"]
    religions = data["pack"]["religions"]
    burgs = data["pack"]["burgs"]
    # Create a list of dictionaries, where each dictionary represents a row of data for the states
    states_rows = []
    for cell in states:
        if "name" not in cell or "i" not in cell:
            continue  # skip this row if name or i is missing
        row = {
            "i": cell["i"],
            "name": cell["name"],
            "diplomacy": cell.get("diplomacy", ""),
            "form": cell.get("form", ""),
            "formName": cell.get("formName", "")
        }
        states_rows.append(row)
    # Create a list of dictionaries, where each dictionary represents a row of data for the provinces
    provinces_rows = []
    suffixes = ["castle", "town", "field", "pool", "by", "toft", "worth", "llyn", "ay", "y", "ey", "bost",
                "caster", "chester", "cester", "leigh", "ley", "borough", "bury", "burgh", "wick"]
    names_set = set()  # to keep track of unique province names
    for cell in provinces:
        if not isinstance(cell, dict) or "name" not in cell:
            continue  # skip this row if cell is not a dictionary or name is missing
        name = cell["name"]
        base_name = name
        suffix_idx = 1
        while name in names_set:  # check if name is already in the set
            name = base_name + suffixes[suffix_idx - 1]  # add a suffix to the name
            suffix_idx += 1
        names_set.add(name)  # add the unique name to the set
        row = {
            "i": cell["i"],
            "state": cell["state"],
            "center": cell["center"],
            "burg": cell["burg"],
            "name": name,
            "formName": cell["formName"],
            "fullName": cell["fullName"],
            "color": cell["color"],
        }
        provinces_rows.append(row)
    # Create a list of dictionaries for the religions
    religions_rows = []
    for cell in religions:
        if isinstance(cell, dict):
            origins = cell.get("origins")
            origin = None
            if origins and isinstance(origins, list) and len(origins) > 0:
                origin = origins[0]
            row = {
                "i": cell.get("i"),
                "name": cell.get("name"),
                "type": cell.get("type"),
                "form": cell.get("form"),
                "deity": cell.get("deity"),
                "center": cell.get("center"),
                "origin": origin,
            }
            religions_rows.append(row)
    # Create a list of dictionaries for the cultures
    cultures_rows = []
    for cell in culture:
        if not isinstance(cell, dict) or "name" not in cell or "i" not in cell:
            continue  # skip this row if it's not a dictionary or name or i is missing
        name_words = cell["name"].split()
        row = {
            "i": cell["i"],
            "name": name_words[0],
        }
        if "type" in cell:
            row["type"] = cell["type"]
        if "origins" in cell and isinstance(cell["origins"], list) and len(cell["origins"]) > 0:
            row["origin"] = cell["origins"][0]
        cultures_rows.append(row)
    # Create a list of dictionaries for the burgs, giving duplicate names a random suffix
    suffixes = ["castle", "town", "field", "pool", "by", "toft", "worth", "llyn", "ay", "y", "ey", "bost",
                "caster", "chester", "cester", "leigh", "ley", "borough", "bury", "burgh", "wick"]
    names = set()
    burgs_rows = []
    for cell in burgs:
        if isinstance(cell, dict) and cell:
            name = cell.get("name", "")  # default to an empty string so the concatenation below cannot fail
            suffix = ""
            while name + suffix in names:
                suffix = f"{random.choice(suffixes)}"
                if len(names) >= 4 * len(suffixes) * len(burgs):
                    print("ERROR: Could not generate unique names for all burgs.")
                    exit()
            names.add(name + suffix)
            row = {
                "i": cell.get("i", None),
                "cell": cell.get("cell", None),
                "name": name + suffix,
                "type": cell.get("type", None),
            }
            burgs_rows.append(row)
    # Create data frames from the lists of dictionaries
    states_df = pd.DataFrame(states_rows, columns=["i", "name", "diplomacy", "form", "formName"])
    provinces_df = pd.DataFrame(provinces_rows, columns=["i", "state", "center", "burg", "name", "formName", "fullName", "color"])
    cultures_df = pd.DataFrame(cultures_rows, columns=["i", "name", "type", "origin"])
    religion_df = pd.DataFrame(religions_rows, columns=["i", "name", "color", "culture", "type", "form", "deity", "center", "origin"])
    burgs_df = pd.DataFrame(burgs_rows, columns=["i", "cell", "name", "type"])
    # Save each data frame to a separate sheet in the same Excel file
    with pd.ExcelWriter(output_file_path) as writer:
        states_df.to_excel(writer, sheet_name="states", index=False)
        provinces_df.to_excel(writer, sheet_name="provinces", index=False)
        cultures_df.to_excel(writer, sheet_name="cultures", index=False)
        religion_df.to_excel(writer, sheet_name="religion", index=False)
        burgs_df.to_excel(writer, sheet_name="burgs", index=False)
#json_to_sheet('noemoji.json', 'output.xlsx')
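# The input JSON is expected to contain the map generator's "pack" data, roughly:
#   {"pack": {"states": [...], "provinces": [...], "cultures": [...],
#             "religions": [...], "burgs": [...]}}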
def combined_data_empires(combined_data):
    # Load the workbook
    workbook = openpyxl.load_workbook(combined_data)
    # Select the sheet to work with
    sheet = workbook['states']
    # Add new columns for Suzerain and Vassals
    sheet.cell(row=1, column=4, value="Suzerain of")
    sheet.cell(row=1, column=5, value="Vassals of")
    # Extract the column of diplomacy values and save the Suzerain and Vassal indices
    for i, cell in enumerate(sheet['C'][1:], start=2):
        value = cell.value
        if isinstance(value, str) and 'Suzerain' in value:
            indices = [j for j, x in enumerate(eval(value)) if x == 'Suzerain']
            sheet.cell(row=i, column=4, value=str(indices))
        elif isinstance(value, str):
            sheet.cell(row=i, column=4, value="[]")
        else:
            sheet.cell(row=i, column=4, value=None)
        if isinstance(value, str) and 'Vassal' in value:
            indices = [j for j, x in enumerate(eval(value)) if x == 'Vassal']
            sheet.cell(row=i, column=5, value=str(indices))
        elif isinstance(value, str):
            sheet.cell(row=i, column=5, value="[]")
        else:
            sheet.cell(row=i, column=5, value=None)
    # Save the updated workbook
    workbook.save(combined_data)
#combined_data_empires("combined_data.xlsx")
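# The "diplomacy" column (C) is assumed to hold a stringified list of relation strings,
# e.g. "['Neutral', 'Suzerain', 'Vassal', ...]"; the indices written above are the positions
# of the 'Suzerain' / 'Vassal' entries within that list.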
def combined_data_empires_id_to_name(combined_data):
    # Read the Excel file
    file_path = combined_data
    wb = openpyxl.load_workbook(file_path)
    # Select the 'states' sheet
    ws = wb['states']
    # Create a new column for the emperor names
    ws.insert_cols(6)
    ws.cell(row=1, column=6).value = 'emperor'
    # Iterate through each row in the worksheet
    for row in ws.iter_rows(min_row=2, values_only=True):
        # Check that the vassal id is not empty
        if row[4] != '[]' and row[4] is not None:
            # Extract the vassal id number
            vassal_id = int(row[4].strip('[]'))
            # Find any matching rows in the worksheet
            for match_row in ws.iter_rows(min_row=2, min_col=1, max_col=2, values_only=True):
                if match_row[0] == vassal_id:
                    # Update the emperor name
                    ws.cell(row=row[0] + 2, column=6).value = match_row[1]
                    break
    # Save the changes to the same file
    wb.save(file_path)
#combined_data_empires_id_to_name("combined_data.xlsx")
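# Note: int(row[4].strip('[]')) assumes a single id in the bracketed list; a value such as
# "[2, 5]" would raise a ValueError here.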
def update_provincedef_empires_vassalsuzerain(combined_data, provinceDef):
    # Open the input file and select the "states" sheet
    input_file = openpyxl.load_workbook(combined_data)
    states_sheet = input_file["states"]
    # Open the output file and select its first sheet ("Sheet1")
    output_file = openpyxl.load_workbook(provinceDef)
    provinceDef_sheet = output_file["Sheet1"]
    # Loop through each row in the "states" sheet and check whether the "emperor" column is filled
    for row in states_sheet.iter_rows(min_row=2, values_only=True):
        emperor = row[5]
        if emperor:
            name = row[1]
            # Replace any instance of the state name with the emperor value in column I (the 9th column)
            for cell in provinceDef_sheet["I"]:
                if cell.value == name:
                    cell.value = emperor
    # Save the changes to the output file
    output_file.save(provinceDef)
# Exports the cells GeoJSON data to a spreadsheet. Previously xlsoutput.py.
def cells_geojson_to_sheet(input_file_path, output_file_path):
    with open(input_file_path) as f:
        data = json.load(f)
    # Create empty lists to store the properties and geometries
    properties = []
    geometries = []
    # Loop through each feature in the GeoJSON file
    for feature in data["features"]:
        # Get the properties and geometry of the feature
        properties.append(feature["properties"])
        geometries.append(feature["geometry"])
    # Convert the lists of properties and geometries to dataframes
    properties_df = pd.DataFrame(properties)
    geometries_df = pd.DataFrame(geometries)
    df = pd.concat([properties_df, geometries_df], axis=1)
    # Rename the columns "state" to "Kingdom", "province" to "County", and capitalise religion/culture
    df.rename(columns={'state': 'Kingdom', 'province': 'County', 'religion': 'Religion', 'culture': 'Culture'},
              inplace=True)

    def hex_to_rgb(hex_color):
        if not hex_color.startswith("#") or len(hex_color) != 7:
            return None
        return tuple(int(hex_color[i:i + 2], 16) for i in (1, 3, 5))
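    # For reference: hex_to_rgb("#ff8800") returns (255, 136, 0); values that are not "#rrggbb"
    # strings (e.g. "nan" after the astype(str) call below) fall through to None.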
df["color"] = df["color"].astype(str)
df[["red", "green", "blue"]] = df["color"].apply(hex_to_rgb).apply(pd.Series)
df = df.drop("color", axis=1)
names = ['Abingdon', 'Albrighton', 'Alcester', 'Almondbury', 'Altrincham', 'Amersham', 'Andover', 'Appleby',
'Ashboume', 'Atherstone', 'Aveton', 'Axbridge', 'Aylesbury', 'Baldock', 'Bamburgh', 'Barton',
'Basingstoke', 'Berden', 'Bere', 'Berkeley', 'Berwick', 'Betley', 'Bideford', 'Bingley', 'Birmingham',
'Blandford', 'Blechingley', 'Bodmin', 'Bolton', 'Bootham', 'Boroughbridge', 'Boscastle', 'Bossinney',
'Bramber', 'Brampton', 'Brasted', 'Bretford', 'Bridgetown', 'Bridlington', 'Bromyard', 'Bruton',
'Buckingham', 'Bungay', 'Burton', 'Calne', 'Cambridge', 'Canterbury', 'Carlisle', 'Castleton', 'Caus',
'Charmouth', 'Chawleigh', 'Chichester', 'Chillington', 'Chinnor', 'Chipping', 'Chisbury', 'Cleobury',
'Clifford', 'Clifton', 'Clitheroe', 'Cockermouth', 'Coleshill', 'Combe', 'Congleton', 'Crafthole',
'Crediton', 'Cuddenbeck', 'Dalton', 'Darlington', 'Dodbrooke', 'Drax', 'Dudley', 'Dunstable', 'Dunster',
'Dunwich', 'Durham', 'Dymock', 'Exeter', 'Exning', 'Faringdon', 'Felton', 'Fenny', 'Finedon', 'Flookburgh',
'Fowey', 'Frampton', 'Gateshead', 'Gatton', 'Godmanchester', 'Grampound', 'Grantham', 'Guildford',
'Halesowen', 'Halton', 'Harbottle', 'Harlow', 'Hatfield', 'Hatherleigh', 'Haydon', 'Helston', 'Henley',
'Hertford', 'Heytesbury', 'Hinckley', 'Hitchin', 'Holme', 'Hornby', 'Horsham', 'Kendal', 'Kenilworth',
'Kilkhampton', 'Kineton', 'Kington', 'Kinver', 'Kirby', 'Knaresborough', 'Knutsford', 'Launceston',
'Leighton', 'Lewes', 'Linton', 'Louth', 'Luton', 'Lyme', 'Lympstone', 'Macclesfield', 'Madeley',
'Malborough', 'Maldon', 'Manchester', 'Manningtree', 'Marazion', 'Marlborough', 'Marshfield', 'Mere',
'Merryfield', 'Middlewich', 'Midhurst', 'Milborne', 'Mitford', 'Modbury', 'Montacute', 'Mousehole',
'Newbiggin', 'Newborough', 'Newbury', 'Newenden', 'Newent', 'Norham', 'Northleach', 'Noss', 'Oakham',
'Olney', 'Orford', 'Ormskirk', 'Oswestry', 'Padstow', 'Paignton', 'Penkneth', 'Penrith', 'Penzance',
'Pershore', 'Petersfield', 'Pevensey', 'Pickering', 'Pilton', 'Pontefract', 'Portsmouth', 'Preston',
'Quatford', 'Reading', 'Redcliff', 'Retford', 'Rockingham', 'Romney', 'Rothbury', 'Rothwell', 'Salisbury',
'Saltash', 'Seaford', 'Seasalter', 'Sherston', 'Shifnal', 'Shoreham', 'Sidmouth', 'Skipsea', 'Skipton',
'Solihull', 'Somerton', 'Southam', 'Southwark', 'Standon', 'Stansted', 'Stapleton', 'Stottesdon',
'Sudbury', 'Swavesey', 'Tamerton', 'Tarporley', 'Tetbury', 'Thatcham', 'Thaxted', 'Thetford', 'Thornbury',
'Tintagel', 'Tiverton', 'Torksey', 'Totnes', 'Towcester', 'Tregoney', 'Trematon', 'Tutbury', 'Uxbridge',
'Wallingford', 'Wareham', 'Warenmouth', 'Wargrave', 'Warton', 'Watchet', 'Watford', 'Wendover', 'Westbury',
'Westcheap', 'Weymouth', 'Whitford', 'Wickwar', 'Wigan', 'Wigmore', 'Winchelsea', 'Winkleigh', 'Wiscombe',
'Witham', 'Witheridge', 'Wiveliscombe', 'Woodbury', 'Yeovil'
    ]
    # Build a random two-part barony name for every cell
    df['Barony'] = [random.choice(names) + random.choice(names) for i in range(len(df))]
    # Save the dataframe to an Excel spreadsheet
    df.to_excel(output_file_path, index=False)
# Takes names from the exported JSON data and combines them with the cells data to generate provinceDef.xlsx.
def nameCorrector(cells_file_path, combined_file_path, updated_file_path):
    # Load the cells spreadsheet into a pandas DataFrame
    df1 = pd.read_excel(cells_file_path)
    # Load the combined-data spreadsheet; as written by json_to_sheet the sheets are
    # 0 = states (default), 1 = provinces, 2 = cultures, 3 = religion
    df2 = pd.read_excel(combined_file_path)
    df3 = pd.read_excel(combined_file_path, sheet_name=1)
    df4 = pd.read_excel(combined_file_path, sheet_name=2)
    df5 = pd.read_excel(combined_file_path, sheet_name=3)
    # Create dictionaries that map id numbers to names
    mapping = dict(zip(df2['i'], df2['name']))
    provmapping = dict(zip(df3['i'], df3['name']))
    culturemapping = dict(zip(df4['i'], df4['name']))
    religionmapping = dict(zip(df5['i'], df5['name']))
    # Replace the id numbers in the cells DataFrame with the associated names
    df1['Kingdom'] = df1['Kingdom'].map(mapping)
    df1['County'] = df1['County'].map(provmapping)
    df1['Religion'] = df1['Religion'].map(religionmapping)
    df1['Culture'] = df1['Culture'].map(culturemapping)
    # Save the updated first DataFrame to a new Excel file
    df1.to_excel(updated_file_path, index=False)
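# Note: Series.map leaves ids with no match as NaN, so any Kingdom/County/Religion/Culture value
# that is missing from combined_data ends up blank in the updated file.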
# Rearranges the nameCorrector data into the format needed by the Map Filler tool.
def provinceDefCells(file_path, output_path):
    # Load the existing Excel file
    wb = openpyxl.load_workbook(file_path)
    # Create a new sheet
    ws = wb.create_sheet("Copied Data")
    # Get the data from the original sheet
    source_sheet = wb["Sheet1"]
    # Copy data from the source columns into the new layout
    for i in range(1, source_sheet.max_row + 1):
        cell_value = source_sheet.cell(row=i, column=1).value
        if isinstance(cell_value, int):
            ws.cell(row=i, column=2, value=cell_value + 1)
        else:
            ws.cell(row=i, column=2, value=cell_value)
        ws.cell(row=i, column=3, value=source_sheet.cell(row=i, column=13).value)
        ws.cell(row=i, column=4, value=source_sheet.cell(row=i, column=14).value)
        ws.cell(row=i, column=5, value=source_sheet.cell(row=i, column=15).value)
        ws.cell(row=i, column=6, value=source_sheet.cell(row=i, column=16).value)
        ws.cell(row=i, column=9, value=source_sheet.cell(row=i, column=6).value)
        ws.cell(row=i, column=10, value=source_sheet.cell(row=i, column=6).value)
        ws.cell(row=i, column=11, value=source_sheet.cell(row=i, column=7).value)
        ws.cell(row=i, column=12, value=source_sheet.cell(row=i, column=7).value)
        # For the religion name: blank out "no religion" and strip non-alphanumeric characters
        value = source_sheet.cell(row=i, column=9).value
        if value is not None:
            if value.lower() in ["no religion", "no_religion"]:
                value = ""  # make the cell blank
            else:
                value = re.sub(r'\W+', '', value).lower()  # remove non-alphanumeric characters and spaces
        ws.cell(row=i, column=14, value=value)
    # Save only the "Copied Data" sheet in the output file
    wb_copied_data = openpyxl.Workbook()
    ws_copied_data = wb_copied_data.active
    ws_copied_data.title = "Copied Data"
    for row in ws.iter_rows(values_only=True):
        ws_copied_data.append(row)
    wb_copied_data.save(output_path)
import os
def terrainGenIdtoName(cells_path, biomes_path):
    # Load the biomes file (no header row)
    biomes_df = pd.read_excel(biomes_path, header=None)
    # Create a dictionary mapping biome IDs to biome names
    biome_dict = dict(zip(biomes_df.iloc[:, 1], biomes_df.iloc[:, 0]))
    # Load the cellsData file
    cells_df = pd.read_csv(cells_path)
    # Replace biome IDs with biome names using the dictionary
    cells_df['biome'] = cells_df['biome'].map(biome_dict)
    # Save the updated cellsData file back to the same path
    output_path = cells_path
    cells_df.to_csv(output_path, index=False)
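# The biomes workbook is assumed to hold the biome name in column 0 and its numeric id in
# column 1, e.g. (illustrative values only):
#   Marine      | 0
#   Hot desert  | 1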
def terrainGenRGB(townBiomes, provinceDef):
    # Load the CSV file into a DataFrame
    town_biomes = pd.read_csv(townBiomes)
    # Load the Excel file into a DataFrame and select only the RGB columns
    province_def = pd.read_excel(provinceDef, usecols=['R', 'G', 'B'])
    # Add new columns with the RGB values from province_def
    merged = town_biomes.merge(province_def, left_on='i', right_index=True, how='left')
    # Save the updated DataFrame back to the CSV file
    merged.to_csv(townBiomes, index=False)
#terrainGenRGB('townBiomes.csv', '_mapFiller/provinceDef.xlsx')
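# Note: the merge above joins townBiomes' "i" column against provinceDef's 0-based row index
# (right_index=True), so it assumes provinceDef rows are ordered so that the row index lines up
# with the "i" ids.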
def terrainGen(cellsData, provinceTerraintxt):
    # Read in the CSV file
    df = pd.read_csv(cellsData)
    # Define the biome-terrain mapping with weights
    biome_terrain_map = {
        'Marine': {
            'plains': 1.0
        },
        'Hot desert': {
            'desert': 0.6,
            'desert_mountains': 0.3,
            'drylands': 0.1
        },
        'Cold desert': {
            'desert': 0.6,
            'desert_mountains': 0.3,
            'tundra': 0.1
        },
        'Savanna': {
            'drylands': 1.0
        },
        'Grassland': {
            'steppe': 1.0
        },
        'Tropical seasonal forest': {
            'jungle': 0.85,
            'plains': 0.10,
            'hills': 0.05
        },
        'Temperate deciduous forest': {
            'forest': 0.9,
            'hills': 0.1
        },
        'Tropical rainforest': {
            'jungle': 1.0
        },
        'Temperate rainforest': {
            'forest': 0.9,
            'hills': 0.1
        },
        'Taiga': {
            'taiga': 0.9,
            'hills': 0.1
        },
        'Tundra': {
            'tundra': 0.9,
            'hills': 0.1
        },
        'Glacier': {
            'mountains': 1.0
        },
        'Wetland': {
            'wetlands': 1.0,
        }
    }
    # Create a list to hold the assigned terrain for each cell
    terrain_list = []
    # Loop through each row in the DataFrame and assign a terrain based on the biome
    for i, row in df.iterrows():
        biome = row['biome']
        terrain_weights = biome_terrain_map.get(biome)
        # Fall back to plains if the biome is not found in the mapping
        if terrain_weights is None:
            assigned_terrain = 'plains'
        else:
            # Check the population and override the terrain for specific biomes
            if biome == 'Temperate deciduous forest' and row['population'] > 40000:
                assigned_terrain = 'farmlands'
            elif biome == 'Hot desert' and row['population'] > 40000:
                assigned_terrain = 'floodplains'
            else:
                assigned_terrain = random.choices(
                    list(terrain_weights.keys()),
                    weights=list(terrain_weights.values())
                )[0]
        terrain_list.append(f"{row['i']}={assigned_terrain}")
    # Write the assigned terrain for each cell to a text file
    with open(provinceTerraintxt, 'w') as f:
        f.write('\n'.join(terrain_list))
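# Each line of the output file pairs a cell id with its assigned terrain, e.g.
#   1234=forest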
from PIL import Image
import csv
def state_to_goverment_type():
    # Maps a state "form" name to a government type identifier
    government_dict = {
"Monarchy": "feudal_government",
"Beylik": "clan_government",
"Despotate": "feudal_government",
"Dominion": "feudal_government",
"Duchy": "feudal_government",
"Emirate": "clan_government",
"Empire": "feudal_government",
"Horde": "tribal_government",
"Grand Duchy": "feudal_government",
"Heptarchy": "tribal_government",
"Khaganate": "tribal_government",
"Khanate": "tribal_government",
"Kingdom": "feudal_government",
"Marches": "feudal_government",
"Principality": "feudal_government",
"Satrapy": "mercenary_government",
"Shogunate": "feudal_government",
"Sultanate": "feudal_government",
"Tsardom": "feudal_government",
"Ulus": "tribal_government",
"Viceroyalty": "feudal_government",
"Republic": "republic_government",
"Chancellery": "republic_government",
"City-state": "republic_government",
"Diarchy": "feudal_government",
"Federation": "republic_government",
"Free City": "republic_government",
"Most Serene Republic": "republic_government",
"Oligarchy": "republic_government",
"Protectorate": "republic_government",
"Tetrarchy": "feudal_government",
"Trade Company": "mercenary_government",
"Triumvirate": "republic_government",
"Union": "republic_government",
"Confederacy": "republic_government",
"Confederation": "republic_government",
"Conglomerate": "republic_government",
"Commonwealth": "republic_government",
"League": "republic_government",
"United Hordes": "tribal_government",
"United Kingdom": "feudal_government",
"United Provinces": "republic_government",
"United Republic": "republic_government",
"United States": "republic_government",
"United Tribes": "tribal_government",
"Theocracy": "theocracy_government",
"Bishopric": "theocracy_government",
"Brotherhood": "holy_order_government",
"Caliphate": "theocracy_government",
"Diocese": "theocracy_government",
"Divine Duchy": "theocracy_government",
"Divine Grand Duchy": "theocracy_government",
"Divine Principality": "theocracy_government",
"Divine Kingdom": "theocracy_government",
"Divine Empire": "theocracy_government",
"Eparchy": "holy_order_government",
"Exarchate": "holy_order_government",
"Holy State": "holy_order_government",
"Imamah": "theocracy_government",
"Patriarchate": "holy_order_government",
"Anarchy": "republic_government",
"Commune": "republic_government",
"Community": "republic_government",
"Council": "republic_government",
"Free Territory": "republic_government",
"Tribes": "tribal_government"
    }
    return government_dict
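# Hedged usage sketch (the fallback value here is an assumption, not set by this script):
#   form_to_government = state_to_goverment_type()
#   government = form_to_government.get(state_form, "feudal_government")
# Example pipeline calls (uncomment to run):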
#remove_emoji_from_json("emoji.json", "noemoji.json")
#colorRandom("input.geojson","output.geojson")
#json_to_sheet("noemoji.json","combined_data.xlsx")
#cells_geojson_to_sheet("output.geojson","cellsData.xlsx")
#nameCorrector('cellsData.xlsx', 'combined_data.xlsx', 'updated_file.xlsx')
#provinceDefCells("updated_file.xlsx", "provinceDef.xlsx")