Skip to content

Commit

Permalink
Fetch all item icon info at once
Browse files Browse the repository at this point in the history
  • Loading branch information
squaresmile committed Oct 24, 2024
1 parent 7f49d34 commit ab78d5f
Show file tree
Hide file tree
Showing 2 changed files with 38 additions and 53 deletions.
70 changes: 35 additions & 35 deletions generate-lookup.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
import gc
from io import BytesIO
import json
import requests
from io import BytesIO
from pathlib import Path

import numpy as np
import pandas as pd
import os
import requests


class NpEncoder(json.JSONEncoder):
Expand All @@ -30,19 +30,20 @@ def default(self, obj):
print("Fetching items", end="", flush=True)

item_df = pd.read_csv(ITEM_SHEET_URL)
item_df = item_df[item_df["Image link"].str.contains("Items/") == True]
item_df = item_df[item_df["Image link"].str.contains("Items/") == 1]
item_dict = item_df.set_index("ID")[["NA Name", "Image link"]].T.to_dict()

del item_df

print("... Fetched.\nFetching item icons...", end=" ", flush=True)

for item_id in item_dict:
response = requests.get(f'https://api.atlasacademy.io/nice/JP/item/{item_dict[item_id]["Image link"].split("Items/")[1].split("_")[0]}')
response.raise_for_status()
item_dict[item_id]["rarity"] = response.json()["background"]
nice_items_r = requests.get("https://api.atlasacademy.io/export/JP/nice_item.json")
nice_items_r.raise_for_status()
nice_items = {item["id"]: item for item in nice_items_r.json()}
for item_info in item_dict.values():
item_info["rarity"] = nice_items[
int(item_info["Image link"].split("Items/")[1].split("_")[0])
]["background"]

gc.collect()

print("Fetched.\nFetching dropsheet", end="", flush=True)

Expand All @@ -51,15 +52,10 @@ def default(self, obj):

excel_data = BytesIO(response.content)

image_urls = {}

all_sheets = pd.read_excel(excel_data, sheet_name=None)

print("... Loaded.", flush=True)

del excel_data

gc.collect()

result_dict = {}
rarity_order = {
Expand All @@ -72,7 +68,6 @@ def default(self, obj):
"monument": 7,
"piece": 8,
}
item_sorter = lambda x: (rarity_order.get(x["rarity"], 0), x["id"])

print(f"Sheet names: {DROP_SHEET_NAMES}\nProcessing data...", flush=True)

Expand All @@ -85,10 +80,14 @@ def default(self, obj):
# df.columns[1] # The index in the admin info sheet
df_cleaned.columns = df_cleaned.iloc[0]
df_cleaned = df_cleaned[df_cleaned.iloc[:, 0] != "Item"]
df_cleaned = df_cleaned[df_cleaned.iloc[:, 8] != "1P+1L+1T"].dropna(axis=1, how="all")
df_cleaned = df_cleaned[df_cleaned.iloc[:, 8] != "1P+1L+1T"].dropna(
axis=1, how="all"
)

with pd.option_context("future.no_silent_downcasting", True):
df_cleaned = df_cleaned.fillna(np.nan).replace([np.nan], [None]).reset_index(drop=True)
df_cleaned = (
df_cleaned.fillna(np.nan).replace([np.nan], [None]).reset_index(drop=True)
)

columns = ["No.", "Area", "Quest", "AP", "BP/AP", "AP/Drop", "Drop Chance", "Runs"]

Expand All @@ -113,8 +112,12 @@ def default(self, obj):
"Quest": df_cleaned.iloc[j, index_base_add + 5], # Quest
"AP": df_cleaned.iloc[j, index_base_add + 6], # AP
"BP/AP": df_cleaned.iloc[j, index_base_add + 7], # BP/AP
"AP/Drop": df_cleaned.iloc[j, index_base_add + 8], # AP/Drop
"Drop Chance": df_cleaned.iloc[j, index_base_add + 10], # Drop Chance
"AP/Drop": df_cleaned.iloc[
j, index_base_add + 8
], # AP/Drop
"Drop Chance": df_cleaned.iloc[
j, index_base_add + 10
], # Drop Chance
"Runs": df_cleaned.iloc[j, index_base_add + 12], # Runs
}

Expand All @@ -141,7 +144,9 @@ def default(self, obj):
{
"name": name,
"image": item_dict[ID]["Image link"],
"id": item_dict[ID]["Image link"].split("Items/")[1].split("_")[0],
"id": item_dict[ID]["Image link"]
.split("Items/")[1]
.split("_")[0],
"rarity": rarity,
"data": sub_dict_list,
}
Expand All @@ -150,23 +155,18 @@ def default(self, obj):
if "APDrop" in sheet_name:
sheet_name = sheet_name.replace("APDrop", "AP/Drop")

result_dict[sheet_name] = sorted(sheet_list, key=item_sorter)

print(f"... Done.", flush=True)

result_dict[sheet_name] = sorted(
sheet_list, key=lambda x: (rarity_order.get(x["rarity"], 0), x["id"])
)

mats_file_name = "./assets/mats.json"
print("... Done.", flush=True)

try:
os.makedirs(os.path.dirname(mats_file_name), exist_ok=True)

with open(mats_file_name, "w") as f:
json.dump(result_dict, f, cls=NpEncoder)
except FileNotFoundError:
mats_file_name = input(f"`{mats_file_name}` does not exist; provide alternate file path: ")
mats_file_name = Path("./assets/mats.json").resolve()
mats_file_name.parent.mkdir(parents=True, exist_ok=True)

with open(mats_file_name, "w") as f:
json.dump(result_dict, f, cls=NpEncoder)
with open(mats_file_name, "w") as f:
json.dump(result_dict, f, cls=NpEncoder)


print(f"Wrote drop data to `{mats_file_name}`", flush=True)
21 changes: 3 additions & 18 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,18 +1,3 @@
certifi==2024.8.30
charset-normalizer==3.4.0
click==8.1.7
et-xmlfile==1.1.0
idna==3.10
mypy-extensions==1.0.0
numpy==2.1.2
openpyxl==3.1.5
packaging==24.1
pandas==2.2.3
pathspec==0.12.1
platformdirs==4.3.6
python-dateutil==2.9.0.post0
pytz==2024.2
requests==2.32.3
six==1.16.0
tzdata==2024.2
urllib3==2.2.3
pandas==2.*
openpyxl==3.*
requests==2.*

0 comments on commit ab78d5f

Please sign in to comment.