Sample Notebook
We provide a sample notebook for interacting with the API. The ipynb file for this notebook can be found at [luptai-installation-directory]/src/python/notebooks/model_runner.ipynb and can be executed in any Jupyter environment (e.g. JupyterLab).
Note
To run the notebook, the LUPTAI Python modules must be on PYTHONPATH.
This can be achieved by editing the PYTHONPATH environment variable to include the directory that contains the LUPTAI Python source code. Alternatively, to add it just for this notebook, uncomment the cell below and replace the path string with the path to the Python source code on your local machine.
# import sys
# sys.path.append(r'D:\src\LUPTAI\luptai_core\src\python')
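If you are unsure whether the path is set up correctly, the following optional check (not part of the original notebook) fails with a clear message before any LUPTAI imports are attempted.
# Optional: verify that the luptai package can be found on sys.path
import importlib.util
if importlib.util.find_spec('luptai') is None:
    raise ImportError('luptai not found on sys.path; see the note above')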
import os
from luptai.model import ModelRunner
from luptai.model.parameters import InputParameters, BatchSettings, MajorMode, PTMode
from luptai.database.scenario_data import ScenarioDataConnection
from luptai.model.model_data.export_data import export_model_data, AttributedActivitiesExport
from luptai.model.model_data.import_data import ResultsImport
Assign parameter values
Adjust each of these variables as needed for your specific run.
# The scenario we are interested in
scenario_id = 'Scenario558'
# Point to the directories that contain the scenario info
working_dir = r'D:\LUPTAI\projects\test_project'
paras_path = os.path.join(working_dir, 'Parameters/default.json')
scenario_path = os.path.join(working_dir, f'Scenarios/{scenario_id}.sqlite')
data_path = os.path.join(working_dir, r'ModelData')
# Assembly directory, where the LUPTAI DLLs and EXEs are located on your local machine
assembly_directory = r'D:\LUPTAI\codebase\luptai\bin'
# Load up the parameters file
input_paras = InputParameters.load_from_json(paras_path)
# Connect to the scenario database
sdc = ScenarioDataConnection(scenario_path)
# The purposes to run: a list of purpose types, as strings, to be used for this run.
# Alternatively, use sdc.get_activities_list() to get all activities available in the scenario.
purposes = ["BANK"]
# purposes = sdc.get_activities_list()
# Modes for the run
major_modes = [MajorMode.WALK_ONLY, MajorMode.PT_AND_WALK]
pt_modes = [PTMode.BUS, PTMode.TRAIN, PTMode.BUSWAY, PTMode.FERRY, PTMode.LIGHT_RAIL]
# Time Periods
time_periods = [input_paras.get_am_peak()]
# Initialise the model runner and batch settings objects needed for the run
runner = ModelRunner(assembly_directory)
bs = BatchSettings()
Setup for the run
No need to change anything here; this cell just sets up the model inputs based on the variables above.
# Populate the run settings for this batch
bs.input_parameters = input_paras
bs.major_modes = major_modes
bs.pt_modes = pt_modes
bs.time_periods = time_periods
bs.project_id = "test"
bs.scenario_id = scenario_id
bs.scenario_model_data_root = data_path
# Before adding purposes, we check that they are in the scenario and convert them from strings to PyPurposes
purps_in_scenario = sdc.get_activities_list()
all_purposes = input_paras.get_purposes_as_py_purpose_list()
bs.purposes = [p for p in all_purposes if p.get_activity_type() in purposes and p.get_activity_type() in purps_in_scenario]
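As an optional sanity check (not part of the original notebook), you can warn about any requested purpose strings that are not available in this scenario before starting the run.
# Optional check: report requested purposes that are missing from this scenario
missing_purposes = [p for p in purposes if p not in purps_in_scenario]
if missing_purposes:
    print(f'Warning: purposes not found in {scenario_id}: {missing_purposes}')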
Do the Run
No changes necessary here; this just runs the model.
# export the data for the run
export_model_data(sdc, data_path)
# This actually does the run
runner.do_batch_settings_run(bs)
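The same calls can be reused to run several scenarios in a row. The commented sketch below is a hypothetical extension (the second scenario ID is a placeholder) and assumes that re-pointing the scenario connection and batch settings at each scenario, then exporting and running as above, is sufficient.
# Hypothetical multi-scenario loop (uncomment and adjust the IDs to use it)
# for sid in ['Scenario558', 'Scenario559']:
#     sdc_i = ScenarioDataConnection(os.path.join(working_dir, f'Scenarios/{sid}.sqlite'))
#     bs.scenario_id = sid
#     export_model_data(sdc_i, data_path)
#     runner.do_batch_settings_run(bs)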
Get the results
Here we collect the results into a dictionary of dataframes, which we can analyse directly in this notebook, and save them to the scenario database.
# get a dictionary of dataframes containing the results from the model runs.
results = runner.get_results()
# save the results to the scenario database
for cr in results.keys():
    result_file = os.path.join(data_path, f'{cr}.xml')
    if os.path.isfile(result_file):
        ResultsImport.from_xml(result_file).to_scenario_db(sdc)
Analyse the results
Feel free to carry out any further analysis of the results below.
first_result = next(iter(results))
print(f'Results for first 20 nodes of model run {first_result}:')
results[first_result].head(20)
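As a further example of what can be done with the returned dataframes, the snippet below (an assumed addition, relying only on standard pandas methods) prints a quick summary of each result and writes it to a CSV file in the working directory; the output file names are illustrative.
# Optional: summarise each result and export it to CSV for use outside the notebook
for name, df in results.items():
    print(f'{name}: {df.shape[0]} rows, {df.shape[1]} columns')
    df.to_csv(os.path.join(working_dir, f'{name}_results.csv'), index=False)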