diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..e69de29
diff --git a/.gitignore b/.gitignore
index 15e9211..f031a21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,5 @@ sandag_rsm/__pycache__
.ipynb_checkpoints
sandag_rsm.egg-info
_version.py
+.DS_Store
+test/data/*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 35cc926..333916f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.1.0
+ rev: v4.3.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
@@ -9,7 +9,7 @@ repos:
- id: trailing-whitespace
- repo: https://github.com/kynan/nbstripout
- rev: 0.5.0
+ rev: 0.6.1
hooks:
- id: nbstripout
@@ -20,11 +20,11 @@ repos:
args: ["--profile", "black", "--filter-files"]
- repo: https://github.com/psf/black
- rev: 21.12b0
+ rev: 22.10.0
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
- rev: 4.0.1
+ rev: 5.0.4
hooks:
- id: flake8
diff --git a/README.md b/README.md
index 673e61b..c4d9257 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,16 @@
# RSM
Rapid Strategic Model for the San Diego Association of Governments
+## Source Code Access
+
+The source code for the RSM is stored in this repository. You can access it
+via GitHub, or check out the repository using Git. Some larger files (especially
+for testing) are stored using [git-lfs](https://git-lfs.github.com/) (large file
+storage). This is mostly transparent, but you do need to make sure the LFS
+extension is installed before you clone the repository; otherwise, the large
+files are checked out as small pointer stubs rather than their actual content.
+Visit [git-lfs](https://git-lfs.github.com/) for platform-specific instructions.
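+
+For example, a first-time checkout might look like this (the clone URL below is
+a placeholder; substitute this repository's actual address):
+
+```shell
+# one-time setup: register the git-lfs filters with your Git config
+git lfs install
+# then clone as usual; LFS-tracked files are downloaded automatically
+git clone <repository-url>
+```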
+
+
## Installing
To install, activate the python or conda environment you want to use,
@@ -10,6 +20,13 @@ the cd into the repository directory and run:
python -m pip install -e .
```
+This will make the `sandag_rsm` package available, so you can `import sandag_rsm`
+to access the functions in this tool regardless of the current working directory,
+without needing to point the Python path anywhere. Using the `-e` flag installs
+in "editable" mode, so if you make changes or pull updates from GitHub, those
+updates are available to Python without re-installing.
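+
+As a quick, optional sanity check, the package should now import from any
+directory:
+
+```shell
+python -c "import sandag_rsm; print(sandag_rsm.__file__)"
+```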
+
+
## Code Formatting
This repo use several tools to ensure a consistent code format throughout the project:
@@ -34,7 +51,8 @@ with `git commit --no-verify`.
## Developing with Docker
-To build the docker container, change into the repository root and run:
+This project uses [Docker](https://www.docker.com/). To build the Docker
+container for development, change into the repository root and run:
```shell
docker build --tag sandag_rsm .
@@ -42,11 +60,19 @@ docker build --tag sandag_rsm .
### Jupyter Notebook for Development
-On the host machine, run:
+On the host machine, on Linux or macOS, run:
```shell
docker run -v $(pwd):/home/mambauser/sandag_rsm -p 8899:8899 \
- -it sandag_rsm jupyter notebook --ip 0.0.0.0 --no-browser --allow-root \
+ -it --rm sandag_rsm jupyter notebook --ip 0.0.0.0 --no-browser --allow-root \
+ --port 8899 --notebook-dir=/home/mambauser
+```
+
+or on Windows, where `%cd%` expands to the current working directory, run:
+
+```shell
+docker run -v %cd%:/home/mambauser/sandag_rsm -p 8899:8899 ^
+ -it --rm sandag_rsm jupyter notebook --ip 0.0.0.0 --no-browser --allow-root ^
--port 8899 --notebook-dir=/home/mambauser
```
diff --git a/environment.yaml b/environment.yaml
index 2a01ecd..5799404 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -10,12 +10,15 @@ dependencies:
- numpy>=1.19
- geopandas
- git
+ - git-lfs
- jupyter
- libpysal
- networkx
- notebook
+ - openmatrix
- pandas
- plotly
+ - pyarrow
- pyproj
- requests=2.25.1
- scikit-learn
diff --git a/notebooks/TranslateDemand.ipynb b/notebooks/TranslateDemand.ipynb
new file mode 100644
index 0000000..822302d
--- /dev/null
+++ b/notebooks/TranslateDemand.ipynb
@@ -0,0 +1,329 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "4fc00298",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import requests\n",
+ "import openmatrix as omx\n",
+ "\n",
+ "from sandag_rsm.translate import translate_demand"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "de5861aa",
+ "metadata": {},
+ "source": [
+ "## Remote I/O"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "1d4896bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_dir = './data-dl/'\n",
+ "\n",
+ "os.makedirs(data_dir, exist_ok=True)\n",
+ "\n",
+ "resource_url = 'https://media.githubusercontent.com/media/wsp-sag/client_sandag_rsm_resources/main/original_omx/'\n",
+ "\n",
+ "download_files_vector = [\n",
+ " 'trip_EA.omx',\n",
+ " 'trip_AM.omx',\n",
+ " 'trip_MD.omx',\n",
+ " 'trip_PM.omx',\n",
+ " 'trip_EV.omx',\n",
+ "]\n",
+ "\n",
+ "# for download_file in download_files_vector:\n",
+ "# r = requests.get((resource_url+download_file), allow_redirects=True)\n",
+ "# open((data_dir+download_file), 'w').write(r.content)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "e09fa2ee",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_dir = './data-dl/'\n",
+ "output_dir = './data-dl/export/'\n",
+ "matrix_names = ['trip_EA.omx', 'trip_AM.omx', 'trip_MD.omx', 'trip_PM.omx', 'trip_EV.omx']\n",
+ "agg_zone_mapping = './../test/data/taz_crosswalk.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "b23b92f9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "os.makedirs(output_dir, exist_ok=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fe41ea9a",
+ "metadata": {},
+ "source": [
+ "## Aggregate Matrices"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "458ad160-33cf-4149-94ca-b436c2a5aafb",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[04:47.70] INFO: Agregating Matrix: trip_EA.omx ...\n",
+ "[06:23.38] INFO: Agregating Matrix: trip_AM.omx ...\n",
+ "[08:00.64] INFO: Agregating Matrix: trip_MD.omx ...\n",
+ "[09:39.43] INFO: Agregating Matrix: trip_PM.omx ...\n",
+ "[11:16.33] INFO: Agregating Matrix: trip_EV.omx ...\n"
+ ]
+ }
+ ],
+ "source": [
+ "translate_demand(\n",
+ " matrix_names,\n",
+ " agg_zone_mapping,\n",
+ " input_dir,\n",
+ " output_dir\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "62a0c438",
+ "metadata": {},
+ "source": [
+ "## Compare Original and Aggregated Matrices"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "cd5d0436",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "matrix_name = 'trip_AM.omx'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "eb4bcad1-fe01-405c-8da7-96ac040a10d6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_matrix = omx.open_file(os.path.join(input_dir, matrix_name), mode=\"r\") \n",
+ "output_matrix = omx.open_file(os.path.join(output_dir, matrix_name), mode=\"r\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "692ecce3-946b-4ca1-ac01-b8610991b8bc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "matrix_cores = input_matrix.list_matrices()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "c3030557-03f4-402c-8da2-306c0b4f268c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(4996, 4996)"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "input_matrix.shape()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "650baf9b-569f-424b-a703-5cb7c75110d4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(2012, 2012)"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "output_matrix.shape()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "a9552989-8251-445f-a3b1-4070d66c06fa",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Core: AM_HOV2_H\n",
+ "Input Sum: 90,480\n",
+ "Output Sum: 90,480\n",
+ "\n",
+ "Core: AM_HOV2_L\n",
+ "Input Sum: 101,330\n",
+ "Output Sum: 101,330\n",
+ "\n",
+ "Core: AM_HOV2_M\n",
+ "Input Sum: 229,308\n",
+ "Output Sum: 229,308\n",
+ "\n",
+ "Core: AM_HOV3_H\n",
+ "Input Sum: 81,707\n",
+ "Output Sum: 81,707\n",
+ "\n",
+ "Core: AM_HOV3_L\n",
+ "Input Sum: 38,662\n",
+ "Output Sum: 38,662\n",
+ "\n",
+ "Core: AM_HOV3_M\n",
+ "Input Sum: 98,403\n",
+ "Output Sum: 98,403\n",
+ "\n",
+ "Core: AM_SOV_NT_H\n",
+ "Input Sum: 222,616\n",
+ "Output Sum: 222,616\n",
+ "\n",
+ "Core: AM_SOV_NT_L\n",
+ "Input Sum: 535,342\n",
+ "Output Sum: 535,342\n",
+ "\n",
+ "Core: AM_SOV_NT_M\n",
+ "Input Sum: 426,238\n",
+ "Output Sum: 426,238\n",
+ "\n",
+ "Core: AM_SOV_TR_H\n",
+ "Input Sum: 185,345\n",
+ "Output Sum: 185,345\n",
+ "\n",
+ "Core: AM_SOV_TR_L\n",
+ "Input Sum: 21,590\n",
+ "Output Sum: 21,590\n",
+ "\n",
+ "Core: AM_SOV_TR_M\n",
+ "Input Sum: 16,563\n",
+ "Output Sum: 16,563\n",
+ "\n",
+ "Core: AM_TRK_H\n",
+ "Input Sum: 58,381\n",
+ "Output Sum: 58,381\n",
+ "\n",
+ "Core: AM_TRK_L\n",
+ "Input Sum: 32,998\n",
+ "Output Sum: 32,998\n",
+ "\n",
+ "Core: AM_TRK_M\n",
+ "Input Sum: 14,039\n",
+ "Output Sum: 14,039\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "for core in matrix_cores:\n",
+ " input_core = input_matrix[core].read()\n",
+ " output_core = output_matrix[core].read()\n",
+ " \n",
+ " input_mtx_sum = input_core.sum().sum()\n",
+ " output_mtx_sum = input_core.sum().sum()\n",
+ " \n",
+ " print(f'Core: {core}')\n",
+ " print(f'Input Sum: {input_mtx_sum:,.0f}')\n",
+ " print(f'Output Sum: {output_mtx_sum:,.0f}\\n')\n",
+ " \n",
+ " assert output_mtx_sum == input_mtx_sum"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "da9761de-2fec-4ba5-a5c1-f0f276398195",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_matrix.close()\n",
+ "output_matrix.close()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.12"
+ },
+ "toc": {
+ "base_numbering": 1,
+ "nav_menu": {},
+ "number_sections": false,
+ "sideBar": true,
+ "skip_h1_title": false,
+ "title_cell": "Table of Contents",
+ "title_sidebar": "Contents",
+ "toc_cell": false,
+ "toc_position": {},
+ "toc_section_display": true,
+ "toc_window_display": false
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "6969d5340a2324284ea9e82958789f0af31a889f2a17dbf954a94cd3bfb1e1ef"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/ZoneAggDemo.ipynb b/notebooks/ZoneAggDemo.ipynb
new file mode 100644
index 0000000..310db9d
--- /dev/null
+++ b/notebooks/ZoneAggDemo.ipynb
@@ -0,0 +1,555 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a845c147",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from sandag_rsm.data_load.zones import load_mgra_data\n",
+ "from sandag_rsm.data_load.triplist import load_trip_list, trip_mode_shares_by_mgra, \\\n",
+ " trip_mode_shares_by_taz\n",
+ "from sandag_rsm.poi import poi_taz_mgra, attach_poi_taz_skims\n",
+ "from sandag_rsm.zone_agg import aggregate_zones, viewer, viewer2, \\\n",
+ " aggregate_zones_within_districts, merge_zone_data, make_crosswalk, \\\n",
+ " mark_centroids"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "73a4f97c",
+ "metadata": {
+ "pycharm": {
+ "name": "#%% md\n"
+ }
+ },
+ "source": [
+ "## Remote I/O"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "542ccfa1",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "from sandag_rsm.data_load import get_test_file\n",
+ "data_dir = \"./data-dl/\"\n",
+ "\n",
+ "mgra_filename = \"mgra13_based_input2016.csv.gz\"\n",
+ "skim_filename = \"traffic_skims_AM_mini.omx\"\n",
+ "trips_filename = \"trips_sample.pq\"\n",
+ "\n",
+ "get_test_file([\n",
+ " mgra_filename, \n",
+ " trips_filename, \n",
+ " skim_filename, \n",
+ "], data_dir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4b5e3239",
+ "metadata": {},
+ "source": [
+ "## Demo"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b8dc27e3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mgra = load_mgra_data(data_dir=data_dir, simplify_tolerance=10, topo=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ae5d7f1c",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "mgra['taz20'] = mgra.taz % 20"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e6d1c5ef",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "trips = load_trip_list(\"trips_sample.pq\", data_dir=data_dir)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4bb2cb35",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "tazs = merge_zone_data(mgra, cluster_id=\"taz\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f3593b1c",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "trip_mode_shares = trip_mode_shares_by_taz(trips, tazs=tazs.index, mgra_gdf=mgra)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e64f7a0b",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "tazs = tazs.join(trip_mode_shares.add_prefix(\"modeshare_\"), on='taz')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "65c404fa",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "poi = poi_taz_mgra(mgra)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "569cfadc",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "poi"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2c974741",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "cluster_factors={'popden':1, 'empden':1, 'modeshare_NM':100, 'modeshare_WT':100}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6cd4cee2",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "tazs, cluster_factors = attach_poi_taz_skims(\n",
+ " tazs,\n",
+ " \"traffic_skims_AM_mini.omx\",\n",
+ " names='AM_SOV_TR_M_TIME',\n",
+ " poi=poi,\n",
+ " data_dir=data_dir,\n",
+ " cluster_factors=cluster_factors,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "772c319e",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "explicit_agg=[\n",
+ "# 571, 588, 606, \n",
+ "# [143, 270, 15],\n",
+ "]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2f7c2125",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "d1 = tazs.query(\"district27 == 1\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a93bd34a",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer(d1, color='popden', marker_line_width=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "073d0c19",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer(d1, color='outside_pendleton_gate_AM_SOV_TR_M_TIME', marker_line_width=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f66915c5",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer(d1, color='modeshare_WT', marker_line_width=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "61b2c6c7",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "cluster_factors"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3234d883",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "kmeans1 = aggregate_zones(\n",
+ " d1, \n",
+ " cluster_factors=cluster_factors, \n",
+ " n_zones=100,\n",
+ " explicit_agg=explicit_agg,\n",
+ " explicit_col='taz',\n",
+ " use_xy=1e-6,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b50a4bc3",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer2(edges=kmeans1, colors=d1, color_col='empden')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "872ac31b",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "from sandag_rsm.zone_agg import aggregate_zones_within_districts\n",
+ "\n",
+ "kmeans = aggregate_zones_within_districts(\n",
+ " tazs, \n",
+ " cluster_factors=cluster_factors, \n",
+ " n_zones=1000,\n",
+ " use_xy=1e-6,\n",
+ " explicit_agg=explicit_agg,\n",
+ " explicit_col='taz',\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f5d6cd56",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "kmeans = kmeans.reset_index(drop=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7e7b56ea",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer2(edges=kmeans, colors=kmeans, color_col='empden')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "aa31c88d",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "agglom3full = aggregate_zones(\n",
+ " tazs, \n",
+ " cluster_factors=cluster_factors, \n",
+ " n_zones=2000,\n",
+ " method='agglom_adj', \n",
+ " use_xy=1e-4,\n",
+ " explicit_agg=explicit_agg,\n",
+ " explicit_col='taz',\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7e8cec6a",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "taz_crosswalk = make_crosswalk(agglom3full, tazs, old_index='taz').sort_values('taz')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "736eac0a",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "mgra_crosswalk = make_crosswalk(agglom3full, mgra, old_index='MGRA').sort_values('MGRA')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "71956608",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "agglom3full = mark_centroids(agglom3full)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a11469a8",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "mgra_crosswalk.to_csv(\"mgra_crosswalk.csv\", index=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "00c3e83e",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "taz_crosswalk.to_csv(\"taz_crosswalk.csv\", index=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ecdfc521",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "agglom3full.to_csv(\"cluster_zones.csv\", index=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "18d6902a",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer2(edges=agglom3full, colors=agglom3full, color_col='empden')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "391a94af",
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "viewer2(edges=agglom3full, colors=agglom3full, color_col='popden')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.6"
+ },
+ "toc": {
+ "base_numbering": 1,
+ "nav_menu": {},
+ "number_sections": false,
+ "sideBar": true,
+ "skip_h1_title": false,
+ "title_cell": "Table of Contents",
+ "title_sidebar": "Contents",
+ "toc_cell": false,
+ "toc_position": {},
+ "toc_section_display": true,
+ "toc_window_display": false
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/data-dl/.gitignore b/notebooks/data-dl/.gitignore
new file mode 100644
index 0000000..928939a
--- /dev/null
+++ b/notebooks/data-dl/.gitignore
@@ -0,0 +1,5 @@
+*.csv
+*.csv.gz
+*.gpkg
+*.omx
+*.pq
diff --git a/sandag_abm/src/main/emme/init_emme_project.py b/sandag_abm/src/main/emme/init_emme_project.py
new file mode 100644
index 0000000..2809405
--- /dev/null
+++ b/sandag_abm/src/main/emme/init_emme_project.py
@@ -0,0 +1,97 @@
+#///////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2019. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// init_emme_project.py ///
+#//// ///
+#//// Usage: init_emme_project.py [-r root] [-t title] ///
+#//// ///
+#//// [-r root]: Specifies the root directory in which to create ///
+#//// the Emme project. ///
+#//// If omitted, defaults to the current working directory ///
+#//// [-t title]: The title of the Emme project and Emme database. ///
+#//// If omitted, defaults to SANDAG empty database. ///
+#//// [-v emmeversion]: Emme version to use to create the project. ///
+#//// If omitted, defaults to 4.3.7. ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#///////////////////////////////////////////////////////////////////////////////
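+#
+# Example invocation (hypothetical paths; the flags correspond to the
+# argparse options defined below):
+#
+#     python init_emme_project.py -r C:\projects\abm -t "SANDAG empty database" -v 4.3.7
+#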
+
+import inro.emme.desktop.app as _app
+import inro.emme.desktop.types as _ws_types
+import inro.emme.database.emmebank as _eb
+import argparse
+import os
+
+WKT_PROJECTION = 'PROJCS["NAD_1983_NSRS2007_StatePlane_California_VI_FIPS_0406_Ft_US",GEOGCS["GCS_NAD_1983_NSRS2007",DATUM["D_NAD_1983_NSRS2007",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["False_Easting",6561666.666666666],PARAMETER["False_Northing",1640416.666666667],PARAMETER["Central_Meridian",-116.25],PARAMETER["Standard_Parallel_1",32.78333333333333],PARAMETER["Standard_Parallel_2",33.88333333333333],PARAMETER["Latitude_Of_Origin",32.16666666666666],UNIT["Foot_US",0.3048006096012192]];-118608900 -91259500 3048.00609601219;-100000 10000;-100000 10000;3.28083333333333E-03;0.001;0.001;IsHighPrecision'
+
+def init_emme_project(root, title, emmeversion):
+ project_path = _app.create_project(root, "emme_project")
+ desktop = _app.start_dedicated(
+ project=project_path, user_initials="WS", visible=False)
+ project = desktop.project
+ project.name = "SANDAG Emme project"
+ prj_file_path = os.path.join(os.path.dirname(project_path), 'NAD 1983 NSRS2007 StatePlane California VI FIPS 0406 (US Feet).prj')
+ with open(prj_file_path, 'w') as f:
+ f.write(WKT_PROJECTION)
+ project.spatial_reference_file = prj_file_path
+ project.initial_view = _ws_types.Box(6.18187e+06, 1.75917e+06, 6.42519e+06, 1.89371e+06)
+ project_root = os.path.dirname(project_path)
+ dimensions = {
+ 'scalar_matrices': 9999,
+ 'destination_matrices': 999,
+ 'origin_matrices': 999,
+ 'full_matrices': 1600,
+
+ 'scenarios': 10,
+ 'centroids': 5000,
+ 'regular_nodes': 29999,
+ 'links': 90000,
+ 'turn_entries': 13000,
+ 'transit_vehicles': 200,
+ 'transit_lines': 450,
+ 'transit_segments': 40000,
+ 'extra_attribute_values': 28000000,
+
+ 'functions': 99,
+ 'operators': 5000
+ }
+
+    # for Emme versions other than 4.3.7, add the sola_analyses dimension
+ if emmeversion != '4.3.7':
+ dimensions['sola_analyses'] = 240
+
+ os.mkdir(os.path.join(project_root, "Database"))
+ emmebank = _eb.create(os.path.join(project_root, "Database", "emmebank"), dimensions)
+ emmebank.title = title
+ emmebank.coord_unit_length = 0.000189394 # feet to miles
+ emmebank.unit_of_length = "mi"
+ emmebank.unit_of_cost = "$"
+ emmebank.unit_of_energy = "MJ"
+ emmebank.node_number_digits = 6
+ emmebank.use_engineering_notation = True
+ scenario = emmebank.create_scenario(100)
+ scenario.title = "Empty scenario"
+ emmebank.dispose()
+
+ desktop.data_explorer().add_database(emmebank.path)
+ desktop.add_modeller_toolbox("%<$ProjectPath>%/scripts/sandag_toolbox.mtbx")
+ desktop.add_modeller_toolbox("%<$ProjectPath>%/scripts/solutions.mtbx")
+ project.save()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Create a new empty Emme project and database with SANDAG defaults.")
+ parser.add_argument('-r', '--root', help="path to the root ABM folder, default is the working folder",
+ default=os.path.abspath(os.getcwd()))
+ parser.add_argument('-t', '--title', help="the Emmebank title",
+ default="SANDAG empty database")
+ parser.add_argument('-v', '--emmeversion', help='the Emme version', default='4.3.7')
+ args = parser.parse_args()
+
+ init_emme_project(args.root, args.title, args.emmeversion)
diff --git a/sandag_abm/src/main/emme/python_virtualenv.pth b/sandag_abm/src/main/emme/python_virtualenv.pth
new file mode 100644
index 0000000..c169252
--- /dev/null
+++ b/sandag_abm/src/main/emme/python_virtualenv.pth
@@ -0,0 +1,3 @@
+# Inserts defined python_virtualenv site-packages into the python module search path if defined
+#
+import sys, os; r=os.environ.get("PYTHON_VIRTUALENV"); t = 1 if r is None else sys.path.insert(0, os.path.join(r, "Lib\\site-packages"));
\ No newline at end of file
diff --git a/sandag_abm/src/main/emme/solutions.mtbx b/sandag_abm/src/main/emme/solutions.mtbx
new file mode 100644
index 0000000..10d5345
Binary files /dev/null and b/sandag_abm/src/main/emme/solutions.mtbx differ
diff --git a/sandag_abm/src/main/emme/solutions_unconsolidated.mtbx b/sandag_abm/src/main/emme/solutions_unconsolidated.mtbx
new file mode 100644
index 0000000..0674c2b
Binary files /dev/null and b/sandag_abm/src/main/emme/solutions_unconsolidated.mtbx differ
diff --git a/sandag_abm/src/main/emme/toolbox/assignment/build_transit_scenario.py b/sandag_abm/src/main/emme/toolbox/assignment/build_transit_scenario.py
new file mode 100644
index 0000000..ec8d4a9
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/assignment/build_transit_scenario.py
@@ -0,0 +1,679 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// build_transit_scenario.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# The build transit scenario tool generates a new scenario in the Transit
+# database (under the Database_transit directory) as a copy of a scenario in
+# the base (traffic assignment) database. The base traffic scenario should have
+# valid results from a traffic assignment for the travel times on links to be
+# available for transit lines in mixed traffic operation.
+#
+#
+# Inputs:
+# period: the corresponding period for the scenario
+# base_scenario_id: the base traffic assignment scenario in the main Emme database
+# scenario_id: the ID to use for the new scenario in the Transit Emme database
+# scenario_title: the title for the new scenario
+# data_table_name: the root name for the source data table for the timed transfer
+# line pairs and the day and regional pass costs.
+# Usually the ScenarioYear
+# overwrite: overwrite the scenario if it already exists.
+#
+#
+# Script example:
+"""
+import inro.modeller as _m
+import inro.emme.database.emmebank as _eb
+import os
+modeller = _m.Modeller()
+desktop = modeller.desktop
+
+build_transit_scen = modeller.tool("sandag.assignment.build_transit_scenario")
+transit_assign = modeller.tool("sandag.assignment.transit_assignment")
+load_properties = modeller.tool('sandag.utilities.properties')
+
+project_dir = os.path.dirname(desktop.project_path())
+main_directory = os.path.dirname(project_dir)
+props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+main_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database", "emmebank"))
+scenario_id = 100
+base_scenario = main_emmebank.scenario(scenario_id)
+
+transit_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database_transit", "emmebank"))
+
+periods = ["EA", "AM", "MD", "PM", "EV"]
+period_ids = list(enumerate(periods, start=int(scenario_id) + 1))
+num_processors = "MAX-1"
+scenarioYear = str(props["scenarioYear"])
+
+for number, period in period_ids:
+ src_period_scenario = main_emmebank.scenario(number)
+ transit_assign_scen = build_transit_scen(
+ period=period, base_scenario=src_period_scenario,
+ transit_emmebank=transit_emmebank,
+ scenario_id=src_period_scenario.id,
+ scenario_title="%s %s transit assign" % (base_scenario.title, period),
+ data_table_name=scenarioYear, overwrite=True)
+ transit_assign(period, transit_assign_scen, data_table_name=scenarioYear,
+ skims_only=True, num_processors=num_processors)
+"""
+
+
+
+TOOLBOX_ORDER = 21
+
+
+import inro.modeller as _m
+import inro.emme.core.exception as _except
+import inro.emme.database.emmebank as _eb
+import traceback as _traceback
+from copy import deepcopy as _copy
+from collections import defaultdict as _defaultdict
+import contextlib as _context
+
+import os
+import sys
+import math
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class BuildTransitNetwork(_m.Tool(), gen_utils.Snapshot):
+
+ period = _m.Attribute(unicode)
+ scenario_id = _m.Attribute(int)
+ base_scenario_id = _m.Attribute(str)
+
+ data_table_name = _m.Attribute(unicode)
+ scenario_title = _m.Attribute(unicode)
+ overwrite = _m.Attribute(bool)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self.data_table_name = None
+ self.base_scenario = _m.Modeller().scenario
+ self.scenario_id = 100
+ self.scenario_title = ""
+ self.overwrite = False
+ self.attributes = [
+ "period", "scenario_id", "base_scenario_id",
+ "data_table_name", "scenario_title", "overwrite"]
+
+ def page(self):
+ if not self.data_table_name:
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_directory = os.path.dirname(project_dir)
+ props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+ self.data_table_name = props["scenarioYear"]
+
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Build transit network"
+ pb.description = """
+ Builds the transit network for the specified period based
+ on existing base (traffic + transit) scenario."""
+ pb.branding_text = "- SANDAG - "
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ options = [("EA", "Early AM"),
+ ("AM", "AM peak"),
+ ("MD", "Mid-day"),
+ ("PM", "PM peak"),
+ ("EV", "Evening")]
+ pb.add_select("period", options, title="Period:")
+
+ root_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_emmebank = _eb.Emmebank(os.path.join(root_dir, "Database", "emmebank"))
+ options = [(scen.id, "%s - %s" % (scen.id, scen.title)) for scen in main_emmebank.scenarios()]
+ pb.add_select("base_scenario_id", options,
+ title="Base scenario (with traffic and transit data):",
+ note="With period traffic results from main (traffic assignment) database at: %s" % main_emmebank.path)
+
+ pb.add_text_box("scenario_id", title="ID for transit assignment scenario:")
+ pb.add_text_box("scenario_title", title="Scenario title:", size=80)
+ pb.add_text_box("data_table_name", title="Data table prefix name:", note="Default is the ScenarioYear")
+ pb.add_checkbox("overwrite", title=" ", label="Overwrite existing scenario")
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ root_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_emmebank = _eb.Emmebank(os.path.join(root_dir, "Database", "emmebank"))
+ base_scenario = main_emmebank.scenario(self.base_scenario_id)
+ transit_emmebank = _eb.Emmebank(os.path.join(root_dir, "Database_transit", "emmebank"))
+ results = self(
+ self.period, base_scenario, transit_emmebank,
+ self.scenario_id, self.scenario_title,
+ self.data_table_name, self.overwrite)
+ run_msg = "Transit scenario created"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, period, base_scenario, transit_emmebank, scenario_id, scenario_title,
+ data_table_name, overwrite=False):
+ modeller = _m.Modeller()
+ attrs = {
+ "period": period,
+ "base_scenario_id": base_scenario.id,
+ "transit_emmebank": transit_emmebank.path,
+ "scenario_id": scenario_id,
+ "scenario_title": scenario_title,
+ "data_table_name": data_table_name,
+ "overwrite": overwrite,
+ "self": str(self)
+ }
+ with _m.logbook_trace("Build transit network for period %s" % period, attributes=attrs):
+ gen_utils.log_snapshot("Build transit network", str(self), attrs)
+ copy_scenario = modeller.tool(
+ "inro.emme.data.scenario.copy_scenario")
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+            if period not in periods:
+ raise Exception(
+ 'period: unknown value - specify one of %s' % periods)
+
+ transit_assignment = modeller.tool(
+ "sandag.assignment.transit_assignment")
+ if transit_emmebank.scenario(scenario_id):
+ if overwrite:
+ transit_emmebank.delete_scenario(scenario_id)
+ else:
+ raise Exception("scenario_id: scenario %s already exists" % scenario_id)
+
+ scenario = transit_emmebank.create_scenario(scenario_id)
+ scenario.title = scenario_title[:80]
+ scenario.has_traffic_results = base_scenario.has_traffic_results
+ scenario.has_transit_results = base_scenario.has_transit_results
+ for attr in sorted(base_scenario.extra_attributes(), key=lambda x: x._id):
+ dst_attr = scenario.create_extra_attribute(attr.type, attr.name, attr.default_value)
+ dst_attr.description = attr.description
+ for field in base_scenario.network_fields():
+ scenario.create_network_field(field.type, field.name, field.atype, field.description)
+ network = base_scenario.get_network()
+ new_attrs = [
+ ("TRANSIT_LINE", "@xfer_from_day", "Fare for xfer from daypass/trolley"),
+ ("TRANSIT_LINE", "@xfer_from_premium", "Fare for first xfer from premium"),
+ ("TRANSIT_LINE", "@xfer_from_coaster", "Fare for first xfer from coaster"),
+ ("TRANSIT_LINE", "@xfer_regional_pass", "0-fare for regional pass"),
+ ("TRANSIT_SEGMENT", "@xfer_from_bus", "Fare for first xfer from bus"),
+ ("TRANSIT_SEGMENT", "@headway_seg", "Headway adj for special xfers"),
+ ("TRANSIT_SEGMENT", "@transfer_penalty_s", "Xfer pen adj for special xfers"),
+ ("TRANSIT_SEGMENT", "@layover_board", "Boarding cost adj for special xfers"),
+ ("NODE", "@network_adj", "Model: 1=TAP adj, 2=circle, 3=timedxfer"),
+ ("NODE", "@network_adj_src", "Orig src node for timedxfer splits"),
+ ]
+ for elem, name, desc in new_attrs:
+ attr = scenario.create_extra_attribute(elem, name)
+ attr.description = desc
+ network.create_attribute(elem, name)
+ network.create_attribute("TRANSIT_LINE", "xfer_from_bus")
+ self._init_node_id(network)
+
+ transit_passes = gen_utils.DataTableProc("%s_transit_passes" % data_table_name)
+ transit_passes = {row["pass_type"]: row["cost"] for row in transit_passes}
+ day_pass = float(transit_passes["day_pass"]) / 2.0
+ regional_pass = float(transit_passes["regional_pass"]) / 2.0
+ params = transit_assignment.get_perception_parameters(period)
+ mode_groups = transit_assignment.group_modes_by_fare(network, day_pass)
+
+ bus_fares = {}
+ for mode_id, fares in mode_groups["bus"]:
+ for fare, count in fares.items():
+ bus_fares[fare] = bus_fares.get(fare, 0) + count
+ # set nominal bus fare as unweighted average of two most frequent fares
+ bus_fares = sorted(bus_fares.items(), key=lambda x: x[1], reverse=True)
+
+ if len(bus_fares) >= 2:
+ bus_fare = (bus_fares[0][0] + bus_fares[1][0]) / 2
+ elif len(bus_fares) == 1: # unless there is only one fare value, in which case use that one
+ bus_fare = bus_fares[0][0]
+ else:
+ bus_fare = 0
+ # find max premium mode fare
+ premium_fare = 0
+ for mode_id, fares in mode_groups["premium"]:
+ for fare in fares.keys():
+ premium_fare = max(premium_fare, fare)
+ # find max coaster_fare by checking the cumulative fare along each line
+ coaster_fare = 0
+ for line in network.transit_lines():
+ if line.mode.id != "c":
+ continue
+ segments = line.segments()
+ first = segments.next()
+ fare = first["@coaster_fare_board"]
+ for seg in segments:
+ fare += seg["@coaster_fare_inveh"]
+ coaster_fare = max(coaster_fare, fare)
+
+ bus_fare_modes = [x[0] for x in mode_groups["bus"]] # have a bus fare, less than the day pass
+ day_pass_modes = [x[0] for x in mode_groups["day_pass"]] # boarding fare is the same as the day pass
+ premium_fare_modes = ["c"] + [x[0] for x in mode_groups["premium"]] # special premium services not covered by day pass
+
+ for line in list(network.transit_lines()):
+ # remove the "unavailable" lines in this period
+ if line[params["xfer_headway"]] == 0:
+ network.delete_transit_line(line)
+ continue
+ # Adjust fare perception by VOT
+ line[params["fare"]] = line[params["fare"]] / params["vot"]
+ # set the fare increments for transfer combinations with day pass / regional pass
+ if line.mode.id in bus_fare_modes:
+ line["xfer_from_bus"] = max(min(day_pass - line["@fare"], line["@fare"]), 0)
+ line["@xfer_from_day"] = 0.0
+ line["@xfer_from_premium"] = max(min(regional_pass - premium_fare, line["@fare"]), 0)
+ line["@xfer_from_coaster"] = max(min(regional_pass - coaster_fare, line["@fare"]), 0)
+ elif line.mode.id in day_pass_modes:
+ line["xfer_from_bus"] = max(day_pass - bus_fare, 0.0)
+ line["@xfer_from_day"] = 0.0
+ line["@xfer_from_premium"] = max(min(regional_pass - premium_fare, line["@fare"]), 0)
+ line["@xfer_from_coaster"] = max(min(regional_pass - coaster_fare, line["@fare"]), 0)
+ elif line.mode.id in premium_fare_modes:
+ if line["@fare"] > day_pass or line.mode.id == "c":
+ # increment from bus to regional
+ line["xfer_from_bus"] = max(regional_pass - bus_fare, 0)
+ line["@xfer_from_day"] = max(regional_pass - day_pass, 0)
+ else:
+ # some "premium" modes lines are really regular fare
+ # increment from bus to day pass
+ line["xfer_from_bus"] = max(day_pass - bus_fare, 0)
+ line["@xfer_from_day"] = 0.0
+ line["@xfer_from_premium"] = max(regional_pass - premium_fare, 0)
+ line["@xfer_from_coaster"] = max(min(regional_pass - coaster_fare, line["@fare"]), 0)
+
+ for segment in network.transit_segments():
+ line = segment.line
+ segment["@headway_seg"] = line[params["xfer_headway"]]
+ segment["@transfer_penalty_s"] = line["@transfer_penalty"]
+ segment["@xfer_from_bus"] = line["xfer_from_bus"]
+ network.delete_attribute("TRANSIT_LINE", "xfer_from_bus")
+
+ self.taps_to_centroids(network)
+ # changed to allow timed xfers for different periods
+            timed_transfers_with_walk = list(gen_utils.DataTableProc("%s_timed_xfer_%s" % (data_table_name, period)))
+ self.timed_transfers(network, timed_transfers_with_walk, period)
+ #self.connect_circle_lines(network)
+ self.duplicate_tap_adajcent_stops(network)
+ # The fixed guideway travel times are stored in "@trtime_link_xx"
+ # and copied to data2 (ul2) for the ttf
+ # The congested auto times for mixed traffic are in "@auto_time"
+ # (output from traffic assignment) which needs to be copied to auto_time (a.k.a. timau)
+ # (The auto_time attribute is generated from the VDF values which include reliability factor)
+ src_attrs = [params["fixed_link_time"]]
+ dst_attrs = ["data2"]
+ if scenario.has_traffic_results and "@auto_time" in scenario.attributes("LINK"):
+ src_attrs.append("@auto_time")
+ dst_attrs.append("auto_time")
+ values = network.get_attribute_values("LINK", src_attrs)
+ network.set_attribute_values("LINK", dst_attrs, values)
+ scenario.publish_network(network)
+
+ return scenario
+
+ @_m.logbook_trace("Convert TAP nodes to centroids")
+ def taps_to_centroids(self, network):
+ # delete existing traffic centroids
+ for centroid in list(network.centroids()):
+ network.delete_node(centroid, cascade=True)
+
+ node_attrs = network.attributes("NODE")
+ link_attrs = network.attributes("LINK")
+ for node in list(network.nodes()):
+ if node["@tap_id"] > 0:
+ centroid = network.create_node(node["@tap_id"], is_centroid=True)
+ for attr in node_attrs:
+ centroid[attr] = node[attr]
+ for link in node.outgoing_links():
+ connector = network.create_link(centroid, link.j_node, link.modes)
+ connector.vertices = link.vertices
+ for attr in link_attrs:
+ connector[attr] = link[attr]
+ for link in node.incoming_links():
+ connector = network.create_link(link.i_node, centroid, link.modes)
+ connector.vertices = link.vertices
+ for attr in link_attrs:
+ connector[attr] = link[attr]
+ network.delete_node(node, cascade=True)
+
+ @_m.logbook_trace("Duplicate TAP access and transfer access stops")
+ def duplicate_tap_adajcent_stops(self, network):
+ # Expand network by duplicating TAP adjacent stops
+ network.create_attribute("NODE", "tap_stop", False)
+ all_transit_modes = set([mode for mode in network.modes() if mode.type == "TRANSIT"])
+ access_mode = set([network.mode("a")])
+ transfer_mode = network.mode("x")
+ walk_mode = network.mode("w")
+
+ # Mark TAP adjacent stops and split TAP connectors
+ for centroid in network.centroids():
+ out_links = list(centroid.outgoing_links())
+ in_links = list(centroid.incoming_links())
+ for link in out_links + in_links:
+ link.length = 0.0005 # setting length so that connector access time = 0.01
+ for link in out_links:
+ real_stop = link.j_node
+ has_adjacent_transfer_links = False
+ has_adjacent_walk_links = False
+ for stop_link in real_stop.outgoing_links():
+ if stop_link == link.reverse_link:
+ continue
+                    if transfer_mode in stop_link.modes:
+                        has_adjacent_transfer_links = True
+                    if walk_mode in stop_link.modes:
+                        has_adjacent_walk_links = True
+
+ if has_adjacent_transfer_links or has_adjacent_walk_links:
+ length = link.length
+ tap_stop = network.split_link(centroid, real_stop, self._get_node_id(), include_reverse=True)
+ tap_stop["@network_adj"] = 1
+ real_stop.tap_stop = tap_stop
+ transit_access_link = network.link(real_stop, tap_stop)
+ for link in transit_access_link, transit_access_link.reverse_link:
+ link.modes = all_transit_modes
+ link.length = 0
+ for p in ["ea", "am", "md", "pm", "ev"]:
+ link["@time_link_" + p] = 0
+ access_link = network.link(tap_stop, centroid)
+ access_link.modes = access_mode
+ access_link.reverse_link.modes = access_mode
+ access_link.length = length
+ access_link.reverse_link.length = length
+
+ line_attributes = network.attributes("TRANSIT_LINE")
+ seg_attributes = network.attributes("TRANSIT_SEGMENT")
+
+ # re-route the transit lines through the new TAP-stops
+ for line in network.transit_lines():
+ # store line and segment data for re-routing
+ line_data = dict((k, line[k]) for k in line_attributes)
+ line_data["id"] = line.id
+ line_data["vehicle"] = line.vehicle
+
+ seg_data = {}
+ itinerary = []
+ tap_adjacent_stops = []
+
+ for seg in line.segments(include_hidden=True):
+ seg_data[(seg.i_node, seg.j_node, seg.loop_index)] = \
+ dict((k, seg[k]) for k in seg_attributes)
+ itinerary.append(seg.i_node.number)
+ if seg.i_node.tap_stop and seg.allow_boardings:
+ # insert tap_stop, real_stop loop after tap_stop
+ real_stop = seg.i_node
+ tap_stop = real_stop.tap_stop
+ itinerary.extend([tap_stop.number, real_stop.number])
+ tap_adjacent_stops.append(len(itinerary) - 1) # index of "real" stop in itinerary
+
+ if tap_adjacent_stops:
+ network.delete_transit_line(line)
+ new_line = network.create_transit_line(
+ line_data.pop("id"),
+ line_data.pop("vehicle"),
+ itinerary)
+ for k, v in line_data.iteritems():
+ new_line[k] = v
+
+ for seg in new_line.segments(include_hidden=True):
+ data = seg_data.get((seg.i_node, seg.j_node, seg.loop_index), {})
+ for k, v in data.iteritems():
+ seg[k] = v
+ for index in tap_adjacent_stops:
+ access_seg = new_line.segment(index - 2)
+ egress_seg = new_line.segment(index - 1)
+ real_seg = new_line.segment(index)
+ for k in seg_attributes:
+ access_seg[k] = egress_seg[k] = real_seg[k]
+ access_seg.allow_boardings = False
+ access_seg.allow_alightings = True
+ access_seg.transit_time_func = 3
+ access_seg.dwell_time = real_seg.dwell_time
+ egress_seg.allow_boardings = True
+ egress_seg.allow_alightings = True
+ egress_seg.transit_time_func = 3
+ egress_seg.dwell_time = 0
+ real_seg.allow_boardings = True
+ real_seg.allow_alightings = False
+ real_seg.dwell_time = 0
+
+ network.delete_attribute("NODE", "tap_stop")
+
+ @_m.logbook_trace("Add timed-transfer links", save_arguments=True)
+ def timed_transfers(self, network, timed_transfers_with_walk, period):
+ no_walk_link_error = "no walk link from line %s to %s"
+ node_not_found_error = "node %s not found in itinerary for line %s; "\
+ "the to_line may end at the transfer stop"
+
+ def find_walk_link(from_line, to_line):
+ to_nodes = set([s.i_node for s in to_line.segments(True)
+ if s.allow_boardings])
+ link_candidates = []
+ for seg in from_line.segments(True):
+                if not seg.allow_alightings:
+ continue
+ for link in seg.i_node.outgoing_links():
+ if link.j_node in to_nodes:
+ link_candidates.append(link)
+ if not link_candidates:
+ raise Exception(no_walk_link_error % (from_line, to_line))
+ # if there are multiple connecting links take the shortest one
+ return sorted(link_candidates, key=lambda x: x.length)[0]
+
+ def link_on_line(line, node, near_side_stop):
+ node = network.node(node)
+ if near_side_stop:
+ for seg in line.segments():
+ if seg.j_node == node:
+ return seg.link
+ else:
+ for seg in line.segments():
+ if seg.i_node == node:
+ return seg.link
+ raise Exception(node_not_found_error % (node, line))
+
+ # Group parallel transfers together (same pair of alighting-boarding nodes from the same line)
+ walk_transfers = _defaultdict(lambda: [])
+ for i, transfer in enumerate(timed_transfers_with_walk, start=1):
+ try:
+ from_line = network.transit_line(transfer["from_line"])
+ if not from_line:
+ raise Exception("from_line %s does not exist" % transfer["from_line"])
+ to_line = network.transit_line(transfer["to_line"])
+ if not to_line:
+ raise Exception("to_line %s does not exist" % transfer["to_line"])
+ walk_link = find_walk_link(from_line, to_line)
+ from_link = link_on_line(from_line, walk_link.i_node, near_side_stop=True)
+ to_link = link_on_line(to_line, walk_link.j_node, near_side_stop=False)
+ walk_transfers[(from_link, to_link)].append({
+ "to_line": to_line,
+ "from_line": from_line,
+ "walk_link": walk_link,
+ "wait": transfer["wait_time"],
+ })
+ except Exception as error:
+ new_message = "Timed transfer[%s]: %s" % (i, error.message)
+ raise type(error), type(error)(new_message), sys.exc_info()[2]
+
+ # If there is only one transfer at the location (redundant case)
+ # OR all transfers are from the same line (can have different waits)
+ # OR all transfers are to the same line and have the same wait
+ # Merge all transfers onto the same transfer node
+ network_transfers = []
+ for (from_link, to_link), transfers in walk_transfers.iteritems():
+ walk_links = set([t["walk_link"] for t in transfers])
+ from_lines = set([t["from_line"] for t in transfers])
+ to_lines = set([t["to_line"] for t in transfers])
+ waits = set(t["wait"] for t in transfers)
+ if len(transfers) == 1 or len(from_lines) == 1 or (len(to_lines) == 1 and len(waits) == 1):
+ network_transfers.append({
+ "from_link": from_link,
+ "to_link": to_link,
+ "to_lines": list(to_lines),
+ "from_lines": list(from_lines),
+ "walk_link": walk_links.pop(),
+ "wait": dict((t["to_line"], t["wait"]) for t in transfers)})
+ else:
+ for transfer in transfers:
+ network_transfers.append({
+ "from_link": from_link,
+ "to_link": to_link,
+ "to_lines": [transfer["to_line"]],
+ "from_lines": [transfer["from_line"]],
+ "walk_link": transfer["walk_link"],
+ "wait": {transfer["to_line"]: transfer["wait"]}})
+
+ def split_link(link, node_id, lines, split_links, stop_attr, waits=None):
+ near_side_stop = (stop_attr == "allow_alightings")
+ orig_link = link
+ if link in split_links:
+ link = split_links[link]
+ i_node, j_node = link.i_node, link.j_node
+ length = link.length
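+            # place the split point ~0.006 length units (mi) from the stop end, capped at 20% of the link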
+ proportion = min(0.006 / length, 0.2)
+ if near_side_stop:
+ proportion = 1 - proportion
+ new_node = network.split_link(i_node, j_node, node_id, False, proportion)
+ new_node["@network_adj"] = 3
+ new_node["@network_adj_src"] = orig_link.j_node.number if near_side_stop else orig_link.i_node.number
+ in_link = network.link(i_node, new_node)
+ out_link = network.link(new_node, j_node)
+ split_links[orig_link] = in_link if near_side_stop else out_link
+ if near_side_stop:
+ in_link.length = length
+ out_link.length = 0
+ for p in ["ea", "am", "md", "pm", "ev"]:
+ out_link["@trtime_link_" + p] = 0
+ else:
+ out_link.length = length
+ in_link.length = 0
+ for p in ["ea", "am", "md", "pm", "ev"]:
+ in_link["@trtime_link_" + p] = 0
+
+ for seg in in_link.segments():
+ if not near_side_stop:
+ seg.transit_time_func = 3
+ seg["@coaster_fare_inveh"] = 0
+ for seg in out_link.segments():
+ if near_side_stop:
+ seg.transit_time_func = 3
+ seg.allow_alightings = seg.allow_boardings = False
+ seg.dwell_time = 0
+ if seg.line in lines:
+ seg[stop_attr] = True
+ if stop_attr == "allow_boardings":
+ seg["@headway_seg"] = float(waits[seg.line]) * 2
+ return new_node
+
+ # process the transfer points, split links and set attributes
+ split_links = {}
+ for transfer in network_transfers:
+ new_alight_node = split_link(
+ transfer["from_link"], self._get_node_id(), transfer["from_lines"],
+ split_links, "allow_alightings")
+ new_board_node = split_link(
+ transfer["to_link"], self._get_node_id(), transfer["to_lines"],
+ split_links, "allow_boardings", waits=transfer["wait"])
+ walk_link = transfer["walk_link"]
+ transfer_link = network.create_link(
+ new_alight_node, new_board_node, [network.mode("x")])
+ for attr in network.attributes("LINK"):
+ transfer_link[attr] = walk_link[attr]
+
+ @_m.logbook_trace("Add circle line free layover transfers")
+ def connect_circle_lines(self, network):
+ network.create_attribute("NODE", "circle_lines")
+ line_attributes = network.attributes("TRANSIT_LINE")
+ seg_attributes = network.attributes("TRANSIT_SEGMENT")
+
+ def offset_coords(node):
+ rho = math.sqrt(5000)
+ phi = 3 * math.pi / 4 + node.circle_lines * math.pi / 12
+ x = node.x + rho * math.cos(phi)
+ y = node.y + rho * math.sin(phi)
+ node.circle_lines += 1
+            return (x, y)
+
+ transit_lines = list(network.transit_lines())
+ for line in transit_lines:
+ first_seg = line.segment(0)
+ last_seg = line.segment(-1)
+ if first_seg.i_node == last_seg.i_node:
+ # Add new node, offset from existing node
+ start_node = line.segment(0).i_node
+ xfer_node = network.create_node(self._get_node_id(), False)
+ xfer_node["@network_adj"] = 2
+ xfer_node.x, xfer_node.y = offset_coords(start_node)
+ network.create_link(start_node, xfer_node, [line.vehicle.mode])
+ network.create_link(xfer_node, start_node, [line.vehicle.mode])
+
+ # copy transit line data, re-route itinerary to and from new node
+ line_data = dict((k, line[k]) for k in line_attributes)
+ line_data["id"] = line.id
+ line_data["vehicle"] = line.vehicle
+ first_seg.allow_boardings = True
+ first_seg.allow_alightings = False
+ first_seg_data = dict((k, first_seg[k]) for k in seg_attributes)
+ first_seg_data.update({
+ "@headway_seg": 0.01, "dwell_time": 0, "transit_time_func": 3,
+ "@transfer_penalty_s": 0, "@xfer_from_bus": 0, "@layover_board": 1
+ })
+ last_seg.allow_boardings = False
+ last_seg.allow_alightings = True
+ last_seg_data = dict((k, last_seg[k]) for k in seg_attributes)
+ last_seg_data.update({
+ "@headway_seg": 0.01, "dwell_time": 5.0, "transit_time_func": 3
+ # incremental dwell time for layover of 5 min
+ # Note: some lines seem to have a layover of 0, most of 5 mins
+ })
+ seg_data = {
+ (xfer_node, start_node, 1): first_seg_data,
+ (xfer_node, None, 1): last_seg_data}
+ itinerary = [xfer_node.number]
+ for seg in line.segments():
+ seg_data[(seg.i_node, seg.j_node, seg.loop_index)] = dict((k, seg[k]) for k in seg_attributes)
+ itinerary.append(seg.i_node.number)
+ last_seg = line.segment(-1)
+ seg_data[(last_seg.i_node, xfer_node, 1)] = dict((k, last_seg[k]) for k in seg_attributes)
+ seg_data[(last_seg.i_node, xfer_node, 1)]["transit_time_func"] = 3
+ itinerary.extend([last_seg.i_node.number, xfer_node.number])
+
+ network.delete_transit_line(line)
+ new_line = network.create_transit_line(
+ line_data.pop("id"), line_data.pop("vehicle"), itinerary)
+ for k, v in line_data.iteritems():
+ new_line[k] = v
+ for seg in new_line.segments(include_hidden=True):
+ data = seg_data.get((seg.i_node, seg.j_node, seg.loop_index), {})
+ for k, v in data.iteritems():
+ seg[k] = v
+
+ network.delete_attribute("NODE", "circle_lines")
+
+ def _init_node_id(self, network):
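+        # generated node IDs start just above the next multiple of 10,000 past the current max node number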
+ new_node_id = max(n.number for n in network.nodes())
+ self._new_node_id = math.ceil(new_node_id / 10000.0) * 10000
+
+ def _get_node_id(self):
+ self._new_node_id += 1
+ return self._new_node_id
diff --git a/sandag_abm/src/main/emme/toolbox/assignment/traffic_assignment.py b/sandag_abm/src/main/emme/toolbox/assignment/traffic_assignment.py
new file mode 100644
index 0000000..cdf652e
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/assignment/traffic_assignment.py
@@ -0,0 +1,1087 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// traffic_assignment.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# The Traffic assignment tool runs the traffic assignment and skims per
+# period on the current primary scenario.
+#
+# The traffic assignment is a 15-class assignment with generalized cost on
+# links and BPR-type volume-delay functions which include capacities on links
+# and at intersection approaches. The assignment is run using the
+# fast-converging Second-Order Linear Approximation (SOLA) method in Emme to
+# a relative gap of 5x10-4. The per-link fixed costs include toll values and
+# operating costs which vary by class of demand.
+# Assignment matrices and resulting network flows are always in PCE.
+#
+# Inputs:
+# period: the time-of-day period, one of EA, AM, MD, PM, EV.
+# msa_iteration: global iteration number. If greater than 1, existing flow
+# values must be present and the resulting flows on links and turns will
+# be the weighted average of this assignment and the existing values.
+# relative_gap: minimum relative stopping criteria.
+# max_iterations: maximum iterations stopping criteria.
+# num_processors: number of processors to use for the traffic assignments.
+# select_link: specify one or more select link analysis setups as a list of
+# specifications with three keys:
+# "expression": selection expression to identify the link(s) of interest.
+# "suffix": the suffix to use in the naming of per-class result
+# attributes and matrices, up to 6 characters.
+# "threshold": the minimum number of links which must be encountered
+# for the path selection.
+# Example:
+# select_link = [
+#         {"expression": "@tcov_id=4578 or @tcov_id=9203", "suffix": "fwy", "threshold": "1"}
+# ]
+# raise_zero_dist: if checked, the distance skim for the SOVGP is checked for
+# any zero values, which would indicate a disconnected zone, in which case
+# an error is raised and the model run is halted.
+#
+# Matrices:
+# All traffic demand and skim matrices.
+# See list of classes under __call__ method, or matrix list under report method.
+#
+# Script example:
+"""
+import inro.modeller as _m
+import os
+import inro.emme.database.emmebank as _eb
+
+modeller = _m.Modeller()
+desktop = modeller.desktop
+traffic_assign = modeller.tool("sandag.assignment.traffic_assignment")
+export_traffic_skims = modeller.tool("sandag.export.export_traffic_skims")
+load_properties = modeller.tool('sandag.utilities.properties')
+project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+main_directory = os.path.dirname(project_dir)
+output_dir = os.path.join(main_directory, "output")
+props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+
+
+main_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database", "emmebank"))
+scenario_id = 100
+base_scenario = main_emmebank.scenario(scenario_id)
+
+periods = ["EA", "AM", "MD", "PM", "EV"]
+period_ids = list(enumerate(periods, start=int(scenario_id) + 1))
+
+msa_iteration = 1
+relative_gap = 0.0005
+max_assign_iterations = 100
+num_processors = "MAX-1"
+select_link = None # Optional select link specification
+
+for number, period in period_ids:
+ period_scenario = main_emmebank.scenario(number)
+ traffic_assign(period, msa_iteration, relative_gap, max_assign_iterations,
+ num_processors, period_scenario, select_link)
+ omx_file = os.path.join(output_dir, "traffic_skims_%s.omx" % period)
+ if msa_iteration < 4:
+ export_traffic_skims(period, omx_file, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 20
+
+
+import inro.modeller as _m
+import inro.emme.core.exception as _except
+import traceback as _traceback
+from contextlib import contextmanager as _context
+import numpy
+import array
+import os
+import json as _json
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class TrafficAssignment(_m.Tool(), gen_utils.Snapshot):
+
+ period = _m.Attribute(unicode)
+ msa_iteration = _m.Attribute(int)
+ relative_gap = _m.Attribute(float)
+ max_iterations = _m.Attribute(int)
+ num_processors = _m.Attribute(str)
+ select_link = _m.Attribute(unicode)
+ raise_zero_dist = _m.Attribute(bool)
+ stochastic = _m.Attribute(bool)
+ input_directory = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ self.msa_iteration = 1
+ self.relative_gap = 0.0005
+ self.max_iterations = 100
+ self.num_processors = "MAX-1"
+ self.raise_zero_dist = True
+ self.select_link = '[]'
+ self.stochastic = False
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.attributes = ["period", "msa_iteration", "relative_gap", "max_iterations",
+ "num_processors", "select_link", "raise_zero_dist", "stochastic", "input_directory"]
+ version = os.environ.get("EMMEPATH", "")
+ self._version = version[-5:] if version else ""
+ self._skim_classes_separately = True # Used for debugging only
+ self._stats = {}
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Traffic assignment"
+ pb.description = """
+The Traffic assignment tool runs the traffic assignment and skims per
+period on the current primary scenario.
+
+The traffic assignment is a 15-class assignment with generalized cost on
+links and BPR-type volume-delay functions which include capacities on links
+and at intersection approaches. The assignment is run using the
+fast-converging Second-Order Linear Approximation (SOLA) method in Emme to
+a relative gap of 5x10^-4. The per-link fixed costs include toll values and
+operating costs which vary by class of demand.
+Assignment matrices and resulting network flows are always in PCE.
+"""
+ pb.branding_text = "- SANDAG - Assignment"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ options = [("EA","Early AM"),
+ ("AM","AM peak"),
+ ("MD","Mid-day"),
+ ("PM","PM peak"),
+ ("EV","Evening")]
+ pb.add_select("period", options, title="Period:")
+ pb.add_text_box("msa_iteration", title="MSA iteration:", note="If >1 will apply MSA to flows.")
+ pb.add_text_box("relative_gap", title="Relative gap:")
+ pb.add_text_box("max_iterations", title="Max iterations:")
+ dem_utils.add_select_processors("num_processors", pb, self)
+ pb.add_checkbox("raise_zero_dist", title=" ", label="Raise on zero distance value",
+ note="Check for and raise an exception if a zero value is found in the SOVGP_DIST matrix.")
+ pb.add_checkbox(
+ 'stochastic',
+ title=" ",
+ label="Run as a stochastic assignment",
+ note="If the current MSA iteration is the last (4th) one, the SOLA traffic assignment is replaced with a stochastic traffic assignment."
+ )
+ pb.add_select_file('input_directory', 'directory', title='Select input directory')
+ self._add_select_link_interface(pb)
+ return pb.render()
+
+
+ def _add_select_link_interface(self, pb):
+ # Note: the original tool page embeds interactive HTML/JavaScript helper
+ # markup here (lost in extraction); only the help text is reproduced below.
+ pb.add_text_box("select_link", multi_line=True)
+ pb.wrap_html(title="Select link(s):",
+ body="""
+ Expression | Result suffix | Threshold
+
+ Expression: Emme selection expression to identify the link(s) of interest.
+ Examples and available attributes below.
+
+ Result suffix: the suffix to use in the naming of per-class result
+ attributes and matrices, up to 6 characters.
+ Should be unique (existing attributes / matrices will be overwritten).
+
+ Threshold: the minimum number of links which must be encountered
+ for the path selection.
+ The default value of 1 indicates an "any" link selection.
+
+ Expression selection help: use one (or more) selection criteria of the form
+ attribute=value or attribute=min,max.
+ Multiple criteria may be combined with 'and' ('&'), 'or' ('|'), and
+ 'xor' ('^'). Use 'not' ('!') in front of a criterion to negate it.
+ More help on selection expressions is available in the Emme documentation.
+
+ Examples:
+ Select by attribute: @selected_link=1
+ Select link by ID (i node, j node): link=1066,1422
+ Select by TCOVED ID (two links): @tcov_id=4578 or @tcov_id=9203
+ Outgoing connector: ci=1
+ Incoming connector: cj=1
+ Links of type 6 and 7: type=6,7
+
+ Result link and turn flows will be saved in extra attributes
+ @sel_XX_NAME_SUFFIX, where XX is the period, NAME is
+ the class name, and SUFFIX is the specified result suffix.
+ The selected O-D demand will be saved in SELDEM_XX_NAME_SUFFIX.
+""")
+
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ results = self(self.period, self.msa_iteration, self.relative_gap, self.max_iterations,
+ self.num_processors, scenario, self.select_link, self.raise_zero_dist,
+ self.stochastic, self.input_directory)
+ run_msg = "Traffic assignment completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, period, msa_iteration, relative_gap, max_iterations, num_processors, scenario,
+ select_link=[], raise_zero_dist=True, stochastic=False, input_directory=None):
+ select_link = _json.loads(select_link) if isinstance(select_link, basestring) else select_link
+ attrs = {
+ "period": period,
+ "msa_iteration": msa_iteration,
+ "relative_gap": relative_gap,
+ "max_iterations": max_iterations,
+ "num_processors": num_processors,
+ "scenario": scenario.id,
+ "select_link": _json.dumps(select_link),
+ "raise_zero_dist": raise_zero_dist,
+ "stochastic": stochastic,
+ "input_directory": input_directory,
+ "self": str(self)
+ }
+ self._stats = {}
+ with _m.logbook_trace("Traffic assignment for period %s" % period, attributes=attrs):
+ gen_utils.log_snapshot("Traffic assignment", str(self), attrs)
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ if not period in periods:
+ raise _except.ArgumentError(
+ 'period: unknown value - specify one of %s' % periods)
+ num_processors = dem_utils.parse_num_processors(num_processors)
+ # Main list of assignment classes
+ classes = [
+ { # 0
+ "name": 'SOV_NT_L', "mode": 's', "PCE": 1, "VOT": 8.81, "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 1
+ "name": 'SOV_TR_L', "mode": 'S', "PCE": 1, "VOT": 8.81, "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 2
+ "name": 'HOV2_L', "mode": 'H', "PCE": 1, "VOT": 8.81, "cost": '@cost_hov2',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV2", "TOLLDIST.HOV2", "HOVDIST"]
+ },
+ { # 3
+ "name": 'HOV3_L', "mode": 'I', "PCE": 1, "VOT": 8.81, "cost": '@cost_hov3',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV3", "TOLLDIST.HOV3", "HOVDIST"]
+ },
+ { # 4
+ "name": 'SOV_NT_M', "mode": 's', "PCE": 1, "VOT": 18.0, "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 5
+ "name": 'SOV_TR_M', "mode": 'S', "PCE": 1, "VOT": 18.0, "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 6
+ "name": 'HOV2_M', "mode": 'H', "PCE": 1, "VOT": 18.0, "cost": '@cost_hov2',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV2", "TOLLDIST.HOV2", "HOVDIST"]
+ },
+ { # 7
+ "name": 'HOV3_M', "mode": 'I', "PCE": 1, "VOT": 18.0, "cost": '@cost_hov3',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV3", "TOLLDIST.HOV3", "HOVDIST"]
+ },
+ { # 8
+ "name": 'SOV_NT_H', "mode": 's', "PCE": 1, "VOT": 85., "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 9
+ "name": 'SOV_TR_H', "mode": 'S', "PCE": 1, "VOT": 85., "cost": '@cost_auto',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.SOV", "TOLLDIST"]
+ },
+ { # 10
+ "name": 'HOV2_H', "mode": 'H', "PCE": 1, "VOT": 85., "cost": '@cost_hov2',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV2", "TOLLDIST.HOV2", "HOVDIST"]
+ },
+ { # 11
+ "name": 'HOV3_H', "mode": 'I', "PCE": 1, "VOT": 85., "cost": '@cost_hov3',
+ "skims": ["TIME", "DIST", "REL", "TOLLCOST.HOV3", "TOLLDIST.HOV3", "HOVDIST"]
+ },
+ { # 12
+ "name": 'TRK_L', "mode": 'T', "PCE": 1.3, "VOT": 67., "cost": '@cost_lgt_truck',
+ "skims": ["TIME", "DIST", "TOLLCOST.TRK_L"]
+ },
+ { # 13
+ "name": 'TRK_M', "mode": 'M', "PCE": 1.5, "VOT": 68., "cost": '@cost_med_truck',
+ "skims": ["TIME", "DIST", "TOLLCOST.TRK_M"]
+ },
+ { # 14
+ "name": 'TRK_H', "mode": 'V', "PCE": 2.5, "VOT": 89., "cost": '@cost_hvy_truck',
+ "skims": ["TIME", "DIST", "TOLLCOST.TRK_H"]
+ },
+ ]
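+ # Illustrative note on the class definitions above: in the SOLA spec, each
+ # class is assigned with generalized cost = link time + link cost / VOT,
+ # implemented below via "perception_factor": 1.0 / VOT on the link costs.
+ # For example, assuming cost in cents and VOT in cents per minute, a link
+ # with @cost_auto = 120 adds 120 / 8.81 ~= 13.6 equivalent minutes for the
+ # SOV_NT_L class (VOT = 8.81).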
+
+ # change mode to allow sovntp on SR125
+ # TODO: incorporate this into import_network instead
+ # also, consider updating mode definitions
+ self.change_mode_sovntp(scenario)
+
+ if period == "MD" and (msa_iteration == 1 or not scenario.mode('D')):
+ self.prepare_midday_generic_truck(scenario)
+
+ if 1 < msa_iteration < 4:
+ # Link and turn flows
+ link_attrs = ["auto_volume"]
+ turn_attrs = ["auto_volume"]
+ for traffic_class in classes:
+ link_attrs.append("@%s" % (traffic_class["name"].lower()))
+ turn_attrs.append("@p%s" % (traffic_class["name"].lower()))
+ msa_link_flows = scenario.get_attribute_values("LINK", link_attrs)[1:]
+ msa_turn_flows = scenario.get_attribute_values("TURN", turn_attrs)[1:]
+
+ if stochastic:
+ self.run_stochastic_assignment(
+ period,
+ relative_gap,
+ max_iterations,
+ num_processors,
+ scenario,
+ classes,
+ input_directory
+ )
+ else:
+ self.run_assignment(period, relative_gap, max_iterations, num_processors, scenario, classes, select_link)
+
+
+ if 1 < msa_iteration < 4:
+ link_flows = scenario.get_attribute_values("LINK", link_attrs)
+ values = [link_flows.pop(0)]
+ for msa_array, flow_array in zip(msa_link_flows, link_flows):
+ msa_vals = numpy.frombuffer(msa_array, dtype='float32')
+ flow_vals = numpy.frombuffer(flow_array, dtype='float32')
+ result = msa_vals + (1.0 / msa_iteration) * (flow_vals - msa_vals)
+ result_array = array.array('f')
+ result_array.fromstring(result.tostring())
+ values.append(result_array)
+ scenario.set_attribute_values("LINK", link_attrs, values)
+
+ turn_flows = scenario.get_attribute_values("TURN", turn_attrs)
+ values = [turn_flows.pop(0)]
+ for msa_array, flow_array in zip(msa_turn_flows, turn_flows):
+ msa_vals = numpy.frombuffer(msa_array, dtype='float32')
+ flow_vals = numpy.frombuffer(flow_array, dtype='float32')
+ result = msa_vals + (1.0 / msa_iteration) * (flow_vals - msa_vals)
+ result_array = array.array('f')
+ result_array.fromstring(result.tostring())
+ values.append(result_array)
+ scenario.set_attribute_values("TURN", turn_attrs, values)
+
+ self.calc_network_results(period, num_processors, scenario)
+
+ if msa_iteration <= 4:
+ self.run_skims(period, num_processors, scenario, classes)
+ self.report(period, scenario, classes)
+ # Check that the distance matrix is valid (no disconnected zones)
+ # Using the SOV_TR_H class as representative
+ if raise_zero_dist:
+ name = "%s_SOV_TR_H_DIST" % period
+ dist_stats = self._stats[name]
+ if dist_stats[1] == 0:
+ zones = scenario.zone_numbers
+ matrix = scenario.emmebank.matrix(name)
+ data = matrix.get_numpy_data(scenario)
+ row, col = numpy.unravel_index(data.argmin(), data.shape)
+ row, col = zones[row], zones[col]
+ raise Exception("Disconnected zone error: 0 value found in matrix %s from zone %s to %s" % (name, row, col))
+
+ def run_assignment(self, period, relative_gap, max_iterations, num_processors, scenario, classes, select_link):
+ emmebank = scenario.emmebank
+
+ modeller = _m.Modeller()
+ set_extra_function_para = modeller.tool(
+ "inro.emme.traffic_assignment.set_extra_function_parameters")
+ create_attribute = modeller.tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ traffic_assign = modeller.tool(
+ "inro.emme.traffic_assignment.sola_traffic_assignment")
+ net_calc = gen_utils.NetworkCalculator(scenario)
+
+ if period in ["AM", "PM"]:
+ # For freeway links in AM and PM periods, convert VDF to type 25
+ net_calc("vdf", "25", "vdf=10")
+
+ p = period.lower()
+ assign_spec = self.base_assignment_spec(
+ relative_gap, max_iterations, num_processors)
+ with _m.logbook_trace("Prepare traffic data for period %s" % period):
+ with _m.logbook_trace("Input link attributes"):
+ # set extra attributes for the period for VDF
+ # ul1 = @time_link (period)
+ # ul2 = transit flow -> volad (for assignment only)
+ # ul3 = @capacity_link (period)
+ el1 = "@green_to_cycle"
+ el2 = "@sta_reliability"
+ el3 = "@capacity_inter"
+ set_extra_function_para(el1, el2, el3, emmebank=emmebank)
+
+ # set green to cycle to el1=@green_to_cycle for VDF
+ att_name = "@green_to_cycle_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@green_to_cycle"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set static reliability to el2=@sta_reliability for VDF
+ att_name = "@sta_reliability_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@sta_reliability"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set capacity_inter to el3=@capacity_inter for VDF
+ att_name = "@capacity_inter_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@capacity_inter"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set link time
+ net_calc("ul1", "@time_link_%s" % p, "modes=d")
+ net_calc("ul3", "@capacity_link_%s" % p, "modes=d")
+ # set number of lanes (not used in VDF, just for reference)
+ net_calc("lanes", "@lane_%s" % p, "modes=d")
+ if period in ["EA", "MD", "EV"]:
+ # For links with signals inactive in the off-peak periods, convert VDF to type 11
+ net_calc("vdf", "11", "modes=d and @green_to_cycle=0 and @traffic_control=4,5 and vdf=24")
+ # # Set HOV2 cost attribute
+ # create_attribute("LINK", "@cost_hov2_%s" % p, "toll (non-mngd) + cost for HOV2",
+ # 0, overwrite=True, scenario=scenario)
+ # net_calc("@cost_hov2_%s" % p, "@cost_hov_%s" % p, "modes=d")
+ # net_calc("@cost_hov2_%s" % p, "@cost_auto_%s" % p, "@lane_restriction=3")
+
+ with _m.logbook_trace("Transit line headway and background traffic"):
+ # set headway for the period
+ hdw = {"ea": "@headway_op",
+ "am": "@headway_am",
+ "md": "@headway_op",
+ "pm": "@headway_pm",
+ "ev": "@headway_op"}
+ net_calc("hdw", hdw[p], {"transit_line": "all"})
+
+ # transit vehicle as background flow with periods
+ period_hours = {'ea': 3, 'am': 3, 'md': 6.5, 'pm': 3.5, 'ev': 5}
+ expression = "(60 / hdw) * vauteq * %s" % (period_hours[p])
+ net_calc("ul2", "0", "modes=d")
+ net_calc("ul2", expression,
+ selections={"link": "modes=d", "transit_line": "hdw=0.02,9999"},
+ aggregation="+")
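+ # Illustrative example of the background flow above: a line with a
+ # 10-minute headway in the 3-hour AM period contributes
+ # (60 / 10) * vauteq * 3 = 18 * vauteq units of fixed flow (ul2) on each
+ # link it traverses, where vauteq is the auto equivalent of the vehicle.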
+
+ with _m.logbook_trace("Per-class flow attributes"):
+ for traffic_class in classes:
+ demand = 'mf"%s_%s"' % (period, traffic_class["name"])
+ link_cost = "%s_%s" % (traffic_class["cost"], p) if traffic_class["cost"] else "@cost_operating"
+
+ att_name = "@%s" % (traffic_class["name"].lower())
+ att_des = "%s %s link volume" % (period, traffic_class["name"])
+ link_flow = create_attribute("LINK", att_name, att_des, 0, overwrite=True, scenario=scenario)
+ att_name = "@p%s" % (traffic_class["name"].lower())
+ att_des = "%s %s turn volume" % (period, traffic_class["name"])
+ turn_flow = create_attribute("TURN", att_name, att_des, 0, overwrite=True, scenario=scenario)
+
+ class_spec = {
+ "mode": traffic_class["mode"],
+ "demand": demand,
+ "generalized_cost": {
+ "link_costs": link_cost, "perception_factor": 1.0 / traffic_class["VOT"]
+ },
+ "results": {
+ "link_volumes": link_flow.id, "turn_volumes": turn_flow.id,
+ "od_travel_times": None
+ }
+ }
+ assign_spec["classes"].append(class_spec)
+ if select_link:
+ for class_spec in assign_spec["classes"]:
+ class_spec["path_analyses"] = []
+ for sub_spec in select_link:
+ expr = sub_spec["expression"]
+ suffix = sub_spec["suffix"]
+ threshold = sub_spec["threshold"]
+ if not expr and not suffix:
+ continue
+ with _m.logbook_trace("Prepare for select link analysis '%s' - %s" % (expr, suffix)):
+ slink = create_attribute("LINK", "@slink_%s" % suffix, "selected link for %s" % suffix, 0,
+ overwrite=True, scenario=scenario)
+ net_calc(slink.id, "1", expr)
+ with _m.logbook_trace("Initialize result matrices and extra attributes"):
+ for traffic_class, class_spec in zip(classes, assign_spec["classes"]):
+ att_name = "@sl_%s_%s" % (traffic_class["name"].lower(), suffix)
+ att_des = "%s %s '%s' sel link flow"% (period, traffic_class["name"], suffix)
+ link_flow = create_attribute("LINK", att_name, att_des, 0, overwrite=True, scenario=scenario)
+ att_name = "@psl_%s_%s" % (traffic_class["name"].lower(), suffix)
+ att_des = "%s %s '%s' sel turn flow" % (period, traffic_class["name"], suffix)
+ turn_flow = create_attribute("TURN", att_name, att_des, 0, overwrite=True, scenario=scenario)
+
+ name = "SELDEM_%s_%s_%s" % (period, traffic_class["name"], suffix)
+ desc = "Selected demand for %s %s %s" % (period, traffic_class["name"], suffix)
+ seldem = dem_utils.create_full_matrix(name, desc, scenario=scenario)
+
+ # add select link analysis
+ class_spec["path_analyses"].append({
+ "link_component": slink.id,
+ "turn_component": None,
+ "operator": "+",
+ "selection_threshold": { "lower": threshold, "upper": 999999},
+ "path_to_od_composition": {
+ "considered_paths": "SELECTED",
+ "multiply_path_proportions_by": {"analyzed_demand": True, "path_value": False}
+ },
+ "analyzed_demand": None,
+ "results": {
+ "selected_link_volumes": link_flow.id,
+ "selected_turn_volumes": turn_flow.id,
+ "od_values": seldem.named_id
+ }
+ })
+ # Run assignment
+ traffic_assign(assign_spec, scenario, chart_log_interval=2)
+ return
+
+ def run_stochastic_assignment(
+ self, period, relative_gap, max_iterations, num_processors, scenario,
+ classes, input_directory):
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ main_directory = os.path.dirname(input_directory)
+ props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+ distribution_type = props['stochasticHighwayAssignment.distributionType']
+ replications = props['stochasticHighwayAssignment.replications']
+ a_parameter = props['stochasticHighwayAssignment.aParameter']
+ b_parameter = props['stochasticHighwayAssignment.bParameter']
+ seed = props['stochasticHighwayAssignment.seed']
+
+ emmebank = scenario.emmebank
+
+ modeller = _m.Modeller()
+ set_extra_function_para = modeller.tool(
+ "inro.emme.traffic_assignment.set_extra_function_parameters")
+ create_attribute = modeller.tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ traffic_assign = modeller.tool(
+ "solutions.stochastic_traffic_assignment")
+ net_calc = gen_utils.NetworkCalculator(scenario)
+
+ if period in ["AM", "PM"]:
+ # For freeway links in AM and PM periods, convert VDF to type 25
+ net_calc("vdf", "25", "vdf=10")
+
+ p = period.lower()
+ assign_spec = self.base_assignment_spec(
+ relative_gap, max_iterations, num_processors)
+ assign_spec['background_traffic'] = {
+ "link_component": None,
+ "turn_component": None,
+ "add_transit_vehicles": True
+ }
+ with _m.logbook_trace("Prepare traffic data for period %s" % period):
+ with _m.logbook_trace("Input link attributes"):
+ # set extra attributes for the period for VDF
+ # ul1 = @time_link (period)
+ # ul2 = transit flow -> volad (for assignment only)
+ # ul3 = @capacity_link (period)
+ el1 = "@green_to_cycle"
+ el2 = "@sta_reliability"
+ el3 = "@capacity_inter"
+ set_extra_function_para(el1, el2, el3, emmebank=emmebank)
+
+ # set green to cycle to el1=@green_to_cycle for VDF
+ att_name = "@green_to_cycle_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@green_to_cycle"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set static reliability to el2=@sta_reliability for VDF
+ att_name = "@sta_reliability_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@sta_reliability"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set capacity_inter to el3=@capacity_inter for VDF
+ att_name = "@capacity_inter_%s" % p
+ att = scenario.extra_attribute(att_name)
+ new_att_name = "@capacity_inter"
+ create_attribute("LINK", new_att_name, att.description,
+ 0, overwrite=True, scenario=scenario)
+ net_calc(new_att_name, att_name, "modes=d")
+ # set link time
+ net_calc("ul1", "@time_link_%s" % p, "modes=d")
+ net_calc("ul3", "@capacity_link_%s" % p, "modes=d")
+ # set number of lanes (not used in VDF, just for reference)
+ net_calc("lanes", "@lane_%s" % p, "modes=d")
+ if period in ["EA", "MD", "EV"]:
+ # For links with signals inactive in the off-peak periods, convert VDF to type 11
+ net_calc("vdf", "11", "modes=d and @green_to_cycle=0 and @traffic_control=4,5 and vdf=24")
+ # # Set HOV2 cost attribute
+ # create_attribute("LINK", "@cost_hov2_%s" % p, "toll (non-mngd) + cost for HOV2",
+ # 0, overwrite=True, scenario=scenario)
+ # net_calc("@cost_hov2_%s" % p, "@cost_hov_%s" % p, "modes=d")
+ # net_calc("@cost_hov2_%s" % p, "@cost_auto_%s" % p, "@lane_restriction=3")
+
+ with _m.logbook_trace("Transit line headway and background traffic"):
+ # set headway for the period: format is (attribute_name, period duration in hours)
+ hdw = {"ea": ("@headway_op", 3),
+ "am": ("@headway_am", 3),
+ "md": ("@headway_op", 6.5),
+ "pm": ("@headway_pm", 3.5),
+ "ev": ("@headway_op", 5)}
+ net_calc('ul2', '0', {'link': 'all'})
+ net_calc('hdw', '9999.99', {'transit_line': 'all'})
+ net_calc(
+ 'hdw', "{hdw} / {p} ".format(hdw=hdw[p][0], p=hdw[p][1]),
+ {"transit_line": "%s=0.02,9999" % hdw[p][0]}
+ )
+
+ with _m.logbook_trace("Per-class flow attributes"):
+ for traffic_class in classes:
+ demand = 'mf"%s_%s"' % (period, traffic_class["name"])
+ link_cost = "%s_%s" % (traffic_class["cost"], p) if traffic_class["cost"] else "@cost_operating"
+
+ att_name = "@%s" % (traffic_class["name"].lower())
+ att_des = "%s %s link volume" % (period, traffic_class["name"])
+ link_flow = create_attribute("LINK", att_name, att_des, 0, overwrite=True, scenario=scenario)
+ att_name = "@p%s" % (traffic_class["name"].lower())
+ att_des = "%s %s turn volume" % (period, traffic_class["name"])
+ turn_flow = create_attribute("TURN", att_name, att_des, 0, overwrite=True, scenario=scenario)
+
+ class_spec = {
+ "mode": traffic_class["mode"],
+ "demand": demand,
+ "generalized_cost": {
+ "link_costs": link_cost, "perception_factor": 1.0 / traffic_class["VOT"]
+ },
+ "results": {
+ "link_volumes": link_flow.id, "turn_volumes": turn_flow.id,
+ "od_travel_times": None
+ }
+ }
+ assign_spec["classes"].append(class_spec)
+
+ # Run assignment
+ traffic_assign(
+ assign_spec,
+ dist_par={'type': distribution_type, 'A': a_parameter, 'B': b_parameter},
+ replications=replications,
+ seed=seed,
+ orig_func=False,
+ random_term='ul2',
+ compute_travel_times=False,
+ scenario=scenario
+ )
+
+ with _m.logbook_trace("Reset transit line headways"):
+ # set headway for the period
+ hdw = {"ea": "@headway_op",
+ "am": "@headway_am",
+ "md": "@headway_op",
+ "pm": "@headway_pm",
+ "ev": "@headway_op"}
+ net_calc("hdw", hdw[p], {"transit_line": "all"})
+ return
+
+ def calc_network_results(self, period, num_processors, scenario):
+ modeller = _m.Modeller()
+ create_attribute = modeller.tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ net_calc = gen_utils.NetworkCalculator(scenario)
+ emmebank = scenario.emmebank
+ p = period.lower()
+ # ul2 is the total flow (volau + volad) in the skim assignment
+ with _m.logbook_trace("Calculation of attributes for skims"):
+ link_attributes = [
+ ("@hovdist", "distance for HOV"),
+ ("@tollcost", "Toll cost for SOV autos"),
+ ("@h2tollcost", "Toll cost for hov2"),
+ ("@h3tollcost", "Toll cost for hov3"),
+ ("@trk_ltollcost", "Toll cost for light trucks"),
+ ("@trk_mtollcost", "Toll cost for medium trucks"),
+ ("@trk_htollcost", "Toll cost for heavy trucks"),
+ ("@mlcost", "Manage lane cost in cents"),
+ ("@tolldist", "Toll distance"),
+ ("@h2tolldist", "Toll distance for hov2"),
+ ("@h3tolldist", "Toll distance for hov3"),
+ ("@reliability", "Reliability factor"),
+ ("@reliability_sq", "Reliability factor squared"),
+ ("@auto_volume", "traffic link volume (volau)"),
+ ("@auto_time", "traffic link time (timau)"),
+ ]
+ for name, description in link_attributes:
+ create_attribute("LINK", name, description,
+ 0, overwrite=True, scenario=scenario)
+ create_attribute("TURN", "@auto_time_turn", "traffic turn time (ptimau)",
+ overwrite=True, scenario=scenario)
+
+ net_calc("@hovdist", "length", {"link": "@lane_restriction=2,3"})
+ net_calc("@tollcost", "@cost_auto_%s - @cost_operating" % p)
+ net_calc("@h2tollcost", "@cost_hov2_%s - @cost_operating" % p, {"link": "@lane_restriction=3,4"})
+ net_calc("@h3tollcost", "@cost_hov3_%s - @cost_operating" % p, {"link": "@lane_restriction=4"})
+ net_calc("@trk_ltollcost", "@cost_lgt_truck_%s - @cost_operating" % p)
+ net_calc("@trk_mtollcost", "@cost_med_truck_%s - @cost_operating" % p)
+ net_calc("@trk_htollcost", "@cost_hvy_truck_%s - @cost_operating" % p)
+ net_calc("@mlcost", "@toll_%s" % p, {"link": "not @lane_restriction=4"})
+ net_calc("@tolldist", "length", {"link": "@lane_restriction=2,4"})
+ net_calc("@h2tolldist", "length", {"link": "@lane_restriction=3,4"})
+ net_calc("@h3tolldist", "length", {"link": "@lane_restriction=4"})
+ net_calc("@auto_volume", "volau", {"link": "modes=d"})
+ net_calc("ul2", "volau+volad", {"link": "modes=d"})
+ vdfs = [f for f in emmebank.functions() if f.type == "VOLUME_DELAY"]
+ exf_pars = emmebank.extra_function_parameters
+ for function in vdfs:
+ expression = function.expression
+ for exf_par in ["el1", "el2", "el3"]:
+ expression = expression.replace(exf_par, getattr(exf_pars, exf_par))
+ # split function into time component and reliability component
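+ # e.g. (hypothetical VDF expression) "ul1*(1+@sta_reliability+0.1*x)" splits
+ # into time component "ul1" and reliability component "(@sta_reliability+0.1*x)"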
+ time_expr, reliability_expr = expression.split("*(1+@sta_reliability+")
+ net_calc("@auto_time", time_expr, {"link": "vdf=%s" % function.id[2:]})
+ net_calc("@reliability", "(@sta_reliability+" + reliability_expr,
+ {"link": "vdf=%s" % function.id[2:]})
+
+ net_calc("@reliability_sq", "@reliability**2", {"link": "modes=d"})
+ net_calc("@auto_time_turn", "ptimau*(ptimau.gt.0)",
+ {"incoming_link": "all", "outgoing_link": "all"})
+
+ def run_skims(self, period, num_processors, scenario, classes):
+ modeller = _m.Modeller()
+ traffic_assign = modeller.tool(
+ "inro.emme.traffic_assignment.sola_traffic_assignment")
+ emmebank = scenario.emmebank
+ p = period.lower()
+ analysis_link = {
+ "TIME": "@auto_time",
+ "DIST": "length",
+ "HOVDIST": "@hovdist",
+ "TOLLCOST.SOV": "@tollcost",
+ "TOLLCOST.HOV2": "@h2tollcost",
+ "TOLLCOST.HOV3": "@h3tollcost",
+ "TOLLCOST.TRK_L": "@trk_ltollcost",
+ "TOLLCOST.TRK_M": "@trk_mtollcost",
+ "TOLLCOST.TRK_H": "@trk_htollcost",
+ "MLCOST": "@mlcost",
+ "TOLLDIST": "@tolldist",
+ "TOLLDIST.HOV2": "@h2tolldist",
+ "TOLLDIST.HOV3": "@h3tolldist",
+ "REL": "@reliability_sq"
+ }
+ analysis_turn = {"TIME": "@auto_time_turn"}
+ with self.setup_skims(period, scenario):
+ if period == "MD":
+ gen_truck_mode = 'D'
+ classes.append({
+ "name": 'TRK', "mode": gen_truck_mode, "PCE": 1, "VOT": 67., "cost": '',
+ "skims": ["TIME"]
+ })
+ skim_spec = self.base_assignment_spec(0, 0, num_processors, background_traffic=False)
+ for traffic_class in classes:
+ if not traffic_class["skims"]:
+ continue
+ class_analysis = []
+ if "GENCOST" in traffic_class["skims"]:
+ od_travel_times = 'mf"%s_%s_%s"' % (period, traffic_class["name"], "GENCOST")
+ traffic_class["skims"].remove("GENCOST")
+ else:
+ od_travel_times = None
+ for skim_type in traffic_class["skims"]:
+ skim_name = skim_type.split(".")[0]
+ class_analysis.append({
+ "link_component": analysis_link.get(skim_type),
+ "turn_component": analysis_turn.get(skim_type),
+ "operator": "+",
+ "selection_threshold": {"lower": None, "upper": None},
+ "path_to_od_composition": {
+ "considered_paths": "ALL",
+ "multiply_path_proportions_by":
+ {"analyzed_demand": False, "path_value": True}
+ },
+ "results": {
+ "od_values": 'mf"%s_%s_%s"' % (period, traffic_class["name"], skim_name),
+ "selected_link_volumes": None,
+ "selected_turn_volumes": None
+ }
+ })
+ if traffic_class["cost"]:
+ link_cost = "%s_%s" % (traffic_class["cost"], p)
+ else:
+ link_cost = "@cost_operating"
+ skim_spec["classes"].append({
+ "mode": traffic_class["mode"],
+ "demand": 'ms"zero"', # 0 demand for skim with 0 iteration and fix flow in ul2 in vdf
+ "generalized_cost": {
+ "link_costs": link_cost, "perception_factor": 1.0 / traffic_class["VOT"]
+ },
+ "results": {
+ "link_volumes": None, "turn_volumes": None,
+ "od_travel_times": {"shortest_paths": od_travel_times}
+ },
+ "path_analyses": class_analysis,
+ })
+
+ # skim assignment
+ if self._skim_classes_separately:
+ # Debugging check
+ skim_classes = skim_spec["classes"][:]
+ for kls in skim_classes:
+ skim_spec["classes"] = [kls]
+ traffic_assign(skim_spec, scenario)
+ else:
+ traffic_assign(skim_spec, scenario)
+
+ # compute diagonal value for TIME and DIST
+ with _m.logbook_trace("Compute diagonal values for period %s" % period):
+ num_cells = len(scenario.zone_numbers) ** 2
+ for traffic_class in classes:
+ class_name = traffic_class["name"]
+ skims = traffic_class["skims"]
+ with _m.logbook_trace("Class %s" % class_name):
+ for skim_type in skims:
+ skim_name = skim_type.split(".")[0]
+ name = '%s_%s_%s' % (period, class_name, skim_name)
+ matrix = emmebank.matrix(name)
+ data = matrix.get_numpy_data(scenario)
+ if skim_name == "TIME" or skim_name == "DIST":
+ numpy.fill_diagonal(data, 999999999.0)
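+ # intrazonal value = half the minimum value in each row, ignoring the
+ # first 12 (external) zones; the diagonal is set high above so it
+ # cannot be selected as the minimum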
+ data[numpy.diag_indices_from(data)] = 0.5 * numpy.nanmin(data[:, 12:], 1)
+ internal_data = data[12:, 12:] # Exclude the first 12 zones, external zones
+ self._stats[name] = (name, internal_data.min(), internal_data.max(), internal_data.mean(), internal_data.sum(), 0)
+ elif skim_name == "REL":
+ data = numpy.sqrt(data)
+ else:
+ self._stats[name] = (name, data.min(), data.max(), data.mean(), data.sum(), 0)
+ numpy.fill_diagonal(data, 0.0)
+ matrix.set_numpy_data(data, scenario)
+ return
+
+ def base_assignment_spec(self, relative_gap, max_iterations, num_processors, background_traffic=True):
+ base_spec = {
+ "type": "SOLA_TRAFFIC_ASSIGNMENT",
+ "background_traffic": None,
+ "classes": [],
+ "stopping_criteria": {
+ "max_iterations": int(max_iterations), "best_relative_gap": 0.0,
+ "relative_gap": float(relative_gap), "normalized_gap": 0.0
+ },
+ "performance_settings": {"number_of_processors": num_processors},
+ }
+ if background_traffic:
+ base_spec["background_traffic"] = {
+ "link_component": "ul2", # ul2 = transit flow of the period
+ "turn_component": None,
+ "add_transit_vehicles": False
+ }
+ return base_spec
+
+ @_context
+ def setup_skims(self, period, scenario):
+ emmebank = scenario.emmebank
+ with _m.logbook_trace("Extract skims for period %s" % period):
+ # temp_functions converts to skim-type VDFs
+ with temp_functions(emmebank):
+ backup_attributes = {"LINK": ["data2", "auto_volume", "auto_time", "additional_volume"]}
+ with gen_utils.backup_and_restore(scenario, backup_attributes):
+ yield
+
+ def prepare_midday_generic_truck(self, scenario):
+ modeller = _m.Modeller()
+ create_mode = modeller.tool(
+ "inro.emme.data.network.mode.create_mode")
+ delete_mode = modeller.tool(
+ "inro.emme.data.network.mode.delete_mode")
+ change_link_modes = modeller.tool(
+ "inro.emme.data.network.base.change_link_modes")
+ with _m.logbook_trace("Preparation for generic truck skim"):
+ gen_truck_mode = 'D'
+ truck_mode = scenario.mode(gen_truck_mode)
+ if not truck_mode:
+ truck_mode = create_mode(
+ mode_type="AUX_AUTO", mode_id=gen_truck_mode,
+ mode_description="all trucks", scenario=scenario)
+ change_link_modes(modes=[truck_mode], action="ADD",
+ selection="modes=vVmMtT", scenario=scenario)
+
+ # added by RSG (nagendra.dhakar@rsginc.com) for collapsed assignment classes testing
+ # this adds the non-transponder SOV mode to SR-125 links
+ # TODO: move this to the network_import step for consistency and forward-compatibility
+ def change_mode_sovntp(self, scenario):
+ modeller = _m.Modeller()
+ change_link_modes = modeller.tool(
+ "inro.emme.data.network.base.change_link_modes")
+ with _m.logbook_trace("Preparation for sov ntp assignment"):
+ gen_sov_mode = 's'
+ sov_mode = scenario.mode(gen_sov_mode)
+ change_link_modes(modes=[sov_mode], action="ADD",
+ selection="@lane_restriction=4", scenario=scenario)
+
+ def report(self, period, scenario, classes):
+ emmebank = scenario.emmebank
+ text = ['<div class="preformat">']
+ matrices = []
+ for traffic_class in classes:
+ matrices.extend(["%s_%s" % (traffic_class["name"], s.split(".")[0]) for s in traffic_class["skims"]])
+ num_zones = len(scenario.zone_numbers)
+ num_cells = num_zones ** 2
+ text.append("""
+ Number of zones: %s. Number of O-D pairs: %s.
+ Values outside -9999999, 9999999 are masked in summaries. """ % (num_zones, num_cells))
+ text.append("%-25s %9s %9s %9s %13s %9s" % ("name", "min", "max", "mean", "sum", "mask num"))
+ for name in matrices:
+ name = period + "_" + name
+ matrix = emmebank.matrix(name)
+ stats = self._stats.get(name)
+ if stats is None:
+ data = matrix.get_numpy_data(scenario)
+ data = numpy.ma.masked_outside(data, -9999999, 9999999, copy=False)
+ stats = (name, data.min(), data.max(), data.mean(), data.sum(), num_cells-data.count())
+ text.append("%-25s %9.4g %9.4g %9.4g %13.7g %9d" % stats)
+ text.append("
")
+ title = 'Traffic impedance summary for period %s' % period
+ report = _m.PageBuilder(title)
+ report.wrap_html('Matrix details', " ".join(text))
+ _m.logbook_write(title, report.render())
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ @_m.method(return_type=unicode)
+ def get_link_attributes(self):
+ export_utils = _m.Modeller().module("inro.emme.utility.export_utilities")
+ return export_utils.get_link_attributes(_m.Modeller().scenario)
+
+
+@_context
+def temp_functions(emmebank):
+ change_function = _m.Modeller().tool(
+ "inro.emme.data.function.change_function")
+ orig_expression = {}
+ with _m.logbook_trace("Set functions to skim parameter"):
+ for func in emmebank.functions():
+ if func.prefix=="fd":
+ exp = func.expression
+ orig_expression[func] = exp
+ if "volau+volad" in exp:
+ exp = exp.replace("volau+volad", "ul2")
+ change_function(func, exp, emmebank)
+ try:
+ yield
+ finally:
+ with _m.logbook_trace("Reset functions to assignment parameters"):
+ for func, expression in orig_expression.iteritems():
+ change_function(func, expression, emmebank)
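+# Illustrative example of the substitution above (hypothetical fd expression):
+# "ul1*(1+0.6*((volau+volad)/ul3)^2)" temporarily becomes
+# "ul1*(1+0.6*((ul2)/ul3)^2)", so the skim assignment evaluates the VDFs
+# against the fixed total flow stored in ul2 instead of a newly assigned volume.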
+
diff --git a/sandag_abm/src/main/emme/toolbox/assignment/transit_assignment.py b/sandag_abm/src/main/emme/toolbox/assignment/transit_assignment.py
new file mode 100644
index 0000000..cd23ba6
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/assignment/transit_assignment.py
@@ -0,0 +1,785 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// transit_assignment.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# The Transit assignment tool runs the transit assignment and skims for each
+# period on the current primary scenario.
+#
+# The Build transit network tool must be run first to prepare the scenario for
+# assignment. Note that this tool must be run with the Transit database
+# (under the Database_transit directory) open (as the active database in the
+# Emme desktop).
+#
+#
+# Inputs:
+# period: the time-of-day period, one of EA, AM, MD, PM, EV.
+# scenario: Transit assignment scenario
+# skims_only: Only run the assignments needed for skim matrices. If True, only
+# the walk-access assignments for the BUS, PREM and ALLPEN skim classes
+# are run. Otherwise, all access modes (walk, PNR, KNR) are assigned to
+# generate the total network flows.
+# num_processors: number of processors to use for the transit assignments.
+#
+# Matrices:
+# All transit demand and skim matrices.
+# See list of matrices under report method.
+#
+# Script example:
+"""
+import inro.modeller as _m
+import os
+import inro.emme.database.emmebank as _eb
+modeller = _m.Modeller()
+desktop = modeller.desktop
+
+build_transit_scen = modeller.tool("sandag.assignment.build_transit_scenario")
+transit_assign = modeller.tool("sandag.assignment.transit_assignment")
+export_transit_skims = modeller.tool("sandag.export.export_transit_skims")
+load_properties = modeller.tool('sandag.utilities.properties')
+
+project_dir = os.path.dirname(desktop.project_path())
+main_directory = os.path.dirname(project_dir)
+output_dir = os.path.join(main_directory, "output")
+props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+main_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database", "emmebank"))
+scenario_id = 100
+base_scenario = main_emmebank.scenario(scenario_id)
+
+transit_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database_transit", "emmebank"))
+
+periods = ["EA", "AM", "MD", "PM", "EV"]
+period_ids = list(enumerate(periods, start=int(scenario_id) + 1))
+num_processors = "MAX-1"
+scenarioYear = str(props["scenarioYear"])
+
+for number, period in period_ids:
+ src_period_scenario = main_emmebank.scenario(number)
+ transit_assign_scen = build_transit_scen(
+ period=period, base_scenario=src_period_scenario,
+ transit_emmebank=transit_emmebank,
+ scenario_id=src_period_scenario.id,
+ scenario_title="%s %s transit assign" % (base_scenario.title, period),
+ data_table_name=scenarioYear, overwrite=True)
+ transit_assign(period, transit_assign_scen, data_table_name=scenarioYear,
+ skims_only=True, num_processors=num_processors)
+
+omx_file = os.path.join(output_dir, "transit_skims.omx")
+export_transit_skims(omx_file, periods, transit_assign_scen)
+"""
+
+
+TOOLBOX_ORDER = 21
+
+
+import inro.modeller as _m
+import inro.emme.core.exception as _except
+import traceback as _traceback
+from copy import deepcopy as _copy
+from collections import defaultdict as _defaultdict, OrderedDict
+import contextlib as _context
+import numpy
+
+import os
+import sys
+import math
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class TransitAssignment(_m.Tool(), gen_utils.Snapshot):
+
+ period = _m.Attribute(unicode)
+ scenario = _m.Attribute(_m.InstanceType)
+ data_table_name = _m.Attribute(unicode)
+ assignment_only = _m.Attribute(bool)
+ skims_only = _m.Attribute(bool)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self.assignment_only = False
+ self.skims_only = False
+ self.scenario = _m.Modeller().scenario
+ self.num_processors = "MAX-1"
+ self.attributes = [
+ "period", "scenario", "data_table_name", "assignment_only", "skims_only", "num_processors"]
+ self._dt_db = _m.Modeller().desktop.project.data_tables()
+ self._matrix_cache = {} # used to hold data for reporting and post-processing of skims
+
+ def from_snapshot(self, snapshot):
+ super(TransitAssignment, self).from_snapshot(snapshot)
+ # custom from_snapshot to load scenario and database objects
+ self.scenario = _m.Modeller().emmebank.scenario(self.scenario)
+ return self
+
+ def page(self):
+ if not self.data_table_name:
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_directory = os.path.dirname(project_dir)
+ props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+ self.data_table_name = props["scenarioYear"]
+
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Transit assignment"
+ pb.description = """Assign transit demand for the selected time period."""
+ pb.branding_text = "- SANDAG - "
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+ options = [("EA", "Early AM"),
+ ("AM", "AM peak"),
+ ("MD", "Mid-day"),
+ ("PM", "PM peak"),
+ ("EV", "Evening")]
+ pb.add_select("period", options, title="Period:")
+ pb.add_select_scenario("scenario",
+ title="Transit assignment scenario:")
+ pb.add_text_box("data_table_name", title="Data table prefix name:", note="Default is the ScenarioYear")
+ pb.add_checkbox("assignment_only", title=" ", label="Only assign trips (no skims)")
+ pb.add_checkbox("skims_only", title=" ", label="Only run assignments relevant for skims")
+ dem_utils.add_select_processors("num_processors", pb, self)
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ results = self(
+ self.period, self.scenario, self.data_table_name,
+ self.assignment_only, self.skims_only, self.num_processors)
+ run_msg = "Transit assignment completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, period, scenario, data_table_name, assignment_only=False, skims_only=False,
+ num_processors="MAX-1"):
+ attrs = {
+ "period": period,
+ "scenario": scenario.id,
+ "data_table_name": data_table_name,
+ "assignment_only": assignment_only,
+ "skims_only": skims_only,
+ "num_processors": num_processors,
+ "self": str(self)
+ }
+ self.scenario = scenario
+ if not scenario.has_traffic_results:
+ raise Exception("missing traffic assignment results for period %s scenario %s" % (period, scenario))
+ emmebank = scenario.emmebank
+ with self.setup(attrs):
+ gen_utils.log_snapshot("Transit assignment", str(self), attrs)
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ if not period in periods:
+ raise Exception('period: unknown value - specify one of %s' % periods)
+ num_processors = dem_utils.parse_num_processors(num_processors)
+ params = self.get_perception_parameters(period)
+ network = scenario.get_partial_network(
+ element_types=["TRANSIT_LINE"], include_attributes=True)
+ coaster_mode = network.mode("c")
+ params["coaster_fare_percep"] = 0
+ for line in list(network.transit_lines()):
+ # get the coaster fare perception for use in journey levels
+ if line.mode == coaster_mode:
+ params["coaster_fare_percep"] = line[params["fare"]]
+ break
+
+ transit_passes = gen_utils.DataTableProc("%s_transit_passes" % data_table_name)
+ transit_passes = {row["pass_type"]: row["cost"] for row in transit_passes}
+ day_pass = float(transit_passes["day_pass"]) / 2.0
+ regional_pass = float(transit_passes["regional_pass"]) / 2.0
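+ # Note (assumption): pass costs are halved to approximate the one-way
+ # share of a day/regional pass; the halved value is then used as the
+ # max_fare cap on total fares in run_skims below.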
+
+ self.run_assignment(period, params, network, day_pass, skims_only, num_processors)
+
+ if not assignment_only:
+ # max_fare = day_pass for local bus and regional_pass for premium modes
+ self.run_skims("BUS", period, params, day_pass, num_processors, network)
+ self.run_skims("PREM", period, params, regional_pass, num_processors, network)
+ self.run_skims("ALLPEN", period, params, regional_pass, num_processors, network)
+ self.mask_allpen(period)
+ self.report(period)
+
+ @_context.contextmanager
+ def setup(self, attrs):
+ self._matrix_cache = {} # initialize cache at beginning of run
+ emmebank = self.scenario.emmebank
+ period = attrs["period"]
+ with _m.logbook_trace("Transit assignment for period %s" % period, attributes=attrs):
+ with gen_utils.temp_matrices(emmebank, "FULL", 3) as matrices:
+ matrices[0].name = "TEMP_IN_VEHICLE_COST"
+ matrices[1].name = "TEMP_LAYOVER_BOARD"
+ matrices[2].name = "TEMP_PERCEIVED_FARE"
+ try:
+ yield
+ finally:
+ self._matrix_cache = {} # clear cache at end of run
+
+ def get_perception_parameters(self, period):
+ perception_parameters = {
+ "EA": {
+ "vot": 0.27,
+ "init_wait": 1.5,
+ "xfer_wait": 3.0,
+ "walk": 2.0,
+ "init_headway": "@headway_rev_op",
+ "xfer_headway": "@headway_op",
+ "fare": "@fare_per_op",
+ "in_vehicle": "@vehicle_per_op",
+ "fixed_link_time": "@trtime_link_ea"
+ },
+ "AM": {
+ "vot": 0.27,
+ "init_wait": 1.5,
+ "xfer_wait": 3.0,
+ "walk": 2.0,
+ "init_headway": "@headway_rev_am",
+ "xfer_headway": "@headway_am",
+ "fare": "@fare_per_pk",
+ "in_vehicle": "@vehicle_per_pk",
+ "fixed_link_time": "@trtime_link_am"
+ },
+ "MD": {
+ "vot": 0.27,
+ "init_wait": 1.5,
+ "xfer_wait": 3.0,
+ "walk": 2.0,
+ "init_headway": "@headway_rev_op",
+ "xfer_headway": "@headway_op",
+ "fare": "@fare_per_op",
+ "in_vehicle": "@vehicle_per_op",
+ "fixed_link_time": "@trtime_link_md"
+ },
+ "PM": {
+ "vot": 0.27,
+ "init_wait": 1.5,
+ "xfer_wait": 3.0,
+ "walk": 2.0,
+ "init_headway": "@headway_rev_pm",
+ "xfer_headway": "@headway_pm",
+ "fare": "@fare_per_pk",
+ "in_vehicle": "@vehicle_per_pk",
+ "fixed_link_time": "@trtime_link_pm"
+ },
+ "EV": {
+ "vot": 0.27,
+ "init_wait": 1.5,
+ "xfer_wait": 3.0,
+ "walk": 2.0,
+ "init_headway": "@headway_rev_op",
+ "xfer_headway": "@headway_op",
+ "fare": "@fare_per_op",
+ "in_vehicle": "@vehicle_per_op",
+ "fixed_link_time": "@trtime_link_ev"
+ }
+ }
+ return perception_parameters[period]
+
+ def group_modes_by_fare(self, network, day_pass_cost):
+ # Identify all the unique boarding fare values
+ fare_set = {mode.id: _defaultdict(lambda:0)
+ for mode in network.modes()
+ if mode.type == "TRANSIT"}
+ for line in network.transit_lines():
+ fare_set[line.mode.id][line["@fare"]] += 1
+ del fare_set['c'] # remove coaster mode, this fare is handled separately
+ # group the modes relative to day_pass
+ mode_groups = {
+ "bus": [], # have a bus fare, less than 1/2 day pass
+ "day_pass": [], # boarding fare is the same as 1/2 day pass
+ "premium": [] # special premium services not covered by day pass
+ }
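+ # Illustrative: if half a day pass costs 2.50, a mode whose maximum boarding
+ # fare is 1.25 falls in "bus", one at ~2.50 (within the isclose tolerance)
+ # in "day_pass", and one at 5.00 in "premium".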
+ for mode_id, fares in fare_set.items():
+ try:
+ max_fare = max(fares.keys())
+ except ValueError:
+ continue # an empty set means this mode is unused in this period
+ if numpy.isclose(max_fare, day_pass_cost, rtol=0.0001):
+ mode_groups["day_pass"].append((mode_id, fares))
+ elif max_fare < day_pass_cost:
+ mode_groups["bus"].append((mode_id, fares))
+ else:
+ mode_groups["premium"].append((mode_id, fares))
+ return mode_groups
+
+ def all_modes_journey_levels(self, params, network, day_pass_cost):
+ transfer_penalty = {"on_segments": {"penalty": "@transfer_penalty_s", "perception_factor": 5.0}}
+ transfer_wait = {
+ "effective_headways": "@headway_seg",
+ "headway_fraction": 0.5,
+ "perception_factor": params["xfer_wait"],
+ "spread_factor": 1.0
+ }
+ mode_groups = self.group_modes_by_fare(network, day_pass_cost)
+
+ def get_transition_rules(next_level):
+ rules = []
+ for name, group in mode_groups.items():
+ for mode_id, fares in group:
+ rules.append({"mode": mode_id, "next_journey_level": next_level[name]})
+ rules.append({"mode": "c", "next_journey_level": next_level["coaster"]})
+ return rules
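+ # Illustrative: get_transition_rules({"bus": 1, "day_pass": 2, "premium": 3, "coaster": 4})
+ # yields one rule per transit mode, e.g. {"mode": "b", "next_journey_level": 1}
+ # for a local bus mode grouped under "bus".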
+
+ journey_levels = [
+ {
+ "description": "base",
+ "destinations_reachable": False,
+ "transition_rules": get_transition_rules({"bus": 1, "day_pass": 2, "premium": 3, "coaster": 4}),
+ "boarding_time": {"global": {"penalty": 0, "perception_factor": 1}},
+ "waiting_time": {
+ "effective_headways": params["init_headway"], "headway_fraction": 0.5,
+ "perception_factor": params["init_wait"], "spread_factor": 1.0
+ },
+ "boarding_cost": {
+ "on_lines": {"penalty": "@fare", "perception_factor": params["fare"]},
+ "on_segments": {"penalty": "@coaster_fare_board", "perception_factor": params["coaster_fare_percep"]},
+ },
+ },
+ {
+ "description": "boarded_bus",
+ "destinations_reachable": True,
+ "transition_rules": get_transition_rules({"bus": 2, "day_pass": 2, "premium": 5, "coaster": 5}),
+ "boarding_time": transfer_penalty,
+ "waiting_time": transfer_wait,
+ "boarding_cost": {
+ # xfer from bus fare is on segments so circle lines get free transfer
+ "on_segments": {"penalty": "@xfer_from_bus", "perception_factor": params["fare"]},
+ },
+ },
+ {
+ "description": "day_pass",
+ "destinations_reachable": True,
+ "transition_rules": get_transition_rules({"bus": 2, "day_pass": 2, "premium": 5, "coaster": 5}),
+ "boarding_time": transfer_penalty,
+ "waiting_time": transfer_wait,
+ "boarding_cost": {
+ "on_lines": {"penalty": "@xfer_from_day", "perception_factor": params["fare"]},
+ },
+ },
+ {
+ "description": "boarded_premium",
+ "destinations_reachable": True,
+ "transition_rules": get_transition_rules({"bus": 5, "day_pass": 5, "premium": 5, "coaster": 5}),
+ "boarding_time": transfer_penalty,
+ "waiting_time": transfer_wait,
+ "boarding_cost": {
+ "on_lines": {"penalty": "@xfer_from_premium", "perception_factor": params["fare"]},
+ },
+ },
+ {
+ "description": "boarded_coaster",
+ "destinations_reachable": True,
+ "transition_rules": get_transition_rules({"bus": 5, "day_pass": 5, "premium": 5, "coaster": 5}),
+ "boarding_time": transfer_penalty,
+ "waiting_time": transfer_wait,
+ "boarding_cost": {
+ "on_lines": {"penalty": "@xfer_from_coaster", "perception_factor": params["fare"]},
+ },
+ },
+ {
+ "description": "regional_pass",
+ "destinations_reachable": True,
+ "transition_rules": get_transition_rules({"bus": 5, "day_pass": 5, "premium": 5, "coaster": 5}),
+ "boarding_time": transfer_penalty,
+ "waiting_time": transfer_wait,
+ "boarding_cost": {
+ "on_lines": {"penalty": "@xfer_regional_pass", "perception_factor": params["fare"]},
+ },
+ }
+ ]
+ return journey_levels
+
+ def filter_journey_levels_by_mode(self, modes, journey_levels):
+ # remove rules for unused modes from provided journey_levels
+ # (restrict to provided modes)
+ journey_levels = _copy(journey_levels)
+ for level in journey_levels:
+ rules = level["transition_rules"]
+ rules = [r for r in rules if r["mode"] in modes]
+ level["transition_rules"] = rules
+ # count level transition rules references to find unused levels
+ num_levels = len(journey_levels)
+ level_count = [0] * len(journey_levels)
+
+ def follow_rule(next_level):
+ level_count[next_level] += 1
+ if level_count[next_level] > 1:
+ return
+ for rule in journey_levels[next_level]["transition_rules"]:
+ follow_rule(rule["next_journey_level"])
+
+ follow_rule(0)
+ # remove unreachable levels
+ # and find new index for transition rules for remaining levels
+ level_map = {i:i for i in range(num_levels)}
+ for level_id, count in reversed(list(enumerate(level_count))):
+ if count == 0:
+ for index in range(level_id, num_levels):
+ level_map[index] -= 1
+ del journey_levels[level_id]
+ # re-index remaining journey_levels
+ for level in journey_levels:
+ for rule in level["transition_rules"]:
+ next_level = rule["next_journey_level"]
+ rule["next_journey_level"] = level_map[next_level]
+ return journey_levels
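+ # Illustrative: restricting the six all-mode levels to the local bus mode
+ # ("b") removes the premium and coaster levels that become unreachable and
+ # re-indexes the next_journey_level references of the remaining levels.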
+
+ @_m.logbook_trace("Transit assignment by demand set", save_arguments=True)
+ def run_assignment(self, period, params, network, day_pass_cost, skims_only, num_processors):
+ modeller = _m.Modeller()
+ scenario = self.scenario
+ emmebank = scenario.emmebank
+ assign_transit = modeller.tool(
+ "inro.emme.transit_assignment.extended_transit_assignment")
+
+ walk_modes = ["a", "w", "x"]
+ local_bus_mode = ["b"]
+ premium_modes = ["c", "l", "e", "p", "r", "y", "o"]
+
+ # get the generic all-modes journey levels table
+ journey_levels = self.all_modes_journey_levels(params, network, day_pass_cost)
+ local_bus_journey_levels = self.filter_journey_levels_by_mode(local_bus_mode, journey_levels)
+ premium_modes_journey_levels = self.filter_journey_levels_by_mode(premium_modes, journey_levels)
+ # All modes transfer penalty assignment uses penalty of 15 minutes
+ for level in journey_levels[1:]:
+ level["boarding_time"] = {"global": {"penalty": 15, "perception_factor": 1}}
+
+ base_spec = {
+ "type": "EXTENDED_TRANSIT_ASSIGNMENT",
+ "modes": [],
+ "demand": "",
+ "waiting_time": {
+ "effective_headways": params["init_headway"], "headway_fraction": 0.5,
+ "perception_factor": params["init_wait"], "spread_factor": 1.0
+ },
+ # Fare attributes
+ "boarding_cost": {"global": {"penalty": 0, "perception_factor": 1}},
+ "boarding_time": {"global": {"penalty": 0, "perception_factor": 1}},
+ "in_vehicle_cost": {"penalty": "@coaster_fare_inveh",
+ "perception_factor": params["coaster_fare_percep"]},
+ "in_vehicle_time": {"perception_factor": params["in_vehicle"]},
+ "aux_transit_time": {"perception_factor": params["walk"]},
+ "aux_transit_cost": None,
+ "journey_levels": [],
+ "flow_distribution_between_lines": {"consider_total_impedance": False},
+ "flow_distribution_at_origins": {
+ "fixed_proportions_on_connectors": None,
+ "choices_at_origins": "OPTIMAL_STRATEGY"
+ },
+ "flow_distribution_at_regular_nodes_with_aux_transit_choices": {
+ "choices_at_regular_nodes": "OPTIMAL_STRATEGY"
+ },
+ #"circular_lines": {
+ # "stay": True
+ #},
+ "connector_to_connector_path_prohibition": None,
+ "od_results": {"total_impedance": None},
+ "performance_settings": {"number_of_processors": num_processors}
+ }
+
+ skim_parameters = OrderedDict([
+ ("BUS", {
+ "modes": walk_modes + local_bus_mode,
+ "journey_levels": local_bus_journey_levels
+ }),
+ ("PREM", {
+ "modes": walk_modes + premium_modes,
+ "journey_levels": premium_modes_journey_levels
+ }),
+ ("ALLPEN", {
+ "modes": walk_modes + local_bus_mode + premium_modes,
+ "journey_levels": journey_levels
+ }),
+ ])
+
+ if skims_only:
+ access_modes = ["WLK"]
+ else:
+ access_modes = ["WLK", "PNR", "KNR"]
+ add_volumes = False
+ for a_name in access_modes:
+ for mode_name, parameters in skim_parameters.iteritems():
+ spec = _copy(base_spec)
+ name = "%s_%s%s" % (period, a_name, mode_name)
+ spec["modes"] = parameters["modes"]
+ spec["demand"] = 'mf"%s"' % name
+ spec["journey_levels"] = parameters["journey_levels"]
+ assign_transit(spec, class_name=name, add_volumes=add_volumes, scenario=self.scenario)
+ add_volumes = True
+
+ @_m.logbook_trace("Extract skims", save_arguments=True)
+ def run_skims(self, name, period, params, max_fare, num_processors, network):
+ modeller = _m.Modeller()
+ scenario = self.scenario
+ emmebank = scenario.emmebank
+ matrix_calc = modeller.tool(
+ "inro.emme.matrix_calculation.matrix_calculator")
+ network_calc = modeller.tool(
+ "inro.emme.network_calculation.network_calculator")
+ matrix_results = modeller.tool(
+ "inro.emme.transit_assignment.extended.matrix_results")
+ path_analysis = modeller.tool(
+ "inro.emme.transit_assignment.extended.path_based_analysis")
+ strategy_analysis = modeller.tool(
+ "inro.emme.transit_assignment.extended.strategy_based_analysis")
+
+ class_name = "%s_WLK%s" % (period, name)
+ skim_name = "%s_%s" % (period, name)
+ self.run_skims.logbook_cursor.write(name="Extract skims for %s, using assignment class %s" % (name, class_name))
+
+ with _m.logbook_trace("First and total wait time, number of boardings, fares, total walk time, in-vehicle time"):
+ # First and total wait time, number of boardings, fares, total walk time, in-vehicle time
+ spec = {
+ "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+ "actual_first_waiting_times": 'mf"%s_FIRSTWAIT"' % skim_name,
+ "actual_total_waiting_times": 'mf"%s_TOTALWAIT"' % skim_name,
+ "total_impedance": 'mf"%s_GENCOST"' % skim_name,
+ "by_mode_subset": {
+ "modes": [mode.id for mode in network.modes() if mode.type == "TRANSIT" or mode.type == "AUX_TRANSIT"],
+ "avg_boardings": 'mf"%s_XFERS"' % skim_name,
+ "actual_in_vehicle_times": 'mf"%s_TOTALIVTT"' % skim_name,
+ "actual_in_vehicle_costs": 'mf"TEMP_IN_VEHICLE_COST"',
+ "actual_total_boarding_costs": 'mf"%s_FARE"' % skim_name,
+ "perceived_total_boarding_costs": 'mf"TEMP_PERCEIVED_FARE"',
+ "actual_aux_transit_times": 'mf"%s_TOTALWALK"' % skim_name,
+ },
+ }
+ matrix_results(spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+ with _m.logbook_trace("Distance and in-vehicle time by mode"):
+ mode_combinations = [
+ ("BUS", ["b"], ["IVTT", "DIST"]),
+ ("LRT", ["l"], ["IVTT", "DIST"]),
+ ("CMR", ["c"], ["IVTT", "DIST"]),
+ ("EXP", ["e", "p"], ["IVTT", "DIST"]),
+ ("BRT", ["r", "y"], ["DIST"]),
+ ("BRTRED", ["r"], ["IVTT"]),
+ ("BRTYEL", ["y"], ["IVTT"]),
+ ("TIER1", ["o"], ["IVTT", "DIST"]),
+ ]
+ for mode_name, modes, skim_types in mode_combinations:
+ dist = 'mf"%s_%sDIST"' % (skim_name, mode_name) if "DIST" in skim_types else None
+ ivtt = 'mf"%s_%sIVTT"' % (skim_name, mode_name) if "IVTT" in skim_types else None
+ spec = {
+ "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+ "by_mode_subset": {
+ "modes": modes,
+ "distance": dist,
+ "actual_in_vehicle_times": ivtt,
+ },
+ }
+ matrix_results(spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+ # Sum total distance
+ spec = {
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_TOTDIST"' % skim_name,
+ "expression": ('mf"{0}_BUSDIST" + mf"{0}_LRTDIST" + mf"{0}_CMRDIST"'
+ ' + mf"{0}_EXPDIST" + mf"{0}_BRTDIST" + mf"{0}_TIER1DIST"').format(skim_name),
+ }
+ matrix_calc(spec, scenario=scenario, num_processors=num_processors)
+
+ # convert number of boardings to number of transfers
+ # and subtract transfers to the same line at layover points
+ with _m.logbook_trace("Number of transfers and total fare"):
+ spec = {
+ "trip_components": {"boarding": "@layover_board"},
+ "sub_path_combination_operator": "+",
+ "sub_strategy_combination_operator": "average",
+ "selected_demand_and_transit_volumes": {
+ "sub_strategies_to_retain": "ALL",
+ "selection_threshold": {"lower": -999999, "upper": 999999}
+ },
+ "results": {
+ "strategy_values": 'TEMP_LAYOVER_BOARD',
+ },
+ "type": "EXTENDED_TRANSIT_STRATEGY_ANALYSIS"
+ }
+ strategy_analysis(spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+ spec = {
+ "type": "MATRIX_CALCULATION",
+ "constraint":{
+ "by_value": {
+ "od_values": 'mf"%s_XFERS"' % skim_name,
+ "interval_min": 1, "interval_max": 9999999,
+ "condition": "INCLUDE"},
+ },
+ "result": 'mf"%s_XFERS"' % skim_name,
+ "expression": '(%s_XFERS - 1 - TEMP_LAYOVER_BOARD).max.0' % skim_name,
+ }
+ matrix_calc(spec, scenario=scenario, num_processors=num_processors)
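+ # Worked example (sketch): an average of 2.4 boardings with 0.2 same-line
+ # layover re-boardings gives (2.4 - 1 - 0.2).max.0 = 1.2 transfers; the
+ # constraint above leaves OD pairs with zero boardings untouched.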
+
+ # sum in-vehicle cost and boarding cost to get the fare paid
+ spec = {
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_FARE"' % skim_name,
+ "expression": '(%s_FARE + TEMP_IN_VEHICLE_COST).min.%s' % (skim_name, max_fare),
+ }
+ matrix_calc(spec, scenario=scenario, num_processors=num_processors)
+
+ # walk access time - get distance and convert to time with 3 miles / hr
+ with _m.logbook_trace("Walk time access, egress and xfer"):
+ path_spec = {
+ "portion_of_path": "ORIGIN_TO_INITIAL_BOARDING",
+ "trip_components": {"aux_transit": "length",},
+ "path_operator": "+",
+ "path_selection_threshold": {"lower": 0, "upper": 999999 },
+ "path_to_od_aggregation": {
+ "operator": "average",
+ "aggregated_path_values": 'mf"%s_ACCWALK"' % skim_name,
+ },
+ "type": "EXTENDED_TRANSIT_PATH_ANALYSIS"
+ }
+ path_analysis(path_spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+
+ # walk egress time - get distance and convert to time with 3 miles/ hr
+ path_spec = {
+ "portion_of_path": "FINAL_ALIGHTING_TO_DESTINATION",
+ "trip_components": {"aux_transit": "length",},
+ "path_operator": "+",
+ "path_selection_threshold": {"lower": 0, "upper": 999999 },
+ "path_to_od_aggregation": {
+ "operator": "average",
+ "aggregated_path_values": 'mf"%s_EGRWALK"' % skim_name
+ },
+ "type": "EXTENDED_TRANSIT_PATH_ANALYSIS"
+ }
+ path_analysis(path_spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+
+ spec_list = [
+ { # walk access time - convert to time with 3 miles/ hr
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_ACCWALK"' % skim_name,
+ "expression": '60.0 * %s_ACCWALK / 3.0' % skim_name,
+ },
+ { # walk egress time - convert to time with 3 miles/ hr
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_EGRWALK"' % skim_name,
+ "expression": '60.0 * %s_EGRWALK / 3.0' % skim_name,
+ },
+ { # transfer walk time = total - access - egress
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_XFERWALK"' % skim_name,
+ "expression": '({name}_TOTALWALK - {name}_ACCWALK - {name}_EGRWALK).max.0'.format(name=skim_name),
+ }]
+ matrix_calc(spec_list, scenario=scenario, num_processors=num_processors)
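+ # Example of the conversion above: a 0.5 mile access walk becomes
+ # 60.0 * 0.5 / 3.0 = 10 minutes at the assumed 3 mph walk speed.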
+
+ # transfer wait time
+ with _m.logbook_trace("Wait time - xfer"):
+ spec = {
+ "type": "MATRIX_CALCULATION",
+ "constraint":{
+ "by_value": {
+ "od_values": 'mf"%s_TOTALWAIT"' % skim_name,
+ "interval_min": 0, "interval_max": 9999999,
+ "condition": "INCLUDE"},
+ },
+ "result": 'mf"%s_XFERWAIT"' % skim_name,
+ "expression": '({name}_TOTALWAIT - {name}_FIRSTWAIT).max.0'.format(name=skim_name),
+ }
+ matrix_calc(spec, scenario=scenario, num_processors=num_processors)
+
+ with _m.logbook_trace("Calculate dwell time"):
+ with gen_utils.temp_attrs(scenario, "TRANSIT_SEGMENT", ["@dwt_for_analysis"]):
+ values = scenario.get_attribute_values("TRANSIT_SEGMENT", ["dwell_time"])
+ scenario.set_attribute_values("TRANSIT_SEGMENT", ["@dwt_for_analysis"], values)
+
+ spec = {
+ "trip_components": {"in_vehicle": "@dwt_for_analysis"},
+ "sub_path_combination_operator": "+",
+ "sub_strategy_combination_operator": "average",
+ "selected_demand_and_transit_volumes": {
+ "sub_strategies_to_retain": "ALL",
+ "selection_threshold": {"lower": -999999, "upper": 999999}
+ },
+ "results": {
+ "strategy_values": 'mf"%s_DWELLTIME"' % skim_name,
+ },
+ "type": "EXTENDED_TRANSIT_STRATEGY_ANALYSIS"
+ }
+ strategy_analysis(spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+
+ expr_params = _copy(params)
+ expr_params["xfers"] = 15.0
+ expr_params["name"] = skim_name
+ spec = {
+ "type": "MATRIX_CALCULATION",
+ "constraint": None,
+ "result": 'mf"%s_GENCOST"' % skim_name,
+ "expression": ("{xfer_wait} * {name}_TOTALWAIT "
+ "- ({xfer_wait} - {init_wait}) * {name}_FIRSTWAIT "
+ "+ 1.0 * {name}_TOTALIVTT + 0.5 * {name}_BUSIVTT"
+ "+ (1 / {vot}) * (TEMP_PERCEIVED_FARE + {coaster_fare_percep} * TEMP_IN_VEHICLE_COST)"
+ "+ {xfers} *({name}_XFERS.max.0) "
+ "+ {walk} * {name}_TOTALWALK").format(**expr_params)
+ }
+ matrix_calc(spec, scenario=scenario, num_processors=num_processors)
+ return
+
+ def mask_allpen(self, period):
+ # Reset skims to 0 if not both local and premium
+ skims = [
+ "FIRSTWAIT", "TOTALWAIT", "DWELLTIME", "BUSIVTT", "XFERS", "TOTALWALK",
+ "LRTIVTT", "CMRIVTT", "EXPIVTT", "LTDEXPIVTT", "BRTREDIVTT", "BRTYELIVTT", "TIER1IVTT",
+ "GENCOST", "XFERWAIT", "FARE",
+ "ACCWALK", "XFERWALK", "EGRWALK", "TOTALIVTT",
+ "BUSDIST", "LRTDIST", "CMRDIST", "EXPDIST", "BRTDIST" , "TIER1DIST"]
+ localivt_skim = self.get_matrix_data(period + "_ALLPEN_BUSIVTT")
+ totalivt_skim = self.get_matrix_data(period + "_ALLPEN_TOTALIVTT")
+ has_premium = numpy.greater((totalivt_skim - localivt_skim), 0)
+ has_both = numpy.greater(localivt_skim, 0) * has_premium
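+ # Sketch of the masking rule: local IVTT 5 min and total IVTT 12 min implies
+ # a 7 min premium component, so the skims are kept; where the strategy uses
+ # only one of local or premium (local == 0 or total == local), has_both is
+ # False and every ALLPEN skim for that OD pair is zeroed below.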
+ for skim in skims:
+ mat_name = period + "_ALLPEN_" + skim
+ data = self.get_matrix_data(mat_name)
+ self.set_matrix_data(mat_name, data * has_both)
+
+ def get_matrix_data(self, name):
+ data = self._matrix_cache.get(name)
+ if data is None:
+ matrix = self.scenario.emmebank.matrix(name)
+ data = matrix.get_numpy_data(self.scenario)
+ self._matrix_cache[name] = data
+ return data
+
+ def set_matrix_data(self, name, data):
+ matrix = self.scenario.emmebank.matrix(name)
+ self._matrix_cache[name] = data
+ matrix.set_numpy_data(data, self.scenario)
+
+ def report(self, period):
+ text = ['<div class="preformat">']
+ init_matrices = _m.Modeller().tool("sandag.initialize.initialize_matrices")
+ matrices = init_matrices.get_matrix_names("transit_skims", [period], self.scenario)
+ num_zones = len(self.scenario.zone_numbers)
+ num_cells = num_zones ** 2
+ text.append(
+ "Number of zones: %s. Number of O-D pairs: %s. "
+ "Values outside -9999999, 9999999 are masked in summaries. " % (num_zones, num_cells))
+ text.append("%-25s %9s %9s %9s %13s %9s" % ("name", "min", "max", "mean", "sum", "mask num"))
+ for name in matrices:
+ data = self.get_matrix_data(name)
+ data = numpy.ma.masked_outside(data, -9999999, 9999999, copy=False)
+ stats = (name, data.min(), data.max(), data.mean(), data.sum(), num_cells-data.count())
+ text.append("%-25s %9.4g %9.4g %9.4g %13.7g %9d" % stats)
+ text.append("
")
+ title = 'Transit impedance summary for period %s' % period
+ report = _m.PageBuilder(title)
+ report.wrap_html('Matrix details', "<br>".join(text))
+ _m.logbook_write(title, report.render())
diff --git a/sandag_abm/src/main/emme/toolbox/assignment/transit_select_analysis.py b/sandag_abm/src/main/emme/toolbox/assignment/transit_select_analysis.py
new file mode 100644
index 0000000..8f4b387
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/assignment/transit_select_analysis.py
@@ -0,0 +1,217 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// transit_select_analysis.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# This tool runs select type network analysis on the results of one or more
+# transit assignments. It is run as a post-process tool after the assignment
+# tools are complete, using the saved transit strategies. Any number of
+# analyses can be run without needing to rerun the assignments.
+#
+#
+# Inputs:
+# Trip components for selection: pick one or more extra attributes which
+# identify the network elements of interest by trip component:
+# in_vehicle
+# aux_transit
+# initial_boarding
+# transfer_boarding
+# transfer_alighting
+# final_alighting
+# Result suffix: the suffix to use in the naming of per-class result
+# attributes and matrices, up to 6 characters.
+# Threshold: the minimum number of elements which must be encountered
+# for the path selection.
+# Scenario: the scenario to analyse.
+#
+#
+# Script example:
+"""
+import inro.modeller as _m
+import os
+modeller = _m.Modeller()
+desktop = modeller.desktop
+
+select_analysis = modeller.tool("sandag.assignment.transit_select_analysis")
+
+project_dir = os.path.dirname(desktop.project_path())
+main_directory = os.path.dirname(project_dir)
+
+import inro.emme.database.emmebank as _eb
+transit_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database_transit", "emmebank"))
+
+base_scenario_id = 100  # assumed base scenario ID; period scenarios are numbered from it
+periods = ["EA", "AM", "MD", "PM", "EV"]
+period_ids = list(enumerate(periods, start=base_scenario_id + 1))
+
+suffix = "LRT"
+threshold = 1
+num_processors = "MAX-1"
+selection = {
+ "in_vehicle": None,
+ "aux_transit": None,
+ "initial_boarding": "@selected_line",
+ "transfer_boarding": None,
+ "transfer_alighting": None,
+ "final_alighting": None,
+}
+
+for number, period in period_ids:
+ scenario = transit_emmebank.scenario(number)
+ select_analysis(selection, suffix, threshold, scenario, num_processors)
+"""
+
+TOOLBOX_ORDER = 25
+
+
+import inro.modeller as _m
+import inro.emme.core.exception as _except
+import traceback as _traceback
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class TransitSelectAnalysis(_m.Tool(), gen_utils.Snapshot):
+
+ in_vehicle = _m.Attribute(_m.InstanceType)
+ aux_transit = _m.Attribute(_m.InstanceType)
+ initial_boarding = _m.Attribute(_m.InstanceType)
+ transfer_boarding = _m.Attribute(_m.InstanceType)
+ transfer_alighting = _m.Attribute(_m.InstanceType)
+ final_alighting = _m.Attribute(_m.InstanceType)
+
+ suffix = _m.Attribute(str)
+ threshold = _m.Attribute(int)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ self.threshold = 1
+ self.num_processors = "MAX-1"
+ self.attributes = [
+ "in_vehicle", "aux_transit", "initial_boarding", "transfer_boarding",
+ "transfer_alighting", "final_alighting", "suffix", "threshold",
+ "num_processors"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Transit select analysis"
+ pb.description = """
+ Run a select-type analysis (select link, select node, select line ...) on
+ the results of the transit assignment(s) using a path-based analysis.
+ Can be used after a transit assignment has been completed."""
+ pb.branding_text = "- SANDAG - Assignment"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ with pb.section("Trip components for selection:"):
+ domains = ["LINK", "NODE", "TRANSIT_SEGMENT", "TRANSIT_LINE"]
+ pb.add_select_extra_attribute("in_vehicle", title="In-vehicle", filter=domains, allow_none=True)
+ pb.add_select_extra_attribute("aux_transit", title="Auxilary transit", filter=domains, allow_none=True)
+ pb.add_select_extra_attribute("initial_boarding", title="Initial boarding", filter=domains, allow_none=True)
+ pb.add_select_extra_attribute("transfer_boarding", title="Transfer boarding", filter=domains, allow_none=True)
+ pb.add_select_extra_attribute("transfer_alighting", title="Transfer alighting", filter=domains, allow_none=True)
+ pb.add_select_extra_attribute("final_alighting", title="Final alighting", filter=domains, allow_none=True)
+
+ pb.add_text_box("suffix", title="Suffix for results (matrices and attributes):", size=6,
+ note="The suffix to use in the naming of per-class result attributes and matrices, up to 6 characters. "
+ "Should be unique (existing attributes / matrices will be overwritten).")
+ pb.add_text_box("threshold", title="Threshold for selection:",
+ note="The minimum number of links which must be encountered for the path selection. "
+ "The default value of 1 indicates an 'any' link selection.")
+ dem_utils.add_select_processors("num_processors", pb, self)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ selection = {
+ "in_vehicle": self.in_vehicle,
+ "aux_transit": self.aux_transit,
+ "initial_boarding": self.initial_boarding,
+ "transfer_boarding": self.transfer_boarding,
+ "transfer_alighting": self.transfer_alighting,
+ "final_alighting": self.final_alighting,
+ }
+ scenario = _m.Modeller().scenario
+ results = self(selection, self.suffix, self.threshold, scenario, self.num_processors)
+ run_msg = "Traffic assignment completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, selection, suffix, threshold, scenario, num_processors):
+ attrs = {
+ "selection": selection,
+ "suffix": suffix,
+ "threshold": threshold,
+ "scenario": scenario.id,
+ "num_processors": num_processors
+ }
+ with _m.logbook_trace("Transit select analysis %s" % suffix, attributes=attrs):
+ attrs.update(dict((k,v) for k,v in attrs["selection"].iteritems()))
+ gen_utils.log_snapshot("Transit select analysis", str(self), attrs)
+
+ path_analysis = _m.Modeller().tool(
+ "inro.emme.transit_assignment.extended.path_based_analysis")
+ create_attribute = _m.Modeller().tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+
+ spec = {
+ "portion_of_path": "COMPLETE",
+ "trip_components": selection,
+ "path_operator": "+",
+ "path_selection_threshold": {"lower": threshold, "upper": 999999},
+ "path_to_od_aggregation": None,
+ "constraint": None,
+ "analyzed_demand": None,
+ "results_from_retained_paths": None,
+ "path_to_od_statistics": None,
+ "path_details": None,
+ "type": "EXTENDED_TRANSIT_PATH_ANALYSIS"
+ }
+ strategies = scenario.transit_strategies
+ classes = [x.name for x in strategies.strat_files()]
+ if not classes:
+ raise Exception("Results for multi-class transit assignment not available")
+
+ for class_name in classes:
+ with _m.logbook_trace("Analysis for class %s" % class_name):
+ seldem_name = "SELDEM_%s_%s" % (class_name, suffix)
+ desc = "Selected demand for %s %s" % (class_name, suffix)
+ seldem = dem_utils.create_full_matrix(seldem_name, desc, scenario=scenario)
+ results_from_retained_paths = {
+ "paths_to_retain": "SELECTED",
+ "demand": seldem.named_id,
+ }
+ attributes = [
+ ("transit_volumes", "TRANSIT_SEGMENT", "@seltr_%s_%s", "%s '%s' sel segment flow"),
+ ("aux_transit_volumes", "LINK", "@selax_%s_%s", "%s '%s' sel aux transit flow"),
+ ("total_boardings", "TRANSIT_SEGMENT", "@selbr_%s_%s", "%s '%s' sel boardings"),
+ ("total_alightings", "TRANSIT_SEGMENT", "@selal_%s_%s", "%s '%s' sel alightings"),
+ ]
+ mode_name = class_name.lower()[3:]
+ for key, domain, name, desc in attributes:
+ attr = create_attribute(domain, name % (mode_name, suffix), desc % (class_name, suffix),
+ 0, overwrite=True, scenario=scenario)
+ results_from_retained_paths[key] = attr.id
+ spec["results_from_retained_paths"] = results_from_retained_paths
+ path_analysis(spec, class_name=class_name, scenario=scenario, num_processors=num_processors)
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
diff --git a/sandag_abm/src/main/emme/toolbox/build_toolbox.py b/sandag_abm/src/main/emme/toolbox/build_toolbox.py
new file mode 100644
index 0000000..1a9d1b7
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/build_toolbox.py
@@ -0,0 +1,411 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// build_toolbox.py ///
+#//// ///
+#//// Generates an mtbx (Emme Modeller Toolbox), based on the structure ///
+#//// of the Python source tree. ///
+#//// ///
+#//// Usage: build_toolbox.py [-s source_folder] [-p toolbox_path] ///
+#//// ///
+#//// [-p toolbox_path]: Specifies the name of the MTBX file. ///
+#//// If omitted, defaults to "sandag_toolbox.mtbx" ///
+#//// [-s source_folder]: The location of the source code folder. ///
+#//// If omitted, defaults to the working directory. ///
+#//// [-l] [--link] Build the toolbox with references to the files. ///
+#//// Use when developing or debugging scripts; changes to the ///
+#//// scripts can be picked up with a "Refresh" of the toolbox. ///
+#//// [-c] [--consolidate] Build the toolbox with copies of the ///
+#//// scripts included inside the toolbox. ///
+#//// Use to have a "frozen" version of the scripts; no further ///
+#//// changes to the source files are picked up. ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Example:
+# python "T:\projects\sr13\develop\emme_conversion\git\sandag_abm\ABM_EMME\src\main\emme\toolbox\build_toolbox.py" --link
+# -p "T:\projects\sr14\abm2_test\abm_runs\14_2_0\2035D_Hyperloop\emme_project\Scripts\sandag_toolbox.mtbx"
+# -s T:\projects\sr13\develop\emme_conversion\git\sandag_abm\ABM_EMME\src\main\emme\toolbox
+
+
+import os
+import re
+from datetime import datetime
+import subprocess
+import sqlite3.dbapi2 as sqllib
+import base64
+import pickle
+
+
+def check_namespace(ns):
+ if not re.match("^[a-zA-Z][a-zA-Z0-9_]*$", ns):
+ raise Exception("Namespace '%s' is invalid" % ns)
+
+
+def get_emme_version():
+ emme_process = subprocess.Popen(['Emme', '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = emme_process.communicate()[0]
+ return output.split(',')[0]
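+# Assumed output format: "Emme -V" prints something like
+# "Emme 4.3.7 64-bit, build 4.3.7.2" (exact text varies by release), so taking
+# the text before the first comma yields e.g. "Emme 4.3.7 64-bit".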
+
+
+def usc_transform(value):
+ try:
+ return unicode(value)
+ except Exception:
+ return unicode(str(value), encoding="raw-unicode-escape")
+
+
+class BaseNode(object):
+ def __init__(self, namespace, title):
+ check_namespace(namespace)
+ self.namespace = namespace
+ self.title = title
+ self.element_id = None
+ self.parent = None
+ self.root = None
+ self.children = []
+
+ def add_folder(self, namespace):
+ node = FolderNode(namespace, parent=self)
+ self.children.append(node)
+ return node
+
+ def add_tool(self, script_path, namespace):
+ try:
+ node = ToolNode(namespace, script_path, parent=self)
+ self.children.append(node)
+ with open(script_path, 'r') as f:
+ for line in f:
+ if line.startswith("TOOLBOX_ORDER"):
+ node.order = int(line.split("=")[1])
+ if line.startswith("TOOLBOX_TITLE"):
+ title = line.split("=")[1].strip()
+ node.title = title[1:-1] # exclude first and last quotes
+ except Exception, e:
+ print script_path, namespace
+ print type(e), str(e)
+ return None
+ return node
+
+ def consolidate(self):
+ for child in self.children:
+ child.consolidate()
+
+ def set_toolbox_order(self):
+ self.element_id = self.root.next_id()
+ self.children.sort(key=lambda x: x.order)
+ for child in self.children:
+ child.set_toolbox_order()
+
+
+class ElementTree(BaseNode):
+
+ def __init__(self, namespace, title):
+ super(ElementTree, self).__init__(namespace, title)
+ self.next_element_id = 0
+ self.begin = str(datetime.now())
+ self.version = "Emme %s" % get_emme_version()
+ self.root = self
+
+ def next_id(self):
+ self.next_element_id += 1
+ return self.next_element_id
+
+
+class FolderNode(BaseNode):
+
+ def __init__(self, namespace, parent):
+ title = namespace.replace("_", " ").capitalize()
+ super(FolderNode, self).__init__(namespace, title)
+ self.parent = parent
+ self.root = parent.root
+ self.element_id = None
+
+ @property
+ def order(self):
+ child_order = [child.order for child in self.children if child.order is not None]
+ if child_order:
+ return min(child_order)
+ return None
+
+
+class ToolNode():
+
+ def __init__(self, namespace, script_path, parent):
+ check_namespace(namespace)
+ self.namespace = namespace
+ self.title = namespace.replace("_", " ").capitalize()
+
+ self.root = parent.root
+ self.parent = parent
+ self.element_id = None
+ self.order = None
+
+ self.script = script_path
+ self.extension = '.py'
+ self.code = ''
+
+ def consolidate(self):
+ with open(self.script, 'r') as f:
+ code = f.read()
+ self.code = usc_transform(base64.b64encode(pickle.dumps(code)))
+ self.script = ''
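+ # Inverse transform (sketch): the embedded source can be recovered with
+ # pickle.loads(base64.b64decode(code)), mirroring the encoding above.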
+
+ def set_toolbox_order(self):
+ self.element_id = self.root.next_id()
+
+class MTBXDatabase():
+ FORMAT_MAGIC_NUMBER = 'B8C224F6_7C94_4E6F_8C2C_5CC06F145271'
+ TOOLBOX_MAGIC_NUMBER = 'TOOLBOX_C6809332_CD61_45B3_9060_411D825669F8'
+ CATEGORY_MAGIC_NUMBER = 'CATEGORY_984876A0_3350_4374_B47C_6D9C5A47BBC8'
+ TOOL_MAGIC_NUMBER = 'TOOL_1AC06B56_6A54_431A_9515_0BF77013646F'
+
+ def __init__(self, filepath, title):
+ if os.path.exists(filepath):
+ os.remove(filepath)
+
+ self.db = sqllib.connect(filepath)
+
+ self._create_attribute_table()
+ self._create_element_table()
+ self._create_document_table()
+ self._create_triggers()
+
+ self._initialize_documents_table(title)
+
+ def _create_attribute_table(self):
+ sql = """CREATE TABLE attributes(
+ element_id INTEGER REFERENCES elements(element_id),
+ name VARCHAR,
+ value VARCHAR,
+ PRIMARY KEY(element_id, name));"""
+
+ self.db.execute(sql)
+
+ def _create_element_table(self):
+ sql = """CREATE TABLE elements(
+ element_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ parent_id INTEGER REFERENCES elements(element_id),
+ document_id INTEGER REFERENCES documents(document_id),
+ tag VARCHAR,
+ text VARCHAR,
+ tail VARCHAR);"""
+
+ self.db.execute(sql)
+
+ def _create_document_table(self):
+ sql = """CREATE TABLE documents(
+ document_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ title VARCHAR);"""
+
+ self.db.execute(sql)
+
+ def _create_triggers(self):
+ sql = """CREATE TRIGGER documents_delete
+ BEFORE DELETE on documents
+ FOR EACH ROW BEGIN
+ DELETE FROM elements WHERE document_id = OLD.document_id;
+ END"""
+
+ self.db.execute(sql)
+
+ sql = """CREATE TRIGGER elements_delete
+ BEFORE DELETE on elements
+ FOR EACH ROW BEGIN
+ DELETE FROM attributes WHERE element_id = OLD.element_id;
+ END"""
+
+ self.db.execute(sql)
+
+ def _initialize_documents_table(self, title):
+ sql = """INSERT INTO documents (document_id, title)
+ VALUES (1, '%s');""" % title
+
+ self.db.execute(sql)
+ self.db.commit()
+
+ def populate_tables_from_tree(self, tree):
+
+ #Insert into the elements table
+ column_string = "element_id, document_id, tag, text, tail"
+ value_string = "{id}, 1, '{title}', '', ''".format(
+ id=tree.element_id, title=tree.title)
+ sql = """INSERT INTO elements (%s)
+ VALUES (%s);""" % (column_string, value_string)
+ self.db.execute(sql)
+
+ #Insert into the attributes table
+ column_string = "element_id, name, value"
+ atts = {'major': '',
+ 'format': MTBXDatabase.FORMAT_MAGIC_NUMBER,
+ 'begin': tree.begin,
+ 'version': tree.version,
+ 'maintenance': '',
+ 'minor': '',
+ 'name': tree.title,
+ 'description': '',
+ 'namespace': tree.namespace,
+ MTBXDatabase.TOOLBOX_MAGIC_NUMBER: 'True'}
+ for key, val in atts.iteritems():
+ value_string = "{id}, '{name}', '{value}'".format(
+ id=tree.element_id, name=key, value=val)
+ sql = """INSERT INTO attributes (%s)
+ VALUES (%s);""" % (column_string, value_string)
+ self.db.execute(sql)
+
+ self.db.commit()
+
+ #Handle children nodes
+ for child in tree.children:
+ if isinstance(child, ToolNode):
+ self._insert_tool(child)
+ else:
+ self._insert_folder(child)
+
+ def _insert_folder(self, node):
+ #Insert into the elements table
+ column_string = "element_id, parent_id, document_id, tag, text, tail"
+ value_string = "{id}, {parent}, 1, '{title}', '', ''".format(
+ id=node.element_id, parent=node.parent.element_id, title=node.title)
+ sql = """INSERT INTO elements (%s)
+ VALUES (%s);""" % (column_string, value_string)
+ self.db.execute(sql)
+
+ #Insert into the attributes table
+ column_string = "element_id, name, value"
+ atts = {'namespace': node.namespace,
+ 'description': '',
+ 'name': node.title,
+ 'children': [c.element_id for c in node.children],
+ MTBXDatabase.CATEGORY_MAGIC_NUMBER: 'True'}
+ for key, val in atts.iteritems():
+ value_string = "{id}, '{name}', '{value}'".format(
+ id=node.element_id, name=key, value=val)
+ sql = """INSERT INTO attributes (%s)
+ VALUES (%s);""" % (column_string, value_string)
+ self.db.execute(sql)
+
+ self.db.commit()
+
+ #Handle children nodes
+ for child in node.children:
+ if isinstance(child, ToolNode):
+ self._insert_tool(child)
+ else:
+ self._insert_folder(child)
+
+ def _insert_tool(self, node):
+ #Insert into the elements table
+ column_string = "element_id, parent_id, document_id, tag, text, tail"
+ value_string = "{id}, {parent}, 1, '{title}', '', ''".format(
+ id=node.element_id, parent=node.parent.element_id, title=node.title)
+
+ sql = """INSERT INTO elements (%s)
+ VALUES (%s);""" % (column_string, value_string)
+ self.db.execute(sql)
+
+ #Insert into the attributes table
+ column_string = "element_id, name, value"
+ atts = {'code': node.code,
+ 'description': '',
+ 'script': node.script,
+ 'namespace': node.namespace,
+ 'python_suffix': node.extension,
+ 'name': node.title,
+ MTBXDatabase.TOOL_MAGIC_NUMBER: 'True'}
+ for key, val in atts.iteritems():
+ # use a parameterized query here: consolidated tool code may contain
+ # quotes that would break inline string formatting
+ sql = """INSERT INTO attributes (%s)
+ VALUES (?, ?, ?);""" % column_string
+ self.db.execute(sql, (node.element_id, key, val))
+
+ self.db.commit()
+
+
+def build_toolbox(toolbox_file, source_folder, title, namespace, consolidate):
+ print "------------------------"
+ print " Build Toolbox Utility"
+ print "------------------------"
+ print ""
+ print "toolbox: %s" % toolbox_file
+ print "source folder: %s" % source_folder
+ print "title: %s" % title
+ print "namespace: %s" % namespace
+ print ""
+
+ print "Loading toolbox structure"
+ tree = ElementTree(namespace, title)
+ explore_source_folder(source_folder, tree)
+ tree.set_toolbox_order()
+ print "Done. Found %s elements." % (tree.next_element_id)
+ if consolidate:
+ print "Consolidating code..."
+ tree.consolidate()
+ print "Consolidate done"
+
+ print ""
+ print "Building MTBX file..."
+ mtbx = MTBXDatabase(toolbox_file, title)
+ mtbx.populate_tables_from_tree(tree)
+ print "Build MTBX file done."
+
+
+def explore_source_folder(root_folder_path, parent_node):
+ folders = []
+ files = []
+ for item in os.listdir(root_folder_path):
+ itempath = os.path.join(root_folder_path, item)
+ if os.path.isfile(itempath):
+ name, extension = os.path.splitext(item)
+ if extension != '.py':
+ continue # skip non-Python files
+ if os.path.normpath(itempath) == os.path.normpath(os.path.abspath(__file__)):
+ continue # skip this file
+ files.append((name, extension))
+ else:
+ folders.append(item)
+
+ for foldername in folders:
+ folderpath = os.path.join(root_folder_path, foldername)
+ folder_node = parent_node.add_folder(namespace=foldername)
+ explore_source_folder(folderpath, folder_node)
+
+ for filename, ext in files:
+ script_path = os.path.join(root_folder_path, filename + ext)
+ parent_node.add_tool(script_path, namespace=filename)
+
+
+if __name__ == "__main__":
+ '''
+ Usage: build_toolbox.py [-p toolbox_path] [-s source_folder] [-l] [-c]
+ '''
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-s', '--src', help= "Path to the source code folder. Default is the working folder.")
+ parser.add_argument('-p', '--path', help= "Output file path. Default is 'sandag_toolbox.mtbx' in the source code folder.")
+ parser.add_argument('-l', '--link', help= "Link the python source files from their current location (instead of consolidate (compile) the toolbox).", action= 'store_true')
+ parser.add_argument('-c', '--consolidate', help= "Consolidate (compile) the toolbox (default option).", action= 'store_true')
+
+ args = parser.parse_args()
+
+ source_folder = args.src or os.path.dirname(os.path.abspath(__file__))
+ toolbox_file = args.path or os.path.join(source_folder, "sandag_toolbox.mtbx")
+ title = "SANDAG toolbox"
+ namespace = "sandag"
+ consolidate = args.consolidate
+ link = args.link
+ if consolidate and link:
+ raise Exception("-l and -c (--link and --consolidate) are mutually exclusive options")
+ if not consolidate and not link:
+ consolidate = True # default if neither is specified
+
+ build_toolbox(toolbox_file, source_folder, title, namespace, consolidate)
diff --git a/sandag_abm/src/main/emme/toolbox/diagnostic/mode_choice_diagnostic.py b/sandag_abm/src/main/emme/toolbox/diagnostic/mode_choice_diagnostic.py
new file mode 100644
index 0000000..f9f1ab1
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/diagnostic/mode_choice_diagnostic.py
@@ -0,0 +1,669 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright RSG, 2019-2020. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/mode_choice_diagnostic.py ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Diagnostic tool for the SANDAG activity-based travel model mode choice results.
+# This script first generates synthetic population files for target markets.
+# Users may input target market parameters via the "syn_pop_attributes.yaml" file.
+# Users must additionally input origin and destination MAZs (i.e. MGRAs) via the
+# "origin_mgra.csv" and "destination_mgra.csv" files.
+#
+# Once all synthetic population files have been created, the script creates a copy of
+# the "sandag_abm.properties" file and modifies specific property parameters so that
+# it is compatible with the mode choice diagnostic tool. The modified properties
+# file is renamed as "sandag_abm_mcd.properties"
+#
+# Finally, the mode choice diagnostic tool is run via "runSandagAbm_MCDiagnostic.cmd"
+# The mode choice diagnostic tool uses the synthetic population files as inputs and
+# outputs a tour file with utilities and probabilities for each tour mode.
+#
+# Files referenced:
+# input\mcd\destination_mgra.csv
+# input\mcd\origin_mgra.csv
+# input\mcd\syn_pop_attributes.yaml
+# output\mcd\mcd_households.csv
+# output\mcd\mcd_persons.csv
+# output\mcd\mcd_output_households.csv
+# output\mcd\mcd_output_persons.csv
+# output\mcd\mcd_work_location.csv
+# output\mcd\mcd_tour_file.csv
+# conf\sandag_abm.properties
+# bin\runSandagAbm_MCDiagnostic.cmd
+
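+# Script example (an illustrative sketch only; the tool namespace is assumed
+# to follow the source tree as "sandag.diagnostic.mode_choice_diagnostic" and
+# the tool is assumed to run from its input-file defaults without arguments):
+"""
+import inro.modeller as _m
+modeller = _m.Modeller()
+# hypothetical invocation; namespace and call signature are assumptions
+mode_choice_diagnostic = modeller.tool("sandag.diagnostic.mode_choice_diagnostic")
+mode_choice_diagnostic()
+"""
+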
+import inro.modeller as _m
+
+import pandas as pd
+import collections, os
+import shutil as _shutil
+import yaml
+import warnings
+import traceback as _traceback
+import tempfile as _tempfile
+import subprocess as _subprocess
+
+warnings.filterwarnings("ignore")
+
+_join = os.path.join
+_dir = os.path.dirname
+
+class mode_choice_diagnostic(_m.Tool()):
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ self.main_directory = _dir(project_dir)
+ self.properties_path = _join(_dir(project_dir), "conf")
+ self.mcd_out_path = _join(_dir(project_dir), "output", "mcd")
+ self.syn_pop_attributes_path = _join(_dir(project_dir), "input", "mcd", "syn_pop_attributes.yaml")
+ self.origin_mgra_path = _join(_dir(project_dir), "input", "mcd", "origin_mgra.csv")
+ self.destination_mgra_path = _join(_dir(project_dir), "input", "mcd", "destination_mgra.csv")
+ self.household_df = pd.DataFrame()
+ self.household_out_df = pd.DataFrame()
+ self.person_df = pd.DataFrame()
+ self.person_out_df = pd.DataFrame()
+ self.work_location_df = pd.DataFrame()
+ self.tour_df = pd.DataFrame()
+ self.household_attributes = {}
+ self.person_attributes = {}
+ self.tour_attributes = {}
+ self._log_level = "ENABLED"
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Mode Choice Diagnostic Tool"
+ pb.description = """
+ Diagnostic tool for the activity-based travel model mode choice results.
+
+
+ This tool first generates synthetic population files for specified target markets.
+ Users may edit target market attributes via a configuration file.
+ Users may additionally select origin and destination MAZs (i.e. MGRAs) of interest via
+ input CSV files.
+ The configuration file and MAZ selection CSV files are read from the following locations:
+
+
+ input\mcd\syn_pop_attributes.yaml
+ input\mcd\origin_mgra.csv
+ input\mcd\destination_mgra.csv
+
+ The synthetic population generator outputs the following files:
+
+ output\mcd\mcd_households.csv
+ output\mcd\mcd_persons.csv
+ output\mcd\mcd_output_households.csv
+ output\mcd\mcd_output_persons.csv
+ output\mcd\mcd_work_location.csv
+ output\mcd\mcd_tour_file.csv
+
+ Once all synthetic population files have been created, the script creates a copy of
+ the "sandag_abm.properties" file and modifies specific property parameters so that
+ it is compatible with the mode choice diagnostic tool. The modified properties
+ file is renamed and output as "conf\sandag_abm_mcd.properties"
+
+ Finally, the mode choice diagnostic tool is run via runSandagAbm_MCDiagnostic.cmd
+ The mode choice diagnostic tool uses the synthetic population files as inputs and
+ outputs a tour file with utilities and probabilities for each tour mode. The tour file
+ is output as "output\mcd\indivTourData_5.csv"
+
+ report.add_html('Output:<br><br><div class="preformat">%s</div>' % output)
+ except _subprocess.CalledProcessError as error:
+ report.add_html('Output:<br><br><div class="preformat">%s</div>' % error.output)
+ raise
+ finally:
+ err_file.close()
+ with open(err_file_path, 'r') as f:
+ error_msg = f.read()
+ os.remove(err_file_path)
+ if error_msg:
+ report.add_html('Error message(s):<br><br><div class="preformat">%s</div>' % error_msg)
+ try:
+ # No raise on writing report error
+ # due to observed issue with runs generating reports which cause
+ # errors when logged
+ _m.logbook_write("Process run %s report" % name, report.render())
+ except Exception as error:
+ print _time.strftime("%Y-%m-%d %H:%M:%S")
+ print "Error writing report '%s' to logbook" % name
+ print error
+ print _traceback.format_exc(error)
+ if self._log_level == "DISABLE_ON_ERROR":
+ _m.logbook_level(_m.LogbookLevel.NONE)
+ else:
+ _subprocess.check_call(command, cwd=self.main_directory, shell=True)
+
+ def set_global_logbook_level(self, props):
+ self._log_level = props.get("RunModel.LogbookLevel", "ENABLED")
+ log_all = _m.LogbookLevel.ATTRIBUTE | _m.LogbookLevel.VALUE | _m.LogbookLevel.COOKIE | _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG
+ log_states = {
+ "ENABLED": log_all,
+ "DISABLE_ON_ERROR": log_all,
+ "NO_EXTERNAL_REPORTS": log_all,
+ "NO_REPORTS": _m.LogbookLevel.ATTRIBUTE | _m.LogbookLevel.COOKIE | _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG,
+ "TITLES_ONLY": _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG,
+ "DISABLED": _m.LogbookLevel.NONE,
+ }
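+ # Example: "NO_REPORTS" keeps attribute, cookie, trace and log entries but
+ # drops report values; "TITLES_ONLY" keeps only the trace and log entries.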
+ _m.logbook_write("Setting logbook level to %s" % self._log_level)
+ try:
+ _m.logbook_level(log_states[self._log_level])
+ except KeyError:
+ raise Exception("properties.RunModel.LogLevel: value must be one of %s" % ",".join(log_states.keys()))
+
+ def move_mcd_files(self):
+
+ out_directory = _join(self.main_directory, "output")
+
+ hh_data = "householdData_5.csv"
+ ind_tour = "indivTourData_5.csv"
+ ind_trip = "indivTripData_5.csv"
+ joint_tour = "jointTourData_5.csv"
+ joint_trip = "jointTripData_5.csv"
+ per_data = "personData_5.csv"
+ mgra_park = "mgraParkingCost.csv"
+
+ files = [hh_data, ind_tour, ind_trip, joint_tour, joint_trip, per_data, mgra_park]
+
+ for file in files:
+ src = _join(out_directory, file)
+ if not os.path.exists(src):
+ raise Exception("missing output file '%s'" % (src))
+ dst = _join(self.mcd_out_path, file)
+ _shutil.move(src, dst)
+
+ def check_shp(self):
+
+ in_directory = _join(self.main_directory, "input", "mcd")
+ out_directory = self.mcd_out_path
+
+ shp_names = ["tapcov", "rtcov"]
+
+ for shp in shp_names:
+
+ files_to_move = [f for f in os.listdir(in_directory) if shp in f]
+
+ for file in files_to_move:
+
+ src = _join(in_directory, file)
+ dst = _join(out_directory, file)
+ if not os.path.exists(src):
+ raise Exception("missing shapefile '%s'" % (src))
+ _shutil.move(src, dst)
\ No newline at end of file
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_data_loader_matrices.py b/sandag_abm/src/main/emme/toolbox/export/export_data_loader_matrices.py
new file mode 100644
index 0000000..c0df738
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_data_loader_matrices.py
@@ -0,0 +1,302 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export/export_data_loader_matrices.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports the matrix results to OMX and csv files for use by the Java Data
+# export process and the Data loader to the reporting database.
+#
+#
+# Inputs:
+# output_dir: the output directory for the created files
+# base_scenario_id: scenario ID for the base scenario (same used in the Import network tool)
+# transit_scenario_id: scenario ID for the base transit scenario
+#
+# Files created:
+# CSV format files
+# ../report/trucktrip.csv
+# ../report/eetrip.csv
+# ../report/eitrip.csv
+# OMX format files
+# trip_pp.omx
+#
+#
+# Script example:
+"""
+ import os
+ import inro.emme.database.emmebank as _eb
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ main_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database", "emmebank"))
+ transit_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database_transit", "emmebank"))
+ output_dir = os.path.join(main_directory, "output")
+ num_processors = "MAX-1"
+ export_data_loader_matrices = modeller.tool(
+ "sandag.export.export_data_loader_matrices")
+ export_data_loader_matrices(output_dir, 100, main_emmebank, transit_emmebank, num_processors)
+"""
+TOOLBOX_ORDER = 74
+
+
+import inro.modeller as _m
+import inro.emme.database.emmebank as _eb
+import traceback as _traceback
+from collections import OrderedDict
+import os
+import numpy
+import warnings
+import tables
+
+
+warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+_join = os.path.join
+_dir = os.path.dirname
+
+class ExportDataLoaderMatrices(_m.Tool(), gen_utils.Snapshot):
+
+ output_dir = _m.Attribute(str)
+ base_scenario_id = _m.Attribute(int)
+ transit_scenario_id = _m.Attribute(int)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.output_dir = os.path.join(os.path.dirname(project_dir), "output")
+ self.base_scenario_id = 100
+ self.transit_scenario_id = 100
+ self.periods = ["EA", "AM", "MD", "PM", "EV"]
+ self.attributes = ["main_directory", "base_scenario_id", "transit_scenario_id"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export matrices for Data Loader"
+ pb.description = """
+ Export model results to OMX files for export by Data Exporter
+ to CSV format for load in SQL Data loader."""
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('output_dir', 'directory',
+ title='Select output directory')
+
+ pb.add_text_box('base_scenario_id', title="Base scenario ID:", size=10)
+ pb.add_text_box('transit_scenario_id', title="Transit scenario ID:", size=10)
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ base_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database", "emmebank"))
+ transit_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database_transit", "emmebank"))
+ base_scenario = base_emmebank.scenario(self.base_scenario_id)
+ transit_scenario = transit_emmebank.scenario(self.transit_scenario_id)
+
+ results = self(self.output_dir, base_scenario, transit_scenario)
+ run_msg = "Export completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export matrices for Data Loader", save_arguments=True)
+ def __call__(self, output_dir, base_scenario, transit_scenario):
+ attrs = {
+ "output_dir": output_dir,
+ "base_scenario_id": base_scenario.id,
+ "transit_scenario_id": transit_scenario.id,
+ "self": str(self)
+ }
+ gen_utils.log_snapshot("Export Matrices for Data Loader", str(self), attrs)
+ self.output_dir = output_dir
+ self.base_scenario = base_scenario
+ self.transit_scenario = transit_scenario
+
+ self.truck_demand()
+ self.external_demand()
+ self.total_demand()
+
+ @_m.logbook_trace("Export truck demand")
+ def truck_demand(self):
+ name_mapping = [
+ # ("lhdn", "TRKLGP", 1.3),
+ # ("mhdn", "TRKMGP", 1.5),
+ # ("hhdn", "TRKHGP", 2.5),
+ ("lhdt", "TRK_L", 1.3),
+ ("mhdt", "TRK_M", 1.5),
+ ("hhdt", "TRK_H", 2.5),
+ ]
+ scenario = self.base_scenario
+ emmebank = scenario.emmebank
+ zones = scenario.zone_numbers
+ formater = lambda x: ("%.5f" % x).rstrip('0').rstrip(".")
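+ # e.g. formater(0.50000) -> "0.5" and formater(3.0) -> "3"
+ # (trailing zeros, then a trailing decimal point, are stripped)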
+ truck_trip_path = os.path.join(os.path.dirname(self.output_dir), "report", "trucktrip.csv")
+
+ # get auto operating cost
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(_dir(self.output_dir), "conf", "sandag_abm.properties"))
+ try:
+ aoc = float(props["aoc.fuel"]) + float(props["aoc.maintenance"])
+ except ValueError:
+ raise Exception("Error during float conversion for aoc.fuel or aoc.maintenance from sandag_abm.properties file")
+
+ with open(truck_trip_path, 'w') as f:
+ f.write("OTAZ,DTAZ,TOD,MODE,TRIPS,TIME,DIST,AOC,TOLLCOST\n")
+ for period in self.periods:
+ for key, name, pce in name_mapping:
+ matrix_data = emmebank.matrix(period + "_" + name + "_VEH").get_data(scenario)
+ matrix_data_time = emmebank.matrix(period + "_" + name + "_TIME").get_data(scenario)
+ matrix_data_dist = emmebank.matrix(period + "_" + name + "_DIST").get_data(scenario)
+ matrix_data_tollcost = emmebank.matrix(period + "_" + name + "_TOLLCOST").get_data(scenario)
+ rounded_demand = 0
+ for orig in zones:
+ for dest in zones:
+ value = matrix_data.get(orig, dest)
+ # skip trips less than 0.00001 to avoid 0 trips records in database
+ if value < 0.00001:
+ rounded_demand += value
+ continue
+ time = matrix_data_time.get(orig, dest)
+ distance = matrix_data_dist.get(orig, dest)
+ tollcost = matrix_data_tollcost.get(orig, dest)
+ od_aoc = distance * aoc
+ f.write(",".join([str(orig), str(dest), period, key, formater(value), formater(time), formater(distance), formater(od_aoc), formater(tollcost)]))
+ f.write("\n")
+ if rounded_demand > 0:
+ print period + "_" + name + "_VEH", "rounded_demand", rounded_demand
+
+ def external_demand(self):
+ #get auto operating cost
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(_dir(self.output_dir), "conf", "sandag_abm.properties"))
+ try:
+ aoc = float(props["aoc.fuel"]) + float(props["aoc.maintenance"])
+ except ValueError:
+ raise Exception("Error during float conversion for aoc.fuel or aoc.maintenance from sandag_abm.properties file")
+
+ # EXTERNAL-EXTERNAL TRIP TABLE (toll-eligible)
+ name_mapping = [
+ ("DA", "SOV"),
+ ("S2", "HOV2"),
+ ("S3", "HOV3"),
+ ]
+ scenario = self.base_scenario
+ emmebank = scenario.emmebank
+ zones = scenario.zone_numbers
+ formater = lambda x: ("%.5f" % x).rstrip('0').rstrip(".")
+ ee_trip_path = os.path.join(os.path.dirname(self.output_dir), "report", "eetrip.csv")
+ with _m.logbook_trace("Export external-external demand"):
+ with open(ee_trip_path, 'w') as f:
+ f.write("OTAZ,DTAZ,TOD,MODE,TRIPS,TIME,DIST,AOC,TOLLCOST\n")
+ for period in self.periods:
+ matrix_data_time = emmebank.matrix(period + "_SOV_NT_M_TIME").get_data(scenario)
+ matrix_data_dist = emmebank.matrix(period + "_SOV_NT_M_DIST").get_data(scenario)
+ matrix_data_tollcost = emmebank.matrix(period + "_SOV_NT_M_TOLLCOST").get_data(scenario)
+ for key, name in name_mapping:
+ matrix_data = emmebank.matrix(period + "_" + name + "_EETRIPS").get_data(scenario)
+ rounded_demand = 0
+ for orig in zones:
+ for dest in zones:
+ value = matrix_data.get(orig, dest)
+ # skip trips less than 0.00001 to avoid 0 trips records in database
+ if value < 0.00001:
+ rounded_demand += value
+ continue
+ time = matrix_data_time.get(orig, dest)
+ distance = matrix_data_dist.get(orig, dest)
+ tollcost = matrix_data_tollcost.get(orig, dest)
+ od_aoc = distance * aoc
+ f.write(",".join(
+ [str(orig), str(dest), period, key, formater(value), formater(time),
+ formater(distance), formater(od_aoc), formater(tollcost)]))
+ f.write("\n")
+ if rounded_demand > 0:
+ print period + "_" + name + "_EETRIPS", "rounded_demand", rounded_demand
+
+ # EXTERNAL-INTERNAL TRIP TABLE
+ name_mapping = [
+ ("DAN", "SOVGP"),
+ ("DAT", "SOVTOLL"),
+ ("S2N", "HOV2HOV"),
+ ("S2T", "HOV2TOLL"),
+ ("S3N", "HOV3HOV"),
+ ("S3T", "HOV3TOLL"),
+ ]
+ ei_trip_path = os.path.join(os.path.dirname(self.output_dir), "report", "eitrip.csv")
+
+ with _m.logbook_trace("Export external-internal demand"):
+ with open(ei_trip_path, 'w') as f:
+ f.write("OTAZ,DTAZ,TOD,MODE,PURPOSE,TRIPS,TIME,DIST,AOC,TOLLCOST\n")
+ for period in self.periods:
+ matrix_data_time = emmebank.matrix(period + "_SOV_TR_M_TIME").get_data(scenario)
+ matrix_data_dist = emmebank.matrix(period + "_SOV_TR_M_DIST").get_data(scenario)
+ if "TOLL" in name:
+ matrix_data_tollcost = emmebank.matrix(period + "_SOV_NT_M_TOLLCOST").get_data(scenario)
+ for purpose in ["WORK", "NONWORK"]:
+ for key, name in name_mapping:
+ matrix_data = emmebank.matrix(period + "_" + name + "_EI" + purpose).get_data(scenario)
+ rounded_demand = 0
+ for orig in zones:
+ for dest in zones:
+ value = matrix_data.get(orig, dest)
+ # skip trips less than 0.00001 to avoid 0 trips records in database
+ if value < 0.00001:
+ rounded_demand += value
+ continue
+ time = matrix_data_time.get(orig, dest)
+ distance = matrix_data_dist.get(orig, dest)
+ tollcost = 0
+ if "TOLL" in name:
+ tollcost = matrix_data_tollcost.get(orig, dest)
+ od_aoc = distance * aoc
+ f.write(",".join(
+ [str(orig), str(dest), period, key, purpose, formater(value), formater(time),
+ formater(distance), formater(od_aoc), formater(tollcost)]))
+ f.write("\n")
+ if rounded_demand > 0:
+ print period + "_" + name + "_EI" + purpose, "rounded_demand", rounded_demand
+
+ @_m.logbook_trace("Export total auto and truck demand to OMX")
+ def total_demand(self):
+ for period in self.periods:
+ matrices = {
+ "%s_SOV_NT_L": 'mf"%s_SOV_NT_L"',
+ "%s_SOV_TR_L": 'mf"%s_SOV_TR_L"',
+ "%s_HOV2_L": 'mf"%s_HOV2_L"',
+ "%s_HOV3_L": 'mf"%s_HOV3_L"',
+ "%s_SOV_NT_M": 'mf"%s_SOV_NT_M"',
+ "%s_SOV_TR_M": 'mf"%s_SOV_TR_M"',
+ "%s_HOV2_M": 'mf"%s_HOV2_M"',
+ "%s_HOV3_M": 'mf"%s_HOV3_M"',
+ "%s_SOV_NT_H": 'mf"%s_SOV_NT_H"',
+ "%s_SOV_TR_H": 'mf"%s_SOV_TR_H"',
+ "%s_HOV2_H": 'mf"%s_HOV2_H"',
+ "%s_HOV3_H": 'mf"%s_HOV3_H"',
+ "%s_TRK_H": 'mf"%s_TRK_H"',
+ "%s_TRK_L": 'mf"%s_TRK_L"',
+ "%s_TRK_M": 'mf"%s_TRK_M"',
+ }
+ matrices = dict((k % period, v % period) for k, v in matrices.iteritems())
+ omx_file = os.path.join(self.output_dir, "trip_%s.omx" % period)
+ with gen_utils.ExportOMX(omx_file, self.base_scenario) as exporter:
+ exporter.write_matrices(matrices)
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_data_loader_network.py b/sandag_abm/src/main/emme/toolbox/export/export_data_loader_network.py
new file mode 100644
index 0000000..ec38ed3
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_data_loader_network.py
@@ -0,0 +1,1203 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export_data_loader_network.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports the network results to csv file for use by the Java Data export process
+# and the Data loader to the reporting database.
+#
+#
+# Inputs:
+# main_directory: main ABM directory
+# base_scenario_id: scenario ID for the base scenario (same used in the Import network tool)
+# traffic_emmebank: the base, traffic, Emme database
+# transit_emmebank: the transit database
+# num_processors: number of processors to use in the transit analysis calculations
+#
+# Files created:
+# report/hwyload_pp.csv
+# report/hwy_tcad.csv rename to hwyTcad.csv
+# report/transit_aggflow.csv
+# report/transit_flow.csv
+# report/transit_onoff.csv
+# report/trrt.csv rename to transitRoute.csv
+# report/trstop.csv rename to transitStop.csv
+# report/transitTap.csv
+# report/transitLink.csv
+#
+# Script example:
+"""
+ import os
+ import inro.emme.database.emmebank as _eb
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ main_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database", "emmebank"))
+ transit_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database_transit", "emmebank"))
+ num_processors = "MAX-1"
+ export_data_loader_network = modeller.tool(
+ "sandag.export.export_data_loader_network")
+ export_data_loader_network(main_directory, 100, main_emmebank, transit_emmebank, num_processors)
+"""
+
+TOOLBOX_ORDER = 73
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import inro.emme.database.emmebank as _eb
+import inro.emme.desktop.worksheet as _ws
+import inro.emme.datatable as _dt
+import inro.emme.core.exception as _except
+from contextlib import contextmanager as _context
+from collections import OrderedDict
+from itertools import chain as _chain
+import math
+import os
+import pandas as pd
+import numpy as _np
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+format = lambda x: ("%.6f" % x).rstrip('0').rstrip(".")
+id_format = lambda x: str(int(x))
+
+class ExportDataLoaderNetwork(_m.Tool(), gen_utils.Snapshot):
+
+ main_directory = _m.Attribute(str)
+ base_scenario_id = _m.Attribute(int)
+ traffic_emmebank = _m.Attribute(str)
+ transit_emmebank = _m.Attribute(str)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.main_directory = os.path.dirname(project_dir)
+ self.base_scenario_id = 100
+ self.traffic_emmebank = os.path.join(project_dir, "Database", "emmebank")
+ self.transit_emmebank = os.path.join(project_dir, "Database_transit", "emmebank")
+ self.num_processors = "MAX-1"
+ self.attributes = ["main_directory", "base_scenario_id", "traffic_emmebank", "transit_emmebank", "num_processors"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export network for Data Loader"
+ pb.description = """
+Export network results to csv files for SQL data loader."""
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('main_directory', 'directory',
+ title='Select main directory')
+
+ pb.add_text_box('base_scenario_id', title="Base scenario ID:", size=10)
+ pb.add_select_file('traffic_emmebank', 'file',
+ title='Select traffic emmebank')
+ pb.add_select_file('transit_emmebank', 'file',
+ title='Select transit emmebank')
+
+ dem_utils.add_select_processors("num_processors", pb, self)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ results = self(self.main_directory, self.base_scenario_id,
+ self.traffic_emmebank, self.transit_emmebank,
+ self.num_processors)
+ run_msg = "Export completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export network results for Data Loader", save_arguments=True)
+ def __call__(self, main_directory, base_scenario_id, traffic_emmebank, transit_emmebank, num_processors):
+ attrs = {
+ "traffic_emmebank": str(traffic_emmebank),
+ "transit_emmebank": str(transit_emmebank),
+ "main_directory": main_directory,
+ "base_scenario_id": base_scenario_id,
+ "self": str(self)
+ }
+ gen_utils.log_snapshot("Export network results", str(self), attrs)
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+
+ traffic_emmebank = _eb.Emmebank(traffic_emmebank)
+ transit_emmebank = _eb.Emmebank(transit_emmebank)
+ export_path = os.path.join(main_directory, "report")
+ input_path = os.path.join(main_directory,"input")
+ num_processors = dem_utils.parse_num_processors(num_processors)
+
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ period_scenario_ids = OrderedDict((v, i) for i, v in enumerate(periods, start=base_scenario_id + 1))
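+ # e.g. base_scenario_id=100 maps periods to scenarios
+ # {"EA": 101, "AM": 102, "MD": 103, "PM": 104, "EV": 105}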
+
+ base_scenario = traffic_emmebank.scenario(base_scenario_id)
+
+ self.export_traffic_attribute(base_scenario, export_path, traffic_emmebank, period_scenario_ids, props)
+ self.export_traffic_load_by_period(export_path, traffic_emmebank, period_scenario_ids)
+ self.export_transit_results(export_path, input_path, transit_emmebank, period_scenario_ids, num_processors)
+ self.export_geometry(export_path, traffic_emmebank)
+
+ @_m.logbook_trace("Export traffic attribute data")
+ def export_traffic_attribute(self, base_scenario, export_path, traffic_emmebank, period_scenario_ids, props):
+ # Several column names are legacy from the original network files
+ # and data loader process, and are populated with zeros.
+ # items are ("column name", "attribute name") or ("column name", ("attribute name", default))
+ hwylink_attrs = [
+ ("ID", "@tcov_id"),
+ ("Length", "length"),
+ ("Dir", "is_one_way"),
+ ("hwycov-id:1", "@tcov_id"),
+ ("ID:1", "@tcov_id"),
+ ("Length:1", "length_feet"),
+ ("QID", "zero"),
+ ("CCSTYLE", "zero"),
+ ("UVOL", "zero"),
+ ("AVOL", "zero"),
+ ("TMP1", "zero"),
+ ("TMP2", "zero"),
+ ("PLOT", "zero"),
+ ("SPHERE", "@sphere"),
+ ("RTNO", "zero"),
+ ("LKNO", "zero"),
+ ("NM", "#name"),
+ ("FXNM", "#name_from"),
+ ("TXNM", "#name_to"),
+ ("AN", "i"),
+ ("BN", "j"),
+ ("COJUR", "zero"),
+ ("COSTAT", "zero"),
+ ("COLOC", "zero"),
+ ("RLOOP", "zero"),
+ ("ADTLK", "zero"),
+ ("ADTVL", "zero"),
+ ("PKPCT", "zero"),
+ ("TRPCT", "zero"),
+ ("SECNO", "zero"),
+ ("DIR:1", "zero"),
+ ("FFC", "type"),
+ ("CLASS", "zero"),
+ ("ASPD", "@speed_adjusted"),
+ ("IYR", "@year_open_traffic"),
+ ("IPROJ", "@project_code"),
+ ("IJUR", "@jurisdiction_type"),
+ ("IFC", "type"),
+ ("IHOV", "@lane_restriction"),
+ ("ITRUCK", "@truck_restriction"),
+ ("ISPD", "@speed_posted"),
+ ("ITSPD", "zero"),
+ ("IWAY", "iway"),
+ ("IMED", "@median"),
+ ("COST", "@cost_operating"),
+ ("ITOLLO", "@toll_md"),
+ ("ITOLLA", "@toll_am"),
+ ("ITOLLP", "@toll_pm"),
+ ]
+ directional_attrs = [
+ ("ABLNO", "@lane_md", "0"),
+ ("ABLNA", "@lane_am", "0"),
+ ("ABLNP", "@lane_pm", "0"),
+ ("ABAU", "@lane_auxiliary", "0"),
+ ("ABPCT", "zero", "0"),
+ ("ABPHF", "zero", "0"),
+ ("ABCNT", "@traffic_control", "0"),
+ ("ABTL", "@turn_thru", "0"),
+ ("ABRL", "@turn_right", "0"),
+ ("ABLL", "@turn_left", "0"),
+ ("ABTLB", "zero", "0"),
+ ("ABRLB", "zero", "0"),
+ ("ABLLB", "zero", "0"),
+ ("ABGC", "@green_to_cycle_init", "0"),
+ ("ABPLC", "per_lane_capacity", "1900"),
+ ("ABCPO", "@capacity_link_md", "999999"),
+ ("ABCPA", "@capacity_link_am", "999999"),
+ ("ABCPP", "@capacity_link_pm", "999999"),
+ ("ABCXO", "@capacity_inter_md", "999999"),
+ ("ABCXA", "@capacity_inter_am", "999999"),
+ ("ABCXP", "@capacity_inter_pm", "999999"),
+ ("ABCHO", "@capacity_hourly_op", "0"),
+ ("ABCHA", "@capacity_hourly_am", "0"),
+ ("ABCHP", "@capacity_hourly_pm", "0"),
+ ("ABTMO", "@time_link_md", "999"),
+ ("ABTMA", "@time_link_am", "999"),
+ ("ABTMP", "@time_link_pm", "999"),
+ ("ABTXO", "@time_inter_md", "0"),
+ ("ABTXA", "@time_inter_am", "0"),
+ ("ABTXP", "@time_inter_pm", "0"),
+ ("ABCST", "zero", "999.999"),
+ ("ABVLA", "zero", "0"),
+ ("ABVLP", "zero", "0"),
+ ("ABLOS", "zero", "0"),
+ ]
+ for key, name, default in directional_attrs:
+ hwylink_attrs.append((key, name))
+ for key, name, default in directional_attrs:
+ hwylink_attrs.append(("BA" + key[2:], (name, default)))
+ hwylink_attrs.append(("relifac", "relifac"))
+
+ time_period_atts = [
+ ("ITOLL2", "@toll"),
+ ("ITOLL3", "@cost_auto"),
+ ("ITOLL4", "@cost_med_truck"),
+ ("ITOLL5", "@cost_hvy_truck"),
+ ("ITOLL", "toll_hov"),
+ ("ABCP", "@capacity_link", "999999"),
+ ("ABCX", "@capacity_inter", "999999"),
+ ("ABTM", "@time_link", "999"),
+ ("ABTX", "@time_inter", "0"),
+ ("ABLN", "@lane", "0"),
+ ("ABSCST", "sov_total_gencost", ""),
+ ("ABH2CST", "hov2_total_gencost", ""),
+ ("ABH3CST", "hov3_total_gencost", ""),
+ ("ABSTM", "auto_time", ""),
+ ("ABHTM", "auto_time", ""),
+ ]
+ periods = ["_ea", "_am", "_md", "_pm", "_ev"]
+ for column in time_period_atts:
+ for period in periods:
+ key = column[0] + period.upper()
+ name = column[1] + period
+ hwylink_attrs.append((key, name))
+ if key.startswith("AB"):
+ for period in periods:
+ key = column[0] + period.upper()
+ name = column[1] + period
+ default = column[2]
+ hwylink_attrs.append(("BA" + key[2:], (name, default)))
+ for period in periods:
+ key = "ABPRELOAD" + period.upper()
+ name = "additional_volume" + period
+ default = "0"
+ hwylink_attrs.append((key, name))
+ hwylink_attrs.append(("BA" + key[2:], (name, default)))
+
+ vdf_attrs = [
+ ("AB_GCRatio", "@green_to_cycle", ""),
+ ("AB_Cycle", "@cycle", ""),
+ ("AB_PF", "progression_factor", ""),
+ ("ALPHA1", "alpha1", "0.8"),
+ ("BETA1", "beta1", "4"),
+ ("ALPHA2", "alpha2", "4.5"),
+ ("BETA2", "beta2", "2"),
+ ]
+ for key, name, default in vdf_attrs:
+ name = name + "_am" if name.startswith("@") else name
+ hwylink_attrs.append((key, name))
+ if key.startswith("AB"):
+ hwylink_attrs.append(("BA" + key[2:], (name, default)))
+ for period in periods:
+ for key, name, default in vdf_attrs:
+ name = name + period if name.startswith("@") else name
+ default = default or "0"
+ hwylink_attrs.append((key + period.upper(), name))
+ if key.startswith("AB"):
+ hwylink_attrs.append(("BA" + key[2:] + period.upper(), (name, default)))
+
+ network = base_scenario.get_partial_network(["LINK"], include_attributes=True)
+
+ # copy assignment results from the period scenarios
+ for period, scenario_id in period_scenario_ids.iteritems():
+ from_scenario = traffic_emmebank.scenario(scenario_id)
+ src_attrs = ["@auto_time", "additional_volume"]
+ dst_attrs = ["auto_time_" + period.lower(),
+ "additional_volume_" + period.lower()]
+ for dst_attr in dst_attrs:
+ network.create_attribute("LINK", dst_attr)
+ values = from_scenario.get_attribute_values("LINK", src_attrs)
+ network.set_attribute_values("LINK", dst_attrs, values)
+ # add in and calculate additional columns
+ new_attrs = [
+ ("zero", 0), ("is_one_way", 0), ("iway", 2), ("length_feet", 0),
+ ("toll_hov", 0), ("per_lane_capacity", 1900),
+ ("progression_factor", 1.0), ("alpha1", 0.8), ("beta1", 4.0),
+ ("alpha2", 4.5), ("beta2", 2.0), ("relifac", 1.0),
+ ]
+ for name, default in new_attrs:
+ network.create_attribute("LINK", name, default)
+ for period in periods:
+ network.create_attribute("LINK", "toll_hov" + period, 0)
+ network.create_attribute("LINK", "sov_total_gencost" + period, 0)
+ network.create_attribute("LINK", "hov2_total_gencost" + period, 0)
+ network.create_attribute("LINK", "hov3_total_gencost" + period, 0)
+ for link in network.links():
+ link.is_one_way = 1 if link.reverse_link else 0
+ link.iway = 2 if link.reverse_link else 1
+ link.length_feet = link.length * 5280
+ for period in periods:
+ link["toll_hov" + period] = link["@cost_hov2" + period] - link["@cost_operating"]
+ link["sov_total_gencost" + period] = link["auto_time" + period] + link["@cost_auto" + period]
+ link["hov2_total_gencost" + period] = link["auto_time" + period] + link["@cost_hov2" + period]
+ link["hov3_total_gencost" + period] = link["auto_time" + period] + link["@cost_hov3" + period]
+ if link.volume_delay_func == 24:
+ link.alpha2 = 6.0
+ link.per_lane_capacity = max([(link["@capacity_link" + p] / link["@lane" + p])
+ for p in periods if link["@lane" + p] > 0] + [0])
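+ # e.g. a link with @lane_am = 3 and @capacity_link_am = 5700 (no lanes coded
+ # in other periods) gets per_lane_capacity = 5700 / 3 = 1900; the trailing
+ # [0] keeps max() defined when no period has lanes (hypothetical values).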
+
+ hwylink_atts_file = os.path.join(export_path, "hwy_tcad.csv")
+ busPCE = props["transit.bus.pceveh"]
+ self.export_traffic_to_csv(hwylink_atts_file, hwylink_attrs, network, busPCE)
+
+ @_m.logbook_trace("Export traffic load data by period")
+ def export_traffic_load_by_period(self, export_path, traffic_emmebank, period_scenario_ids):
+ create_attribute = _m.Modeller().tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ net_calculator = _m.Modeller().tool(
+ "inro.emme.network_calculation.network_calculator")
+ hwyload_attrs = [("ID1", "@tcov_id")]
+
+ dir_atts = [
+ ("AB_Flow_PCE", "@pce_flow"), # sum of pce flow
+ ("AB_Time", "@auto_time"), # computed vdf based on pce flow
+ ("AB_VOC", "@voc"),
+ ("AB_V_Dist_T", "length"),
+ ("AB_VHT", "@vht"),
+ ("AB_Speed", "@speed"),
+ ("AB_VDF", "@msa_time"),
+ ("AB_MSA_Flow", "@msa_flow"),
+ ("AB_MSA_Time", "@msa_time"),
+ ("AB_Flow_SOV_NTPL", "@sov_nt_l"),
+ ("AB_Flow_SOV_TPL", "@sov_tr_l"),
+ ("AB_Flow_SR2L", "@hov2_l"),
+ ("AB_Flow_SR3L", "@hov3_l"),
+ ("AB_Flow_SOV_NTPM", "@sov_nt_m"),
+ ("AB_Flow_SOV_TPM", "@sov_tr_m"),
+ ("AB_Flow_SR2M", "@hov2_m"),
+ ("AB_Flow_SR3M", "@hov3_m"),
+ ("AB_Flow_SOV_NTPH", "@sov_nt_h"),
+ ("AB_Flow_SOV_TPH", "@sov_tr_h"),
+ ("AB_Flow_SR2H", "@hov2_h"),
+ ("AB_Flow_SR3H", "@hov3_h"),
+ ("AB_Flow_lhd", "@trk_l_non_pce"),
+ ("AB_Flow_mhd", "@trk_m_non_pce"),
+ ("AB_Flow_hhd", "@trk_h_non_pce"),
+ ("AB_Flow", "@non_pce_flow"),
+ ]
+
+ for key, attr in dir_atts:
+ hwyload_attrs.append((key, attr))
+ hwyload_attrs.append((key.replace("AB_", "BA_"), (attr, ""))) # default for BA on one-way links is blank
+ for p, scen_id in period_scenario_ids.iteritems():
+ scenario = traffic_emmebank.scenario(scen_id)
+ new_atts = [
+ ("@speed", "link travel speed", "length*60/@auto_time"),
+ ("@sov_nt_all", "total number of SOV GP vehicles",
+ "@sov_nt_l+@sov_nt_m+@sov_nt_h" ),
+ ("@sov_tr_all", "total number of SOV TOLL vehicles",
+ "@sov_tr_l+@sov_tr_m+@sov_tr_h" ),
+ ("@hov2_all", "total number of HOV2 HOV vehicles",
+ "@hov2_l+@hov2_m+@hov2_h" ),
+ ("@hov3_all", "total number of HOV3 HOV vehicles",
+ "@hov3_l+@hov3_m+@hov3_h" ),
+ ("@trk_l_non_pce", "total number of light trucks in non-Pce",
+ "(@trk_l)/1.3" ),
+ ("@trk_m_non_pce", "total medium trucks in non-Pce",
+ "(@trk_m)/1.5" ),
+ ("@trk_h_non_pce", "total heavy trucks in non-Pce",
+ "(@trk_h)/2.5" ),
+ ("@pce_flow", "total number of vehicles in Pce",
+ "@sov_nt_all+@sov_tr_all+ \
+ @hov2_all+ \
+ @hov3_all+ \
+ (@trk_l) + (@trk_m) + \
+ (@trk_h) + volad" ),
+ ("@non_pce_flow", "total number of vehicles in non-Pce",
+ "@sov_nt_all+@sov_tr_all+ \
+ @hov2_all+ \
+ @hov3_all+ \
+ (@trk_l)/1.3 + (@trk_m)/1.5 + \
+ (@trk_h)/2.5 + volad/3" ), #volad includes bus flow - pce factor is 3
+ ("@msa_flow", "MSA flow", "@non_pce_flow"), #flow from final assignment
+ ("@msa_time", "MSA time", "timau"), #skim assignment time on msa flow
+ ("@voc", "volume over capacity", "@pce_flow/ul3"), #pce flow over road capacity
+ ("@vht", "vehicle hours travelled", "@non_pce_flow*@auto_time/60") #vehicle flow (non-pce)*time
+ ]
+
+ for name, des, formula in new_atts:
+ att = scenario.extra_attribute(name)
+ if not att:
+ att = create_attribute("LINK", name, des, 0, overwrite=True, scenario=scenario)
+ cal_spec = {"result": att.id,
+ "expression": formula,
+ "aggregation": None,
+ "selections": {"link": "mode=d"},
+ "type": "NETWORK_CALCULATION"
+ }
+ net_calculator(cal_spec, scenario=scenario)
+ file_path = os.path.join(export_path, "hwyload_%s.csv" % p)
+ network = self.get_partial_network(scenario, {"LINK": ["@tcov_id"] + [a[1] for a in dir_atts]})
+ self.export_traffic_to_csv(file_path, hwyload_attrs, network)
+
+ def export_traffic_to_csv(self, filename, att_list, network, busPCE = None):
+ auto_mode = network.mode("d")
+ # keep only the original forward-direction links that carry auto traffic (directly or on the reverse link)
+ links = [l for l in network.links()
+ if l["@tcov_id"] > 0 and
+ (auto_mode in l.modes or (l.reverse_link and auto_mode in l.reverse_link.modes))
+ ]
+ links.sort(key=lambda l: l["@tcov_id"])
+ with open(filename, 'w') as fout:
+ fout.write(",".join(['"%s"' % x[0] for x in att_list]))
+ fout.write("\n")
+ for link in links:
+ key, att = att_list[0] # expected to be the link id
+ values = [id_format(link[att])]
+ reverse_link = link.reverse_link
+ for key, att in att_list[1:]:
+ if key == "AN":
+ values.append(link.i_node.id)
+ elif key == "BN":
+ values.append(link.j_node.id)
+ elif key.startswith("BA"):
+ name, default = att
+ if reverse_link and (abs(link["@tcov_id"]) == abs(reverse_link["@tcov_id"])):
+ if "additional_volume" in name:
+ values.append(format(float(reverse_link[name]) / busPCE))
+ else:
+ values.append(format(reverse_link[name]))
+ else:
+ values.append(default)
+ elif att.startswith("#"):
+ values.append('"%s"' % link[att])
+ else:
+ if "additional_volume" in att:
+ values.append(format(float(link[att]) / busPCE))
+ else:
+ values.append(format(link[att]))
+ fout.write(",".join(values))
+ fout.write("\n")
+
+ @_m.logbook_trace("Export transit results")
+ def export_transit_results(self, export_path, input_path, transit_emmebank, period_scenario_ids, num_processors):
+ # Note: node analysis for transfers is VERY time consuming;
+ # this implementation will be replaced when a newer Emme version is available
+
+ trrt_atts = ["Route_ID","Route_Name","Mode","AM_Headway","PM_Headway","OP_Headway","Night_Headway","Night_Hours","Config","Fare"]
+ trstop_atts = ["Stop_ID","Route_ID","Link_ID","Pass_Count","Milepost","Longitude","Latitude","NearNode","FareZone","StopName"]
+
+ #transit route file
+ trrt_infile = os.path.join(input_path, "trrt.csv")
+ trrt = pd.read_csv(trrt_infile)
+ trrt = trrt.rename(columns=lambda x:x.strip())
+ trrt_out = trrt[trrt_atts]
+ trrt_outfile = os.path.join(export_path, "trrt.csv")
+ trrt_out.to_csv(trrt_outfile, index=False)
+
+ #transit stop file
+ trstop_infile = os.path.join(input_path, "trstop.csv")
+ trstop = pd.read_csv(trstop_infile)
+ trstop = trstop.rename(columns={"HwyNode":"NearNode"})
+ trstop = trstop.rename(columns=lambda x:x.strip())
+ trstop_out = trstop[trstop_atts]
+ trstop_outfile = os.path.join(export_path, "trstop.csv")
+ trstop_out.to_csv(trstop_outfile, index=False)
+
+ use_node_analysis_to_get_transit_transfers = False
+
+ copy_scenario = _m.Modeller().tool(
+ "inro.emme.data.scenario.copy_scenario")
+ create_attribute = _m.Modeller().tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ net_calculator = _m.Modeller().tool(
+ "inro.emme.network_calculation.network_calculator")
+ copy_attribute= _m.Modeller().tool(
+ "inro.emme.data.network.copy_attribute")
+ delete_scenario = _m.Modeller().tool(
+ "inro.emme.data.scenario.delete_scenario")
+ transit_flow_atts = [
+ "MODE",
+ "ACCESSMODE",
+ "TOD",
+ "ROUTE",
+ "FROM_STOP",
+ "TO_STOP",
+ "CENTROID",
+ "FROMMP",
+ "TOMP",
+ "TRANSITFLOW",
+ "BASEIVTT",
+ "COST",
+ "VOC",
+ ]
+ transit_aggregate_flow_atts = [
+ "MODE",
+ "ACCESSMODE",
+ "TOD",
+ "LINK_ID",
+ "AB_TransitFlow",
+ "BA_TransitFlow",
+ "AB_NonTransit",
+ "BA_NonTransit",
+ "AB_TotalFlow",
+ "BA_TotalFlow",
+ "AB_Access_Walk_Flow",
+ "BA_Access_Walk_Flow",
+ "AB_Xfer_Walk_Flow",
+ "BA_Xfer_Walk_Flow",
+ "AB_Egress_Walk_Flow",
+ "BA_Egress_Walk_Flow"
+ ]
+ transit_onoff_atts = [
+ "MODE",
+ "ACCESSMODE",
+ "TOD",
+ "ROUTE",
+ "STOP",
+ "BOARDINGS",
+ "ALIGHTINGS",
+ "WALKACCESSON",
+ "DIRECTTRANSFERON",
+ "WALKTRANSFERON",
+ "DIRECTTRANSFEROFF",
+ "WALKTRANSFEROFF",
+ "EGRESSOFF"
+ ]
+
+ transit_flow_file = os.path.join(export_path, "transit_flow.csv")
+ fout_seg = open(transit_flow_file, 'w')
+ fout_seg.write(",".join(['"%s"' % x for x in transit_flow_atts]))
+ fout_seg.write("\n")
+
+ transit_aggregate_flow_file = os.path.join(export_path, "transit_aggflow.csv")
+ fout_link = open(transit_aggregate_flow_file, 'w')
+ fout_link.write(",".join(['"%s"' % x for x in transit_aggregate_flow_atts]))
+ fout_link.write("\n")
+
+ transit_onoff_file = os.path.join(export_path, "transit_onoff.csv")
+ fout_stop = open(transit_onoff_file, 'w')
+ fout_stop.write(",".join(['"%s"' % x for x in transit_onoff_atts]))
+ fout_stop.write("\n")
+ try:
+ access_modes = ["WLK", "PNR", "KNR"]
+ main_modes = ["BUS", "PREM","ALLPEN"]
+ all_modes = ["b", "c", "e", "l", "r", "p", "y", "o", "a", "w", "x"]
+ local_bus_modes = ["b", "a", "w", "x"]
+ premium_modes = ["c", "l", "e", "p", "r", "y", "o", "a", "w", "x"]
+ for tod, scen_id in period_scenario_ids.iteritems():
+ with _m.logbook_trace("Processing period %s" % tod):
+ scenario = transit_emmebank.scenario(scen_id)
+ # attributes
+ total_walk_flow = create_attribute("LINK", "@volax", "total walk flow on links",
+ 0, overwrite=True, scenario=scenario)
+ segment_flow = create_attribute("TRANSIT_SEGMENT", "@voltr", "transit segment flow",
+ 0, overwrite=True, scenario=scenario)
+ link_transit_flow = create_attribute("LINK", "@link_voltr", "total transit flow on link",
+ 0, overwrite=True, scenario=scenario)
+ initial_boardings = create_attribute("TRANSIT_SEGMENT",
+ "@init_boardings", "transit initial boardings",
+ 0, overwrite=True, scenario=scenario)
+ xfer_boardings = create_attribute("TRANSIT_SEGMENT",
+ "@xfer_boardings", "transit transfer boardings",
+ 0, overwrite=True, scenario=scenario)
+ total_boardings = create_attribute("TRANSIT_SEGMENT",
+ "@total_boardings", "transit total boardings",
+ 0, overwrite=True, scenario=scenario)
+ final_alightings = create_attribute("TRANSIT_SEGMENT",
+ "@final_alightings", "transit final alightings",
+ 0, overwrite=True, scenario=scenario)
+ xfer_alightings = create_attribute("TRANSIT_SEGMENT",
+ "@xfer_alightings", "transit transfer alightings",
+ 0, overwrite=True, scenario=scenario)
+ total_alightings = create_attribute("TRANSIT_SEGMENT",
+ "@total_alightings", "transit total alightings",
+ 0, overwrite=True, scenario=scenario)
+
+ access_walk_flow = create_attribute("LINK",
+ "@access_walk_flow", "access walks (orig to init board)",
+ 0, overwrite=True, scenario=scenario)
+ xfer_walk_flow = create_attribute("LINK",
+ "@xfer_walk_flow", "xfer walks (init board to final alight)",
+ 0, overwrite=True, scenario=scenario)
+ egress_walk_flow = create_attribute("LINK",
+ "@egress_walk_flow", "egress walks (final alight to dest)",
+ 0, overwrite=True, scenario=scenario)
+
+ for main_mode in main_modes:
+ mode = main_mode
+ if main_mode == "BUS":
+ mode_list = local_bus_modes
+ elif main_mode == "PREM":
+ mode_list = premium_modes
+ else:
+ mode_list = all_modes
+
+ for access_type in access_modes:
+ with _m.logbook_trace("Main mode %s access mode %s" % (main_mode, access_type)):
+ class_name = "%s_%s%s" % (tod, access_type, main_mode)
+ segment_results = {
+ "transit_volumes": segment_flow.id,
+ "initial_boardings": initial_boardings.id,
+ "total_boardings": total_boardings.id,
+ "final_alightings": final_alightings.id,
+ "total_alightings": total_alightings.id,
+ "transfer_boardings": xfer_boardings.id,
+ "transfer_alightings": xfer_alightings.id
+ }
+ link_results = {
+ "total_walk_flow": total_walk_flow.id,
+ "link_transit_flow": link_transit_flow.id,
+ "access_walk_flow": access_walk_flow.id,
+ "xfer_walk_flow": xfer_walk_flow.id,
+ "egress_walk_flow": egress_walk_flow.id
+ }
+
+ self.calc_additional_results(
+ scenario, class_name, num_processors,
+ total_walk_flow, segment_results, link_transit_flow,
+ access_walk_flow, xfer_walk_flow, egress_walk_flow)
+ attributes = {
+ "NODE": ["@network_adj", "@network_adj_src"],#, "initial_boardings", "final_alightings"],
+ "LINK": link_results.values() + ["@tcov_id", "length"],
+ "TRANSIT_LINE": ["@route_id"],
+ "TRANSIT_SEGMENT": segment_results.values() + [
+ "transit_time", "dwell_time", "@stop_id", "allow_boardings", "allow_alightings"],
+ }
+ network = self.get_partial_network(scenario, attributes)
+ self.collapse_network_adjustments(network, segment_results, link_results)
+ # ===============================================
+ # analysis for nodes with/without walk option
+ if use_node_analysis_to_get_transit_transfers:
+ stop_off, stop_on = self.transfer_analysis(scenario, network, class_name, num_processors)
+ else:
+ stop_on, stop_off = {}, {}
+ # ===============================================
+ transit_modes = [m for m in network.modes() if m.type in ("TRANSIT", "AUX_TRANSIT")]
+ links = [link for link in network.links()
+ if link["@tcov_id"] > 0 and (link.modes.union(transit_modes))]
+ links.sort(key=lambda l: l["@tcov_id"])
+ lines = [line for line in network.transit_lines() if line.mode.id in mode_list]
+ lines.sort(key=lambda l: l["@route_id"])
+
+ label = ",".join([mode, access_type, tod])
+ self.output_transit_flow(label, lines, segment_flow.id, fout_seg)
+ self.output_transit_aggregate_flow(
+ label, links, link_transit_flow.id, total_walk_flow.id, access_walk_flow.id,
+ xfer_walk_flow.id, egress_walk_flow.id, fout_link)
+ self.output_transit_onoff(
+ label, lines, total_boardings.id, total_alightings.id, initial_boardings.id,
+ xfer_boardings.id, xfer_alightings.id, final_alightings.id,
+ stop_on, stop_off, fout_stop)
+ finally:
+ fout_stop.close()
+ fout_link.close()
+ fout_seg.close()
+ return
+
+ @_m.logbook_trace("Export geometries")
+ def export_geometry(self, export_path, traffic_emmebank):
+ # -------------------------- Export Transit Network Geometry -----------------------------
+ # domain: NODE, LINK, TURN, TRANSIT_LINE, TRANSIT_VEHICLE, TRANSIT_SEGMENT
+ def export_as_csv(domain, attributes, scenario = None):
+ if scenario is None:
+ scenario = _m.Modeller().scenario
+ initial_scenario = _m.Modeller().scenario
+ #if initial_scenario.number != scenario.number:
+ #data_explorer.replace_primary_scenario(scenario)
+ # Create the network table
+ network_table = project.new_network_table(domain)
+ for k, a in enumerate(attributes):
+ column = _ws.Column()
+ column.name = column.expression = a
+ network_table.add_column(k, column)
+ # Extract data
+ data = network_table.get_data()
+ f = _np.vectorize(lambda x: x.text) # required to get the WKT representation of the geometry column
+ data_dict = {}
+ for a in data.attributes():
+ if isinstance(a, _dt.GeometryAttribute):
+ data_dict[a.name] = f(a.values)
+ else:
+ data_dict[a.name] = a.values
+ df = pd.DataFrame(data_dict)
+
+ network_table.close()
+ #if initial_scenario.number != scenario.number:
+ # data_explorer.replace_primary_scenario(initial_scenario)
+ return df
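+ # e.g. export_as_csv('LINK', ['i', 'j', '@tcov_id']) returns a DataFrame of
+ # those columns plus a WKT 'geometry' column (sketch; requires an open project)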
+
+ desktop = _m.Modeller().desktop
+ desktop.refresh_data()
+ data_explorer = desktop.data_explorer()
+ previous_active_database = data_explorer.active_database()
+ try:
+ desktop_traffic_database = data_explorer.add_database(traffic_emmebank.path)
+ desktop_traffic_database.open()
+ except Exception:
+ import traceback
+ print(traceback.format_exc())
+ project = desktop.project
+ scenario = _m.Modeller().emmebank.scenario(101)
+ data_explorer.replace_primary_scenario(scenario)
+ node_attributes = ['i','@tap_id']
+ link_attributes = ['i', 'j', '@tcov_id', 'modes']
+ transit_line_attributes = ['line', 'routeID']
+ transit_segment_attributes = ['line', 'i', 'j', 'loop_index','@tcov_id','@stop_id']
+ mode_table = ['mode', 'type']
+ network_table = project.new_network_table('MODE')
+ for k, a in enumerate(mode_table):
+ column = _ws.Column()
+ column.name = column.expression = a
+ network_table.add_column(k, column)
+ data = network_table.get_data()
+ data_dict = {}
+ for a in data.attributes():
+ data_dict[a.name] = a.values
+ df = pd.DataFrame(data_dict)
+ mode_list = df[df['type'].isin([2.0, 3.0])]['mode'].tolist()
+
+ df = export_as_csv('NODE', node_attributes, scenario)
+ df = df[['@tap_id', 'geometry']]
+ is_tap = df['@tap_id'] > 0
+ df = df[is_tap]
+ df.columns = ['tapID', 'geometry']
+ df.to_csv(os.path.join(export_path, 'transitTap.csv'), index=False)
+
+ df = export_as_csv('TRANSIT_LINE', transit_line_attributes)
+ df = df[['line', 'geometry']]
+ df.columns = ['Route_Name', 'geometry']
+ df['Route_Name'] = df['Route_Name'].astype(int)
+ df_routeFull = pd.read_csv(os.path.join(export_path, 'trrt.csv'))
+ result = pd.merge(df_routeFull, df, how='left', on=['Route_Name'])
+ result.to_csv(os.path.join(export_path, 'transitRoute.csv'), index=False)
+ os.remove(os.path.join(export_path, 'trrt.csv'))
+
+ df = export_as_csv('TRANSIT_SEGMENT', transit_segment_attributes, None)
+ df_seg = df[['@tcov_id', 'geometry']]
+ df_seg.columns = ['trcovID', 'geometry']
+ df_seg = df_seg.drop_duplicates()
+ #df_seg.to_csv(os.path.join(export_path, 'transitLink.csv'), index=False)
+ #df_stop = df[(df['@stop_id'] > 0) & (df['@tcov_id'] > 0)]
+ df_stop = df[(df['@stop_id'] > 0)]
+ df_stop = df_stop[['@stop_id', 'geometry']]
+ df_stop = df_stop.drop_duplicates()
+ df_stop.columns = ['Stop_ID', 'geometry']
+ # convert each stop's LINESTRING WKT to a POINT WKT at its first coordinate
+ temp = []
+ for value in df_stop['geometry']:
+ value = value.split(',')
+ value[0] = value[0] + ')'
+ value[0] = value[0].replace("LINESTRING", "POINT")
+ temp.append(value[0])
+ df_stop['geometry'] = temp
+ df_stopFull = pd.read_csv(os.path.join(export_path, 'trstop.csv'))
+ result = pd.merge(df_stopFull, df_stop, how='left', on=['Stop_ID'])
+ result.to_csv(os.path.join(export_path, 'transitStop.csv'), index=False)
+ os.remove(os.path.join(export_path, 'trstop.csv'))
+
+ df = export_as_csv('LINK', link_attributes, None)
+ df_link = df[['@tcov_id', 'geometry']]
+ df_link.columns = ['hwycov-id:1', 'geometry']
+ df_linkFull = pd.read_csv(os.path.join(export_path, 'hwy_tcad.csv'))
+ result = pd.merge(df_linkFull, df_link, how='left', on=['hwycov-id:1'])
+ result.to_csv(os.path.join(export_path, 'hwyTcad.csv'), index=False)
+ os.remove(os.path.join(export_path, 'hwy_tcad.csv'))
+ df_transit_link = df[df.modes.str.contains('|'.join(mode_list))]
+ df_transit_link = df_transit_link[['@tcov_id', 'geometry']]
+ df_transit_link.columns = ['trcovID', 'geometry']
+ df_transit_link = df_transit_link[df_transit_link['trcovID'] != 0]
+ df_transit_link['AB'] = df_transit_link['trcovID'].apply(lambda x: 1 if x > 0 else 0)
+ df_transit_link['trcovID'] = abs(df_transit_link['trcovID'])
+ df_transit_link = df_transit_link[['trcovID', 'AB', 'geometry']]
+ df_transit_link.to_csv(os.path.join(export_path, 'transitLink.csv'), index=False)
+ network_table.close()
+ try:
+ previous_active_database.open()
+ data_explorer.remove_database(desktop_traffic_database)
+ except:
+ pass
+
+ def get_partial_network(self, scenario, attributes):
+ domains = attributes.keys()
+ network = scenario.get_partial_network(domains, include_attributes=False)
+ for domain, attrs in attributes.iteritems():
+ if attrs:
+ values = scenario.get_attribute_values(domain, attrs)
+ network.set_attribute_values(domain, attrs, values)
+ return network
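+ # Usage sketch (hypothetical attribute selection): fetch only what is needed,
+ # network = self.get_partial_network(
+ # scenario, {"LINK": ["@tcov_id", "length"], "NODE": None})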
+
+ def output_transit_flow(self, label, lines, segment_flow, fout_seg):
+ # output segment data (transit_flow)
+ centroid = "0" # always 0
+ voc = "" # volume/capacity, not actually used,
+ for line in lines:
+ line_id = id_format(line["@route_id"])
+ ivtt = from_mp = to_mp = 0
+ segments = iter(line.segments(include_hidden=True))
+ seg = segments.next()
+ from_stop = id_format(seg["@stop_id"])
+ for next_seg in segments:
+ to_mp += seg.link.length
+ ivtt += seg.transit_time - next_seg.dwell_time
+ transit_flow = seg[segment_flow]
+ seg = next_seg
+ if not next_seg.allow_alightings:
+ continue
+ to_stop = id_format(next_seg["@stop_id"])
+ formatted_ivtt = format(ivtt)
+ fout_seg.write(",".join([
+ label, line_id, from_stop, to_stop, centroid, format(from_mp), format(to_mp),
+ format(transit_flow), formatted_ivtt, formatted_ivtt, voc]))
+ fout_seg.write("\n")
+ from_stop = to_stop
+ from_mp = to_mp
+ ivtt = 0
+
+ def output_transit_aggregate_flow(self, label, links,
+ link_transit_flow, total_walk_flow, access_walk_flow,
+ xfer_walk_flow, egress_walk_flow, fout_link):
+ # output link data (transit_aggregate_flow)
+ for link in links:
+ link_id = id_format(link["@tcov_id"])
+ ab_transit_flow = link[link_transit_flow]
+ ab_non_transit_flow = link[total_walk_flow]
+ ab_total_flow = ab_transit_flow + ab_non_transit_flow
+ ab_access_walk_flow = link[access_walk_flow]
+ ab_xfer_walk_flow = link[xfer_walk_flow]
+ ab_egress_walk_flow = link[egress_walk_flow]
+ if link.reverse_link:
+ ba_transit_flow = link.reverse_link[link_transit_flow]
+ ba_non_transit_flow = link.reverse_link[total_walk_flow]
+ ba_total_flow = ba_transit_flow + ba_non_transit_flow
+ ba_access_walk_flow = link.reverse_link[access_walk_flow]
+ ba_xfer_walk_flow = link.reverse_link[xfer_walk_flow]
+ ba_egress_walk_flow = link.reverse_link[egress_walk_flow]
+ else:
+ ba_transit_flow = 0.0
+ ba_non_transit_flow = 0.0
+ ba_total_flow = 0.0
+ ba_access_walk_flow = 0.0
+ ba_xfer_walk_flow = 0.0
+ ba_egress_walk_flow = 0.0
+
+ fout_link.write(",".join(
+ [label, link_id,
+ format(ab_transit_flow), format(ba_transit_flow),
+ format(ab_non_transit_flow), format(ba_non_transit_flow),
+ format(ab_total_flow), format(ba_total_flow),
+ format(ab_access_walk_flow), format(ba_access_walk_flow),
+ format(ab_xfer_walk_flow), format(ba_xfer_walk_flow),
+ format(ab_egress_walk_flow), format(ba_egress_walk_flow)]))
+ fout_link.write("\n")
+
+ def output_transit_onoff(self, label, lines,
+ total_boardings, total_alightings, initial_boardings,
+ xfer_boardings, xfer_alightings, final_alightings,
+ stop_on, stop_off, fout_stop):
+ # output stop data (transit_onoff)
+ for line in lines:
+ line_id = id_format(line["@route_id"])
+ for seg in line.segments(True):
+ if not (seg.allow_alightings or seg.allow_boardings):
+ continue
+ i_node = seg.i_node.id
+ boardings = seg[total_boardings]
+ alightings = seg[total_alightings]
+ walk_access_on = seg[initial_boardings]
+ direct_xfer_on = seg[xfer_boardings]
+ walk_xfer_on = 0.0
+ direct_xfer_off = seg[xfer_alightings]
+ walk_xfer_off = 0.0
+ if stop_on.has_key(i_node):
+ if stop_on[i_node].has_key(line.id):
+ if direct_xfer_on > 0:
+ walk_xfer_on = direct_xfer_on - stop_on[i_node][line.id]
+ direct_xfer_on = stop_on[i_node][line.id]
+ if stop_off.has_key(i_node):
+ if stop_off[i_node].has_key(line.id):
+ if direct_xfer_off > 0:
+ walk_xfer_off = direct_xfer_off - stop_off[i_node][line.id]
+ direct_xfer_off = stop_off[i_node][line.id]
+
+ egress_off = seg[final_alightings]
+ fout_stop.write(",".join([
+ label, line_id, id_format(seg["@stop_id"]),
+ format(boardings), format(alightings), format(walk_access_on),
+ format(direct_xfer_on), format(walk_xfer_on), format(direct_xfer_off),
+ format(walk_xfer_off), format(egress_off)]))
+ fout_stop.write("\n")
+
+ def collapse_network_adjustments(self, network, segment_results, link_results):
+ segment_alights = [v for k, v in segment_results.items() if "alightings" in k]
+ segment_boards = [v for k, v in segment_results.items() if "boardings" in k] + ["transit_boardings"]
+ segment_result_attrs = segment_alights + segment_boards
+ link_result_attrs = link_results.values() + ["aux_transit_volume"]
+ link_attrs = network.attributes("LINK")
+ link_modified_attrs = [
+ "length", "@trtime_link_ea", "@trtime_link_am", "@trtime_link_md",
+ "@trtime_link_pm", "@trtime_link_ev", link_results["link_transit_flow"]]
+ seg_attrs = network.attributes("TRANSIT_SEGMENT")
+ line_attrs = network.attributes("TRANSIT_LINE")
+
+ transit_modes = set([network.mode(m) for m in "blryepc"])
+ aux_modes = set([network.mode(m) for m in "wxa"])
+ xfer_mode = network.mode('x')
+
+ def copy_seg_attrs(src_seg, dst_seg):
+ for attr in segment_result_attrs:
+ dst_seg[attr] += src_seg[attr]
+ dst_seg["allow_alightings"] |= src_seg["allow_alightings"]
+ dst_seg["allow_boardings"] |= src_seg["allow_boardings"]
+
+ def get_xfer_link(node, timed_xfer_link, is_outgoing=True):
+ links = node.outgoing_links() if is_outgoing else node.incoming_links()
+ for link in links:
+ if xfer_mode in link.modes and link.length == timed_xfer_link.length:
+ return link
+ return None
+
+ lines_to_update = set([])
+ nodes_to_merge = []
+ nodes_to_delete = []
+
+ for node in network.regular_nodes():
+ if node["@network_adj"] == 1:
+ nodes_to_merge.append(node)
+ # copy boarding / alighting attributes for the segments to the original segment / stop
+ for seg in node.incoming_segments():
+ lines_to_update.add(seg.line)
+ copy_seg_attrs(seg, seg.line.segment(seg.number+2))
+ for seg in node.outgoing_segments():
+ lines_to_update.add(seg.line)
+ copy_seg_attrs(seg, seg.line.segment(seg.number+1))
+ elif node["@network_adj"] == 2:
+ nodes_to_delete.append(node)
+ # copy boarding / alighting attributes for the segments to the original segment / stop
+ for seg in node.outgoing_segments(True):
+ lines_to_update.add(seg.line)
+ if seg.j_node:
+ copy_seg_attrs(seg, seg.line.segment(seg.number+1))
+ else:
+ copy_seg_attrs(seg, seg.line.segment(seg.number-1))
+ elif node["@network_adj"] == 3:
+ orig_node = network.node(node["@network_adj_src"])
+ # Remove transfer walk links and copy data to source walk link
+ for link in node.outgoing_links():
+ if xfer_mode in link.modes and link.j_node["@network_adj"] == 3:
+ orig_xfer_link = get_xfer_link(orig_node, link)
+ for attr in link_result_attrs:
+ orig_xfer_link[attr] += link[attr]
+ network.delete_link(link.i_node, link.j_node)
+ # Sum link and segment results and merge links
+ mapping = network.merge_links_mapping(node)
+ for (link1, link2), attr_map in mapping['links'].iteritems():
+ for attr in link_modified_attrs:
+ attr_map[attr] = max(link1[attr], link2[attr])
+
+ for (seg1, seg2), attr_map in mapping['transit_segments'].iteritems():
+ if seg2.allow_alightings:
+ for attr in seg_attrs:
+ attr_map[attr] = seg1[attr]
+ else: # if it is a boarding stop or non-stop
+ for attr in seg_attrs:
+ attr_map[attr] = max(seg1[attr], seg2[attr])
+ attr_map["transit_time_func"] = min(seg1["transit_time_func"], seg2["transit_time_func"])
+ for attr in segment_boards:
+ attr_map[attr] = seg1[attr] + seg2[attr]
+ next_seg = seg2.line.segment(seg2.number+1)
+ for attr in segment_alights:
+ next_seg[attr] += seg2[attr]
+ network.merge_links(node, mapping)
+
+ # Backup transit lines with altered routes and remove from network
+ lines = []
+ for line in lines_to_update:
+ seg_data = {}
+ itinerary = []
+ for seg in line.segments(include_hidden=True):
+ if seg.i_node["@network_adj"] in [1,2] or (seg.j_node and seg.j_node["@network_adj"] == 1):
+ continue
+ # for circle line transfers, j_node is now None for new "hidden" segment
+ j_node = seg.j_node
+ if (seg.j_node and seg.j_node["@network_adj"] == 2):
+ j_node = None
+ seg_data[(seg.i_node, j_node, seg.loop_index)] = dict((k, seg[k]) for k in seg_attrs)
+ itinerary.append(seg.i_node.number)
+
+ lines.append({
+ "id": line.id,
+ "vehicle": line.vehicle,
+ "itinerary": itinerary,
+ "attributes": dict((k, line[k]) for k in line_attrs),
+ "seg_attributes": seg_data})
+ network.delete_transit_line(line)
+ # Remove duplicate network elements (undo network adjustments)
+ for node in nodes_to_delete:
+ for link in _chain(node.incoming_links(), node.outgoing_links()):
+ network.delete_link(link.i_node, link.j_node)
+ network.delete_node(node)
+ for node in nodes_to_merge:
+ mapping = network.merge_links_mapping(node)
+ for (link1, link2), attr_map in mapping["links"].iteritems():
+ if link2.j_node.is_centroid:
+ link1, link2 = link2, link1
+ for attr in link_attrs:
+ attr_map[attr] = link1[attr]
+ network.merge_links(node, mapping)
+ # Re-create transit lines on new itineraries
+ for line_data in lines:
+ new_line = network.create_transit_line(
+ line_data["id"], line_data["vehicle"], line_data["itinerary"])
+ for k, v in line_data["attributes"].iteritems():
+ new_line[k] = v
+ seg_data = line_data["seg_attributes"]
+ for seg in new_line.segments(include_hidden=True):
+ data = seg_data.get((seg.i_node, seg.j_node, seg.loop_index), {})
+ for k, v in data.iteritems():
+ seg[k] = v
+
+ def calc_additional_results(self, scenario, class_name, num_processors,
+ total_walk_flow, segment_results, link_transit_flow,
+ access_walk_flow, xfer_walk_flow, egress_walk_flow):
+ network_results = _m.Modeller().tool(
+ "inro.emme.transit_assignment.extended.network_results")
+ path_based_analysis = _m.Modeller().tool(
+ "inro.emme.transit_assignment.extended.path_based_analysis")
+ net_calculator = _m.Modeller().tool(
+ "inro.emme.network_calculation.network_calculator")
+
+ spec = {
+ "on_links": {
+ "aux_transit_volumes": total_walk_flow.id
+ },
+ "on_segments": segment_results,
+ "aggregated_from_segments": None,
+ "analyzed_demand": None,
+ "constraint": None,
+ "type": "EXTENDED_TRANSIT_NETWORK_RESULTS"
+ }
+ network_results(specification=spec, scenario=scenario,
+ class_name=class_name, num_processors=num_processors)
+ cal_spec = {
+ "result": "%s" % link_transit_flow.id,
+ "expression": "%s" % segment_results["transit_volumes"],
+ "aggregation": "+",
+ "selections": {
+ "link": "all",
+ "transit_line": "all"
+ },
+ "type": "NETWORK_CALCULATION"
+ }
+ net_calculator(cal_spec, scenario=scenario)
+
+ walk_flows = [("INITIAL_BOARDING_TO_FINAL_ALIGHTING", access_walk_flow.id),
+ ("INITIAL_BOARDING_TO_FINAL_ALIGHTING", xfer_walk_flow.id),
+ ("FINAL_ALIGHTING_TO_DESTINATION", egress_walk_flow.id)]
+ for portion_of_path, aux_transit_volumes in walk_flows:
+ spec = {
+ "portion_of_path": portion_of_path,
+ "trip_components": {
+ "in_vehicle": None,
+ "aux_transit": "length",
+ "initial_boarding": None,
+ "transfer_boarding": None,
+ "transfer_alighting": None,
+ "final_alighting": None
+ },
+ "path_operator": ".max.",
+ "path_selection_threshold": {
+ "lower": -1.0,
+ "upper": 999999.0
+ },
+ "path_to_od_aggregation": None,
+ "constraint": None,
+ "analyzed_demand": None,
+ "results_from_retained_paths": {
+ "paths_to_retain": "SELECTED",
+ "aux_transit_volumes": aux_transit_volumes
+ },
+ "path_to_od_statistics": None,
+ "path_details": None,
+ "type": "EXTENDED_TRANSIT_PATH_ANALYSIS"
+ }
+ path_based_analysis(
+ specification=spec, scenario=scenario,
+ class_name=class_name, num_processors=num_processors)
+
+ def transfer_analysis(self, scenario, net, class_name, num_processors):
+ create_attribute = _m.Modeller().tool(
+ "inro.emme.data.extra_attribute.create_extra_attribute")
+ transfers_at_stops = _m.Modeller().tool(
+ "inro.emme.transit_assignment.extended.apps.transfers_at_stops")
+
+ # find stop with/without walk transfer option
+ stop_walk_list = [] # stop (id) with walk option
+ stop_flag = "@stop_flag"
+ create_attribute("NODE", att, "1=stop without walk option, 2=otherwise",
+ 0, overwrite=True, scenario=scenario)
+ stop_nline = "@stop_nline"
+ create_attribute("NODE", stop_nline, "number of lines on the stop",
+ 0, overwrite=True, scenario=scenario)
+
+ for line in net.transit_lines():
+ for seg in line.segments(True):
+ node = seg.i_node
+ if seg.allow_alightings or seg.allow_boardings:
+ node[stop_nline] += 1
+ if node[stop_flag] > 0 : #node checked
+ continue
+ if seg.allow_alightings or seg.allow_boardings:
+ node[stop_flag] = 1
+ for ilink in node.incoming_links():
+ # skip connector
+ if ilink.i_node.is_centroid:
+ continue
+ for m in ilink.modes:
+ if m.type=="AUX_TRANSIT":
+ node[stop_flag] = 2
+ stop_walk_list.append(node.id)
+ break
+ if node[stop_flag]>=2:
+ break
+ if node[stop_flag]>=2:
+ continue
+ for olink in node.outgoing_links():
+ # skip connector
+ if olink.j_node.is_centroid:
+ continue
+ for m in olink.modes:
+ if m.type=="AUX_TRANSIT":
+ node[stop_flag] = 2
+ stop_walk_list.append(node.id)
+ break
+ if node[stop_flag]>=2:
+ break
+ #scenario.publish_network(net)
+ stop_off = {}
+ stop_on = {}
+ for stop in stop_walk_list:
+ stop_off[stop] = {}
+ stop_on[stop] = {}
+ selection = "i=%s" % stop
+ results = transfers_at_stops(
+ selection, scenario=scenario,
+ class_name=class_name, num_processors=num_processors)
+ for off_line in results:
+ stop_off[stop][off_line] = 0.0
+ for on_line in results[off_line]:
+ trip = float(results[off_line][on_line])
+ stop_off[stop][off_line] += trip
+ if not stop_on[stop].has_key(on_line):
+ stop_on[stop][on_line] = 0.0
+ stop_on[stop][on_line] += trip
+ return stop_off, stop_on
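+ # The returned dicts are keyed by stop node id, then by line id, e.g.
+ # stop_off = {"1234": {"line_a": 7.0}} means 7 transfer trips alight from
+ # line_a at node 1234 (hypothetical values).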
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_for_commercial_vehicle.py b/sandag_abm/src/main/emme/toolbox/export/export_for_commercial_vehicle.py
new file mode 100644
index 0000000..fee626d
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_for_commercial_vehicle.py
@@ -0,0 +1,158 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export/export_for_commercial_vehicle.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports the required skims in CSV format for the commercial vehicle model.
+#
+#
+# Inputs:
+# source:
+#
+# Files referenced:
+#
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ source_dir = os.path.join(main_directory, "input")
+ title = "Base 2012 scenario"
+ tool = modeller.tool("sandag.export.export_for_commercial_vehicle")
+"""
+
+
+TOOLBOX_ORDER = 51
+
+
+import inro.modeller as _m
+import numpy as _np
+import subprocess as _subprocess
+import tempfile as _tempfile
+import traceback as _traceback
+import os
+
+_join = os.path.join
+_dir = os.path.dirname
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ExportForCommercialVehicleModel(_m.Tool(), gen_utils.Snapshot):
+
+ output_directory = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ self.output_directory = _join(_dir(project_dir), "output")
+ self.attributes = ["output_directory"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export for commercial vehicle model"
+ pb.description = """
+ Exports the required skims in CSV format for the commercial vehicle model.
+ """
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('output_directory', 'directory',
+ title='Select output directory')
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.output_directory, scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Export skims for commercial vehicle model', save_arguments=True)
+ def __call__(self, output_directory, scenario):
+ emmebank = scenario.emmebank
+ modes = ['ldn', 'ldt', 'lhdn', 'lhdt', 'mhdn', 'mhdt', 'hhdn', 'hhdt']
+ classes = ['SOV_NT_H', 'SOV_TR_H', 'TRK_L', 'TRK_L', 'TRK_M', 'TRK_M', 'TRK_H', 'TRK_H']
+ # Mappings between COMMVEH modes and Emme classes
+ mode_class = dict(zip(modes, classes))
+ class_mode = dict(zip(classes, modes))
+
+ is_toll_mode = lambda m: m.endswith('t')
+ #periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ period = "MD"
+ skims = ['TIME', 'DIST', 'TOLLCOST']
+ DUCoef = [
+ [-0.313, -0.138, -0.01],
+ [-0.313, -0.492, -0.01],
+ [-0.302, -0.580, -0.02]
+ ]
+ # Mappings for DUCoef utility index
+ modes_util = {
+ 'ldn': 0,
+ 'ldt': 0,
+ 'lhdn': 1,
+ 'lhdt': 1,
+ 'mhdn': 1,
+ 'mhdt': 1,
+ 'hhdn': 2,
+ 'hhdt': 2
+ }
+
+ # Lookup relevant skims as numpy arrays
+ skim_mat = {}
+ for cls in classes:
+ for skim in skims:
+ name = '%s_%s_%s' % (period, cls, skim)
+ if name not in skim_mat:
+ skim_mat[name] = emmebank.matrix(name).get_numpy_data(scenario)
+
+ output_matrices = {
+ 'impldt_MD_Time.txt': skim_mat['MD_SOV_TR_H_TIME'],
+ 'impldt_MD_Dist.txt': skim_mat['MD_SOV_TR_H_DIST'],
+ }
+
+ # Calculate DU matrices in numpy
+ for mode in modes:
+ time = skim_mat['%s_%s_TIME' % (period, mode_class[mode])]
+ distance = skim_mat['%s_%s_DIST' % (period, mode_class[mode])]
+ # All classes now have a tollcost skim available
+ toll_cost = skim_mat['%s_%s_TOLLCOST' % (period, mode_class[mode])]
+ _np.fill_diagonal(toll_cost, 0)
+
+ coeffs = DUCoef[modes_util[mode]]
+ disutil_mat = coeffs[0] * time + coeffs[1] * distance + coeffs[2] * toll_cost
+ output_matrices['imp%s_%s_DU.txt' % (mode, period)] = disutil_mat
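+ # Worked example (hypothetical skim values): for an 'lhdn' O-D pair with
+ # time = 20 min, distance = 10 mi and toll = 0, the disutility is
+ # -0.313*20 + -0.492*10 + -0.01*0 = -11.18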
+
+ # Insert row number into first column of the array
+ # Note: assumes zone IDs are continuous
+ for key, array in output_matrices.iteritems():
+ output_matrices[key] = _np.insert(array, 0, range(1, array.shape[0]+1), axis=1)
+
+ # Output DU matrices to CSV
+ # Print first column as integer, subsequent columns as floats rounded to 6 decimals
+ fmt_spec = ['%i'] + ['%.6f'] * (disutil_mat.shape[0])
+ # Save to separate files
+ for name, array in output_matrices.iteritems():
+ _np.savetxt(_join(output_directory, name), array, fmt=fmt_spec, delimiter=',')
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_for_transponder.py b/sandag_abm/src/main/emme/toolbox/export/export_for_transponder.py
new file mode 100644
index 0000000..f5736a3
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_for_transponder.py
@@ -0,0 +1,374 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2019-2020. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export_for_transponder.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+
+
+TOOLBOX_ORDER = 57
+
+
+import inro.modeller as _m
+
+import numpy as _np
+import pandas as _pd
+import string as _string
+import traceback as _traceback
+import math
+import os
+_dir, _join = os.path.dirname, os.path.join
+
+from shapely.geometry import MultiLineString, Point, LineString
+from contextlib import contextmanager as _context
+from itertools import izip as _izip
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module('sandag.utilities.demand')
+
+
+class ExportForTransponder(_m.Tool(), gen_utils.Snapshot):
+
+ scenario = _m.Attribute(_m.InstanceType)
+ output_directory = _m.Attribute(unicode)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ modeller = _m.Modeller()
+ if modeller.emmebank.path == _join(project_dir, "Database", "emmebank"):
+ self.scenario = modeller.emmebank.scenario(102)
+ self.num_processors = "max-1"
+ self.output_directory = _join(_dir(project_dir), "output")
+ self.attributes = ["scenario", "output_directory", "num_processors"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export for transponder ownership model"
+ pb.description = """
+
+Calculates and exports the following results for each origin zone:
+
+"MLDIST" - Managed lane distance: straight-line distance to the
+ nearest managed lane facility
+"AVGTTS" - Average travel time savings: average travel time savings
+ across all possible destinations
+"PCTDETOUR" - Percent detour: the percent difference between the AM
+ transponder travel time and the AM non-transponder travel time
+ to sample zones when the general purpose lanes parallel to all toll
+ lanes using transponders are not available
+ """
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_scenario("scenario",
+ title="Representative scenario")
+ pb.add_select_file('output_directory', 'directory',
+ title='Select output directory')
+
+ dem_utils.add_select_processors("num_processors", pb, self)
+ return pb.render()
+
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self(self.output_directory, self.num_processors, self.scenario)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export results for transponder ownership model", save_arguments=True)
+ def __call__(self, output_directory, num_processors, scenario):
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(
+ _join(_dir(output_directory), "conf", "sandag_abm.properties"))
+ input_directory = _join(_dir(output_directory), "input")
+ num_processors = dem_utils.parse_num_processors(num_processors)
+ network = scenario.get_network()
+ distances = self.ml_facility_dist(network)
+ savings = self.avg_travel_time_savings(scenario, input_directory, props, num_processors)
+ detour = self.percent_detour(scenario, network, props, num_processors)
+ self.export_results(output_directory, scenario, distances, savings, detour)
+
+ @_m.logbook_trace("Calculate distance to nearest managed lane facility")
+ def ml_facility_dist(self, network):
+ # DIST: straight-line distance to the nearest ML facility (nearest link with a ML cost)
+ # A managed lane is one of:
+ # HOV2+ only (carpool lane): "IFC" = 1 and "IHOV" = 2 and "ITOLLO" = 0 and "ITOLLA" = 0 and "ITOLLP" = 0
+ # HOV3+ only (carpool lane): "IFC" = 1 and "IHOV" = 3 and "ITOLLO" = 0 and "ITOLLA" = 0 and "ITOLLP" = 0
+ # HOV2+ & HOT (managed lane, HOV 2+ free, SOV pays toll): "IFC" = 1 and "IHOV" = 2 and "ITOLLO" > 0 and "ITOLLA" > 0 and "ITOLLP" > 0
+ # HOV3+ & HOT (managed lane, HOV 3+ free, HOV2 & SOV pay toll): "IFC" = 1 and "IHOV" = 3 and "ITOLLO" > 0 and "ITOLLA" > 0 and "ITOLLP" > 0
+ # Tollway (all vehicles tolled): "IFC" = 1 and "IHOV" = 4
+ # NOTE: not all managed lane links have a toll cost;
+ # some costs are specified only on the entrance / exit links
+
+ ml_link_coords = []
+ ml_links = []
+ for link in network.links():
+ if link["type"] == 1 and link["@lane_restriction"] in (2,3) and (
+ link["@toll_am"] + link["@toll_md"] + link["@toll_pm"]) > 0:
+ ml_link_coords.append(LineString(link.shape))
+ ml_links.append(link)
+ ml_link_collection = MultiLineString(ml_link_coords)
+ distances = []
+ for zone in network.centroids():
+ zone_point = Point((zone.x, zone.y))
+ distances.append(zone_point.distance(ml_link_collection) / 5280)
+
+ # distances is a Python list of the distance from each zone to nearest ML link
+ # in same order as centroids
+ return distances
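+ # Shapely sketch of the distance query above (hypothetical coordinates, feet):
+ # MultiLineString([[(0, 0), (5280, 0)]]).distance(Point((2640, 2640))) / 5280
+ # evaluates to 0.5, i.e. half a mile from the zone centroid to the facility.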
+
+ @_m.logbook_trace("Calculate average travel time savings")
+ def avg_travel_time_savings(self, scenario, input_directory, props, num_processors):
+ # AVGTTS: the average travel time savings of all households in each zone
+ # over all possible work destinations d, calculated as an expected value
+ # with probabilities taken from a simplified destination choice model.
+ # The expected travel time savings for households in zone z is
+ # SUM[d] (NTTime[zd] - TRTime[zd]) * Employment[d] * exp(-0.01*NTTime[zd]) /
+ # SUM[d] Employment[d] * exp(-0.01*NTTime[zd])
+ #
+ # NTTime[zd] = AM_NTTime[zd] + PM_NTTime[dz]
+ # TRTime[zd] = AM_TRTime[zd] + PM_TRTime[dz]
+
+ emmebank = scenario.emmebank
+ year = int(props['scenarioYear'])
+ mgra = _pd.read_csv(
+ _join(input_directory, 'mgra13_based_input%s.csv' % year))
+ taz = mgra[['taz', 'emp_total']].groupby('taz').sum()
+ taz.reset_index(inplace=True)
+ taz = dem_utils.add_missing_zones(taz, scenario)
+ taz.reset_index(inplace=True)
+
+ with setup_for_tt_savings_calc(emmebank):
+ employment_matrix = emmebank.matrix("mdemployment")
+ employment_matrix.set_numpy_data(taz["emp_total"].values, scenario.id)
+ matrix_calc = dem_utils.MatrixCalculator(scenario, num_processors)
+ matrix_calc.add("NTTime", "AM_SOV_NT_M_TIME + PM_SOV_NT_M_TIME'")
+ matrix_calc.add("TRTime", "AM_SOV_TR_M_TIME + PM_SOV_TR_M_TIME'")
+ matrix_calc.add("numerator", "((NTTime - TRTime).max.0) * employment * exp(-0.01 * NTTime)",
+ aggregation={"destinations": "+"})
+ matrix_calc.add("denominator", "employment * exp(-0.01 * NTTime)",
+ aggregation={"destinations": "+"})
+ matrix_calc.add("AVGTTS", "numerator / denominator")
+ matrix_calc.run()
+ avg_tts = emmebank.matrix("AVGTTS").get_numpy_data(scenario.id)
+ return avg_tts
+
+ @_m.logbook_trace("Calculate percent detour without managed lane facilities")
+ def percent_detour(self, scenario, network, props, num_processors):
+ # PCTDETOUR: the percent difference between the AM non-toll travel time
+ # to sample downtown zones and the AM non-toll travel time to downtown
+ # when the general purpose lanes parallel to all toll lanes requiring
+ # transponders are not available. This variable is calculated as
+ # 100*(TimeWithoutFacility - NonTransponderTime) / NonTransponderTime
+
+ destinations = props["transponder.destinations"]
+
+ network.create_attribute("NODE", "@root")
+ network.create_attribute("NODE", "@leaf")
+
+ mode_id = get_available_mode_id(network)
+ new_mode = network.create_mode("AUX_AUTO", mode_id)
+ sov_non_toll_mode = network.mode("s")
+
+ # Find special managed links and potential parallel GP facilities
+ ml_link_coords = []
+ freeway_links = []
+ for link in network.links():
+ if link["@lane_restriction"] in [2, 3] and link["type"] == 1 and (
+ link["@toll_am"] + link["@toll_md"] + link["@toll_pm"]) > 0:
+ ml_link_coords.append(LineString(link.shape))
+ if sov_non_toll_mode in link.modes:
+ link.modes |= set([new_mode])
+ if link["type"] == 1:
+ freeway_links.append(link)
+
+ # remove mode from nearby GP links to special managed lanes
+ ml_link_collection = MultiLineString(ml_link_coords)
+ for link in freeway_links:
+ link_shape = LineString(link.shape)
+ distance = link_shape.distance(ml_link_collection)
+ if distance < 100:
+ for ml_shape in ml_link_collection:
+ if ml_shape.distance(link_shape) < 100 and close_bearing(link_shape, ml_shape):
+ link.modes -= set([new_mode])
+ break
+
+ for node in network.centroids():
+ node["@root"] = 1
+ for dst in destinations:
+ network.node(dst)["@leaf"] = 1
+
+ reverse_auto_network(network, "@auto_time")
+ detour_impedances = shortest_paths_impedances(
+ network, new_mode, "@auto_time", destinations)
+ direct_impedances = shortest_paths_impedances(
+ network, sov_non_toll_mode, "@auto_time", destinations)
+
+ percent_detour = (detour_impedances - direct_impedances) / direct_impedances
+ avg_percent_detour = _np.sum(percent_detour, axis=1) / len(destinations)
+ avg_percent_detour = _np.nan_to_num(avg_percent_detour)
+ return avg_percent_detour
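+ # e.g. detour times of (12, 22) minutes against direct times of (10, 20) to
+ # two destinations average to ((12-10)/10 + (22-20)/20) / 2 = 0.15, the
+ # fractional detour returned here (hypothetical values).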
+
+ @_m.logbook_trace("Export results to transponderModelAccessibilities.csv file")
+ def export_results(self, output_directory, scenario, distances, savings, detour):
+ zones = scenario.zone_numbers
+ output_file = _join(output_directory, "transponderModelAccessibilities.csv")
+ with open(output_file, 'w') as f:
+ f.write("TAZ,DIST,AVGTTS,PCTDETOUR\n")
+ for row in _izip(zones, distances, savings, detour):
+ f.write("%d, %.4f, %.5f, %.5f\n" % row)
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+
+def reverse_auto_network(network, link_cost):
+ # swap directionality of modes and specified link costs, as well as turn prohibitions
+ # delete all transit lines
+ for line in network.transit_lines():
+ network.delete_transit_line(line)
+
+ # backup modes so that turns can be swapped (auto mode remains available)
+ network.create_attribute("LINK", "backup_modes")
+ for link in network.links():
+ link.backup_modes = link.modes
+ # add new reverse links (where needed) and get the one-way links to be deleted
+ auto_mode = network.mode("d")
+ links_to_delete = []
+ for link in network.links():
+ reverse_link = network.link(link.j_node.id, link.i_node.id)
+ if reverse_link is None:
+ reverse_link = network.create_link(link.j_node.id, link.i_node.id, link.modes)
+ reverse_link.backup_modes = reverse_link.modes
+ links_to_delete.append(link)
+ reverse_link.modes |= link.modes
+
+ # reverse the turn data
+ visited = set([])
+ for turn in network.turns():
+ if turn in visited:
+ continue
+ reverse_turn = network.turn(turn.k_node, turn.j_node, turn.i_node)
+ time, reverse_time = turn["data1"], turn["data1"]
+ turn["data1"], turn["data1"] = time, reverse_time
+ tpf, reverse_tpf = turn.penalty_func, reverse_turn.penalty_func
+ reverse_turn.penalty_func, turn.penalty_func = tpf, reverse_tpf
+ visited.add(turn)
+ visited.add(reverse_turn)
+
+ # reverse the link data
+ visited = set([])
+ for link in network.links():
+ if link in visited:
+ continue
+ reverse_link = network.link(link.j_node.id, link.i_node.id)
+ time, reverse_time = link[link_cost], reverse_link[link_cost]
+ reverse_link[link_cost], link[link_cost] = time, reverse_time
+ reverse_link.modes, link.modes = link.backup_modes, reverse_link.backup_modes
+ visited.add(link)
+ visited.add(reverse_link)
+
+ # delete the one-way links
+ for link in links_to_delete:
+ network.delete_link(link.i_node, link.j_node)
+
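+# After reversal, a shortest-path tree rooted at a destination follows costs
+# in the original direction of travel: e.g. a one-way link i -> j with
+# @auto_time = 5 becomes j -> i with the same cost (hypothetical values).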
+
+def shortest_paths_impedances(network, mode, link_cost, destinations):
+ excluded_links = []
+ for link in network.links():
+ if mode not in link.modes:
+ excluded_links.append(link)
+
+ impedances = []
+ for dest_id in destinations:
+ tree = network.shortest_path_tree(
+ dest_id, link_cost, excluded_links=excluded_links, consider_turns=True)
+ costs = []
+ for node in network.centroids():
+ if node.number == dest_id:
+ costs.append(0)
+ else:
+ try:
+ path_cost = tree.cost_to_node(node.id)
+ except KeyError:
+ path_cost = 600
+ costs.append(path_cost)
+ impedances.append(costs)
+ return _np.array(impedances).T
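+ # The transpose yields a zones x destinations array: row z holds
+ # [cost(z, d1), cost(z, d2), ...], with 600 standing in for unreachable zones.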
+
+
+@_context
+def setup_for_tt_savings_calc(emmebank):
+ with gen_utils.temp_matrices(emmebank, "FULL", 2) as mf:
+ mf[0].name = "NTTime"
+ mf[0].description = "Temp AM + PM' Auto non-transponder time"
+ mf[1].name = "TRTime"
+ mf[1].description = "Temp AM + PM' Auto transponder time"
+ with gen_utils.temp_matrices(emmebank, "ORIGIN", 3) as mo:
+ mo[0].name = "numerator"
+ mo[1].name = "denominator"
+ mo[2].name = "AVGTTS"
+ mo[2].description = "Temp average travel time savings"
+ with gen_utils.temp_matrices(emmebank, "DESTINATION", 1) as md:
+ md[0].name = "employment"
+ md[0].description = "Temp employment per zone"
+ yield
+
+@_context
+def get_temp_scenario(src_scenario):
+ delete_scenario = _m.Modeller().tool(
+ "inro.emme.data.scenario.delete_scenario")
+ emmebank = src_scenario.emmebank
+ scenario_id = get_available_scenario_id(emmebank)
+ temp_scenario = emmebank.copy_scenario(src_scenario, scenario_id)
+ try:
+ yield temp_scenario
+ finally:
+ delete_scenario(temp_scenario)
+
+def get_available_mode_id(network):
+ for mode_id in _string.letters:
+ if network.mode(mode_id) is None:
+ return mode_id
+
+def get_available_scenario_id(emmebank):
+ for i in range(1,10000):
+ if not emmebank.scenario(i):
+ return i
+
+def bearing(shape):
+ pt1 = shape.coords[0]
+ pt2 = shape.coords[-1]
+ x_diff = pt2[0] - pt1[0]
+ y_diff = pt2[1] - pt1[1]
+ return math.degrees(math.atan2(y_diff, x_diff))
+
+def close_bearing(shape1, shape2, tol=25):
+ b1 = bearing(shape1)
+ b2 = bearing(shape2)
+ diff = (b1 - b2) % 360
+ if diff >= 180:
+ diff -= 360
+ return abs(diff) < tol
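+# Worked example: bearings of 10 and 350 degrees differ by
+# (10 - 350) % 360 = 20, so close_bearing returns True at the default
+# 25-degree tolerance; bearings of 10 and 190 degrees differ by 180
+# and return False.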
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_tap_adjacent_lines.py b/sandag_abm/src/main/emme/toolbox/export/export_tap_adjacent_lines.py
new file mode 100644
index 0000000..068481a
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_tap_adjacent_lines.py
@@ -0,0 +1,122 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export/export_tap_adjacent_lines.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports a list of transit lines adjacent to each TAP.
+#
+#
+# Inputs:
+# file_path: export path for the tap adjacency file
+# scenario: scenario ID for the base scenario (same used in the Import network tool)
+#
+# Files created:
+# output/tapLines.csv (or as specified)
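+#
+# Example rows (illustrative TAP and line IDs only):
+# TAP,LINES
+# 1001,398012 399021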
+#
+#
+# Script example:
+"""
+import inro.modeller as _m
+import os
+modeller = _m.Modeller()
+desktop = modeller.desktop
+
+export_tap_adjacent_lines = modeller.tool("sandag.export.export_tap_adjacent_lines")
+
+project_dir = os.path.dirname(desktop.project_path())
+main_directory = os.path.dirname(project_dir)
+output_dir = os.path.join(main_directory, "output")
+
+import inro.emme.database.emmebank as _eb
+
+main_emmebank = _eb.Emmebank(os.path.join(project_dir, "Database", "emmebank"))
+scenario_id = 100
+base_scenario = main_emmebank.scenario(scenario_id)
+
+export_tap_adjacent_lines(os.path.join(output_dir, "tapLines.csv"), base_scenario)
+
+"""
+
+
+TOOLBOX_ORDER = 75
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ExportLines(_m.Tool(), gen_utils.Snapshot):
+
+ file_path = _m.Attribute(unicode)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_dir = os.path.dirname(project_dir)
+ self.file_path = os.path.join(main_dir, "output", "tapLines.csv")
+ self.attributes = ["file_path"]
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export TAP adjacent lines"
+ pb.description = """Exports a list of the transit lines adjacent to each TAP."""
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('file_path', 'save_file',title='Select file path')
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.file_path, scenario)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export list of TAP adjacent lines", save_arguments=True)
+ def __call__(self, file_path, scenario):
+ attributes = {"file_path": file_path}
+ gen_utils.log_snapshot("Export list of TAP adjacent lines", str(self), attributes)
+
+ network = scenario.get_partial_network(
+ ["NODE", "TRANSIT_LINE"], include_attributes=False)
+ values = scenario.get_attribute_values("NODE", ["@tap_id"])
+ network.set_attribute_values("NODE", ["@tap_id"], values)
+ with open(file_path, 'w') as f:
+ f.write("TAP,LINES\n")
+ for node in network.nodes():
+ if node["@tap_id"] == 0:
+ continue
+ lines = set([])
+ for link in node.outgoing_links():
+ for seg in link.j_node.outgoing_segments(include_hidden=True):
+ if seg.allow_alightings:
+ lines.add(seg.line)
+ if not lines:
+ continue
+ f.write("%d," % node["@tap_id"])
+ f.write(" ".join([l.id for l in lines]))
+ f.write("\n")
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_traffic_skims.py b/sandag_abm/src/main/emme/toolbox/export/export_traffic_skims.py
new file mode 100644
index 0000000..247eb60
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_traffic_skims.py
@@ -0,0 +1,95 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export/export_traffic_skims.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports the traffic skims for use in the disaggregate demand models (CT-RAMP)
+# and the data loader process.
+#
+# Note the matrix name mapping from the OMX file names to the Emme database names.
+#
+# Inputs:
+# omx_file: path to the OMX file to which the skim matrices are written
+# period: the period for which to export the skim matrices, "EA", "AM", "MD", "PM", "EV"
+# scenario: base traffic scenario to use for reference zone system
+#
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ output_dir = os.path.join(main_directory, "output")
+ scenario = modeller.scenario
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ export_traffic_skims = modeller.tool("sandag.export.export_traffic_skims")
+ for period in periods:
+ omx_file_path = os.path.join(output_dir, "traffic_skims_%s.omx" % period)
+ export_traffic_skims(period, omx_file_path, scenario)
+"""
+
+TOOLBOX_ORDER = 71
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ExportSkims(_m.Tool(), gen_utils.Snapshot):
+
+ omx_file = _m.Attribute(unicode)
+ period = _m.Attribute(str)
+ tool_run_msg = ""
+
+ def __init__(self):
+ self.attributes = ["omx_file", "period"]
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export traffic skims"
+ pb.description = """Export the skim matrices to OMX format for the selected period."""
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+ pb.add_select_file('omx_file', 'save_file', title='Select OMX file')
+ options = [(x, x) for x in ["EA", "AM", "MD", "PM", "EV"]]
+ pb.add_select("period", keyvalues=options, title="Select corresponding period")
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.period, self.omx_file, scenario)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export traffic skims to OMX", save_arguments=True)
+ def __call__(self, period, omx_file, scenario):
+ attributes = {"omx_file": omx_file, "period": period}
+ gen_utils.log_snapshot("Export traffic skims to OMX", str(self), attributes)
+ init_matrices = _m.Modeller().tool("sandag.initialize.initialize_matrices")
+ matrices = init_matrices.get_matrix_names("traffic_skims", [period], scenario)
+ with gen_utils.ExportOMX(omx_file, scenario, omx_key="NAME") as exporter:
+ exporter.write_matrices(matrices)
diff --git a/sandag_abm/src/main/emme/toolbox/export/export_transit_skims.py b/sandag_abm/src/main/emme/toolbox/export/export_transit_skims.py
new file mode 100644
index 0000000..7c22691
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/export/export_transit_skims.py
@@ -0,0 +1,108 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// export/export_transit_skims.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Exports the transit skims for use in the disaggregate demand models (CT-RAMP)
+# and the data loader process.
+#
+# Note the matrix name mapping from the OMX file names to the Emme database names.
+#
+# Inputs:
+# omx_file: path to the OMX file to which the skim matrices are written
+# periods: list of periods, using the standard two-character abbreviation
+# big_to_zero: replace big values (>10E6) with zero
+# This is used in the final iteration skim (after the demand models are
+# complete) to filter large values from the OMX files which are not
+# compatible with the data loader process
+# scenario: transit scenario to use for reference zone system
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ output_dir = os.path.join(main_directory, "output")
+ scenario = modeller.scenario
+ export_transit_skims = modeller.tool("sandag.export.export_transit_skims")
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ omx_file_path = os.path.join(output_dir, "transit_skims.omx")
+ export_transit_skims(omx_file_path, periods, scenario)
+"""
+
+
+TOOLBOX_ORDER = 72
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ExportSkims(_m.Tool(), gen_utils.Snapshot):
+ omx_file = _m.Attribute(unicode)
+ periods = _m.Attribute(unicode)
+ big_to_zero = _m.Attribute(bool)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ self.attributes = ["omx_file", "periods", "big_to_zero"]
+ self.periods = "EA, AM, MD, PM, EV"
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Export transit skim matrices"
+ pb.description = """Export the skim matrices to OMX format for all periods."""
+ pb.branding_text = "- SANDAG - Export"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+ pb.add_select_file('omx_file', 'save_file', title='Select OMX file')
+ pb.add_text_box('periods', title="Selected periods:")
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ periods = [x.strip() for x in self.periods.split(",")]
+ self(self.omx_file, periods, scenario, self.big_to_zero)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Export transit skims to OMX", save_arguments=True)
+ def __call__(self, omx_file, periods, scenario, big_to_zero=False):
+ attributes = {"omx_file": omx_file, "periods": periods, "big_to_zero": big_to_zero}
+ gen_utils.log_snapshot("Export transit skims to OMX", str(self), attributes)
+ init_matrices = _m.Modeller().tool("sandag.initialize.initialize_matrices")
+ matrices = init_matrices.get_matrix_names(
+ "transit_skims", periods, scenario)
+ with gen_utils.ExportOMX(omx_file, scenario, omx_key="NAME") as exporter:
+ if big_to_zero:
+ emmebank = scenario.emmebank
+ for name in matrices:
+ matrix = emmebank.matrix(name)
+ array = matrix.get_numpy_data(scenario)
+ array[array>10E6] = 0
+ exporter.write_array(array, exporter.generate_key(matrix))
+ else:
+ exporter.write_matrices(matrices)
diff --git a/sandag_abm/src/main/emme/toolbox/import/adjust_network_links.py b/sandag_abm/src/main/emme/toolbox/import/adjust_network_links.py
new file mode 100644
index 0000000..7630a5d
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/import/adjust_network_links.py
@@ -0,0 +1,156 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/adjust_network_links.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Deletes the existing centroid connectors and creates new centroid connectors
+# connecting the centroids of the aggregated zones to the original end points
+# (on the network) of the old centroid connectors.
+#
+# Inputs:
+# source: path to the location of the input network files
+# base_scenario: scenario that has highway network only
+# emmebank: the Emme database in which the new network is published
+# external_zone: string "1-12" that references the range of external zones
+# taz_cwk_file: input csv file created after zone aggregation. It has the crosswalk from the existing TAZs to the new zone structure
+# cluster_zone_file: input csv file created after zone aggregation. It has the centroid coordinates of the new zone structure
+#
+#
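+# Script example (a hypothetical usage sketch; the csv file names below are
+# placeholders, not shipped inputs):
+"""
+import os
+import inro.modeller as _m
+modeller = _m.Modeller()
+source = os.path.join("path", "to", "input")
+network = adjust_network_links(source, 100, modeller.emmebank, "1-12",
+ "taz_crosswalk.csv", "cluster_zone_centroids.csv")
+"""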
+
+import inro.modeller as _m
+import os
+
+import pandas as pd
+from scipy.spatial import distance
+
+
+def adjust_network_links(source, base_scenario, emmebank, external_zone, taz_cwk_file, cluster_zone_file):
+
+ taz_cwk = pd.read_csv(os.path.join(source, taz_cwk_file), index_col = 0)
+ taz_cwk = taz_cwk['cluster_id'].to_dict()
+
+ emmebank = _m.Modeller().emmebank
+ scenario = emmebank.scenario(base_scenario)
+ hwy_network = scenario.get_network()
+
+ centroid_nodes = []
+ exclude_nodes = []
+
+
+ # external_zone is a range string such as "1-12"
+ ext_zones = [int(s) for s in external_zone.split("-") if s.strip().isdigit()]
+
+ for node in range(ext_zones[0], ext_zones[1] + 1):
+ exclude_nodes.append(hwy_network.node(node))
+
+ for node in hwy_network.centroids():
+ if not node in exclude_nodes:
+ centroid_nodes.append(node)
+
+ i_nodes = []
+ j_nodes = []
+ data1 = []
+ length = []
+ links = []
+
+ for link in hwy_network.links():
+ if link.i_node in centroid_nodes:
+ links.append(link)
+ i_nodes.append(int(link.i_node))
+ j_nodes.append(int(link.j_node))
+ data1.append(link.data1)
+ length.append(link.length)
+
+ df = pd.DataFrame({'links' : links, 'i_nodes' : i_nodes, 'j_nodes': j_nodes, 'ul1_org': data1, 'length_org':length})
+ df['i_nodes_new'] = df['i_nodes'].map(taz_cwk)
+
+ #get XY of existing centroids
+ j_nodes_list = df['j_nodes'].unique()
+ j_nodes_list = [hwy_network.node(x) for x in j_nodes_list]
+
+ j_nodes = []
+ j_x = []
+ j_y = []
+ for nodes in hwy_network.nodes():
+ if nodes in j_nodes_list:
+ j_nodes.append(nodes)
+ j_x.append(nodes.x)
+ j_y.append(nodes.y)
+
+ j_nodes_XY = pd.DataFrame({'j_nodes' : j_nodes, 'j_x' : j_x, 'j_y': j_y})
+ j_nodes_XY['j_nodes'] = [int(x) for x in j_nodes_XY['j_nodes']]
+ df = pd.merge(df, j_nodes_XY, on = 'j_nodes', how = 'left')
+
+ agg_node_coords = pd.read_csv(os.path.join(source, cluster_zone_file))
+ df = pd.merge(df, agg_node_coords, left_on = 'i_nodes_new', right_on = 'cluster_id', how = 'left')
+ df = df.drop(columns = 'cluster_id')
+ df = df.rename(columns = {'centroid_x' : 'i_new_x', 'centroid_y' : 'i_new_y'})
+
+ j_coords = zip(df['j_x'], df['j_y'])
+ i_new_coords = zip(df['i_new_x'], df['i_new_y'])
+
+ df['length'] = [distance.euclidean(i, j)/5280.0 for i, j in zip(j_coords, i_new_coords)]
+
+ #delete all the existing centroid nodes
+ for index,row in df.iterrows():
+ if hwy_network.node(row['i_nodes']):
+ hwy_network.delete_node(row['i_nodes'], True)
+
+ # create new nodes (centroids of clusters)
+ for index,row in agg_node_coords.iterrows():
+ new_node = hwy_network.create_node(row['cluster_id'], is_centroid = True)
+ new_node.x = int(row['centroid_x'])
+ new_node.y = int(row['centroid_y'])
+
+ df['type'] = 10
+ df['num_lanes'] = 1
+ df['vdf'] = 11
+ df['ul3'] = 999999
+
+ final_df = df[["i_nodes_new", "j_nodes", "length", "type", "num_lanes", "vdf", "ul3"]]
+ final_df = final_df.drop_duplicates()
+ final_df = final_df.reset_index(drop=True)
+ final_df['type'] = final_df['type'].astype("int")
+
+ # create new links
+ for index,row in final_df.iterrows():
+
+ link_ij = hwy_network.create_link(row['i_nodes_new'], row['j_nodes'],
+ modes = ["d", "h", "H", "i","I","s", "S", "v", "V", "m", "M", "t", "T"])
+ link_ij.length = row['length']
+ link_ij.type = row['type'].astype("int")
+ link_ij.num_lanes = row['num_lanes'].astype("int")
+ link_ij.volume_delay_func = row['vdf'].astype("int")
+ link_ij.data3 = row['ul3'].astype("int")
+ link_ij['@lane_ea'] = 1 # required: highway assignment replaces num_lanes with these period lane values
+ link_ij['@lane_am'] = 1
+ link_ij['@lane_md'] = 1
+ link_ij['@lane_pm'] = 1
+ link_ij['@lane_ev'] = 1
+
+
+ link_ji = hwy_network.create_link(row['j_nodes'], row['i_nodes_new'],
+ modes = ["d", "h", "H", "i","I","s", "S", "v", "V", "m", "M", "t", "T"])
+ link_ji.length = row['length']
+ link_ji.type = row['type'].astype("int")
+ link_ji.num_lanes = row['num_lanes'].astype("int")
+ link_ji.volume_delay_func = row['vdf'].astype("int")
+ link_ji.data3 = row['ul3'].astype("int")
+ link_ji['@lane_ea'] = 1 # required: highway assignment replaces num_lanes with these period lane values
+ link_ji['@lane_am'] = 1
+ link_ji['@lane_md'] = 1
+ link_ji['@lane_pm'] = 1
+ link_ji['@lane_ev'] = 1
+
+ # publishing the highway network to the scenario is left to the caller:
+ # scenario.publish_network(hwy_network)
+ return hwy_network
\ No newline at end of file
diff --git a/sandag_abm/src/main/emme/toolbox/import/import_auto_demand.py b/sandag_abm/src/main/emme/toolbox/import/import_auto_demand.py
new file mode 100644
index 0000000..375a27c
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/import/import_auto_demand.py
@@ -0,0 +1,516 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/import_auto_demand.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Imports the auto demand matrices generated from an iteration of the disaggregate
+# demand models (CT-RAMP) and adds the saved disaggregated demand matrices to
+# generate the total auto demand in preparation for the auto assignment.
+#
+# Note the matrix name mapping from the OMX file names to the Emme database names.
+#
+# Inputs:
+# external_zones: set of external zone IDs as a range "1-12"
+# output_dir: output directory to read the OMX files from
+# num_processors: number of processors to use in the matrix calculations
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# Note: pp is time period, one of EA, AM, MD, PM, EV, vot is one of low, med, high
+# output/autoInternalExternalTrips_pp_vot.omx
+# output/autoVisitorTrips_pp_vot.omx
+# output/autoCrossBorderTrips_pp_vot.omx
+# output/autoAirportTrips.SAN_pp_vot.omx
+# output/autoAirportTrips.CDX_pp_vot.omx (if they exist)
+# output/autoTrips_pp_vot.omx
+# output/othrTrips_pp.omx (added to high vot)
+# output/TripMatrices.csv
+# output/EmptyAVTrips.omx (added to high vot)
+# output/TNCVehicleTrips_pp.omx (added to high vot)
+#
+# Matrix inputs:
+# pp_SOVGP_EIWORK, pp_SOVGP_EINONWORK, pp_SOVTOLL_EIWORK, pp_SOVTOLL_EINONWORK,
+# pp_HOV2HOV_EIWORK, pp_HOV2HOV_EINONWORK, pp_HOV2TOLL_EIWORK, pp_HOV2TOLL_EINONWORK,
+# pp_HOV3HOV_EIWORK, pp_HOV3HOV_EINONWORK, pp_HOV3TOLL_EIWORK, pp_HOV3TOLL_EINONWORK
+# pp_SOV_EETRIPS, pp_HOV2_EETRIPS, pp_HOV3_EETRIPS
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV, v is one of L, M, H
+# pp_SOV_TR_v, pp_SOV_NT_v, pp_HOV2_v, pp_HOV3_v
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ output_dir = os.path.join(main_directory, "output")
+ external_zones = "1-12"
+ num_processors = "MAX-1"
+ base_scenario = modeller.scenario
+ import_auto_demand = modeller.tool("sandag.import.import_auto_demand")
+ import_auto_demand(external_zones, output_dir, num_processors, base_scenario)
+"""
+
+TOOLBOX_ORDER = 13
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import pandas as _pandas
+import os
+import numpy
+from contextlib import contextmanager as _context
+
+_join = os.path.join
+
+dem_utils = _m.Modeller().module('sandag.utilities.demand')
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ImportMatrices(_m.Tool(), gen_utils.Snapshot):
+
+ external_zones = _m.Attribute(str)
+ output_dir = _m.Attribute(unicode)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self.external_zones = "1-12"
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_dir = os.path.dirname(project_dir)
+ self.main_dir = main_dir
+ self.output_dir = os.path.join(main_dir, "output")
+ self.num_processors = "MAX-1"
+ self.attributes = ["external_zones", "output_dir", "num_processors"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Import auto demand and sum matrices"
+ pb.description = """
+
+ Imports the trip matrices generated by CT-RAMP in OMX format,
+ the commercial vehicle demand in CSV format,
+ and adds the demand from the aggregate models for the final
+ trip assignments.
+ A total of 90 OMX files are expected, for 5 time periods
+ EA, AM, MD, PM and EV, and value-of-time level low, med or high,
+ with internal matrices by SOV, HOV2, HOV3+ and toll access type:
+
+ autoInternalExternalTrips_pp_vot.omx
+ autoVisitorTrips_pp_vot.omx
+ autoCrossBorderTrips_pp_vot.omx
+ autoAirportTrips.SAN_pp_vot.omx
+ autoAirportTrips.CDX_pp_vot.omx (optional)
+ autoTrips_pp_vot.omx
+ othrTrips_pp.omx (added to high vot)
+ EmptyAVTrips.omx (added to high vot)
+ TNCVehicleTrips_pp.omx (added to high vot)
+
+ As well as one CSV file "TripMatrices.csv" for the commercial vehicle trips.
+ Adds the aggregate demand from the
+ external-external and external-internal demand matrices:
+
+ table_msg = ["<h3>%s</h3><table>" % item["title"]]
+ if "header" in item:
+ table_msg.append("<tr>")
+ for label in item["header"]:
+ table_msg.append("<th>%s</th>" % label)
+ table_msg.append("</tr>")
+ for row in item["content"]:
+ table_msg.append("<tr>")
+ for cell in row:
+ table_msg.append("<td>%s</td>" % cell)
+ table_msg.append("</tr>")
+ table_msg.append("</table>")
+ report.add_html("".join(table_msg))
+
+ except Exception as error:
+ # no raise during report to avoid masking real error
+ report.add_html("Error generating report")
+ report.add_html(unicode(error))
+ report.add_html(_traceback.format_exc())
+
+ _m.logbook_write("Import network report", report.render())
+
+
+def get_node(network, number, coordinates, is_centroid):
+ node = network.node(number)
+ if not node:
+ node = network.create_node(number, is_centroid)
+ node.x, node.y = coordinates
+ return node
+
+
+# shortest path interpolation
+def find_path(orig_link, dest_link, mode):
+ visited = set([])
+ visited_add = visited.add
+ back_links = {}
+ heap = []
+
+ for link in orig_link.j_node.outgoing_links():
+ if mode in link.modes:
+ back_links[link] = None
+ _heapq.heappush(heap, (link["length"], link))
+
+ link_found = False
+ try:
+ while not link_found:
+ link_cost, link = _heapq.heappop(heap)
+ if link in visited:
+ continue
+ visited_add(link)
+ for outgoing in link.j_node.outgoing_links():
+ if mode not in outgoing.modes:
+ continue
+ if outgoing in visited:
+ continue
+ back_links[outgoing] = link
+ if outgoing == dest_link:
+ link_found = True
+ break
+ outgoing_cost = link_cost + outgoing["length"]
+ _heapq.heappush(heap, (outgoing_cost, outgoing))
+ except IndexError:
+ pass # IndexError if heap is empty
+ if not link_found:
+ raise NoPathException(
+ "no path found between links with trcov_id %s and %s (Emme IDs %s and %s)" % (
+ orig_link["@tcov_id"], dest_link["@tcov_id"], orig_link, dest_link))
+
+ prev_link = back_links[dest_link]
+ route = []
+ while prev_link:
+ route.append(prev_link)
+ prev_link = back_links[prev_link]
+ return list(reversed(route))
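+# Usage note: the returned route holds only the intermediate links, in travel
+# order, excluding both orig_link and dest_link themselves.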
+
+
+class NoPathException(Exception):
+ pass
+
+
+def revised_headway(headway):
+ # CALCULATE REVISED HEADWAY
+ # new headway calculation is less aggressive; also only being used for initial wait
+ # It uses a negative exponential formula to calculate headway
+ #
+ if headway <= 10:
+ rev_headway = headway
+ else:
+ rev_headway = headway * (0.275 + 0.788 * _np.exp(-0.011*headway))
+ return rev_headway
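+# Worked example: a published 30-minute headway gives
+# 30 * (0.275 + 0.788 * exp(-0.011 * 30)) ~= 25.2 minutes, while headways of
+# 10 minutes or less pass through unchanged.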
+
+
+def interchange_distance(orig_link, direction):
+ visited = set([])
+ visited_add = visited.add
+ back_links = {}
+ heap = []
+ if direction == "DOWNSTREAM":
+ get_links = lambda l: l.j_node.outgoing_links()
+ check_far_node = lambda l: l.j_node.is_interchange
+ elif direction == "UPSTREAM":
+ get_links = lambda l: l.i_node.incoming_links()
+ check_far_node = lambda l: l.i_node.is_interchange
+ # Shortest path search for nearest interchange node along freeway
+ for link in get_links(orig_link):
+ _heapq.heappush(heap, (link["length"], link))
+ interchange_found = False
+ try:
+ while not interchange_found:
+ link_cost, link = _heapq.heappop(heap)
+ if link in visited:
+ continue
+ visited_add(link)
+ if check_far_node(link):
+ interchange_found = True
+ break
+ for next_link in get_links(link):
+ if next_link in visited:
+ continue
+ next_cost = link_cost + next_link["length"]
+ _heapq.heappush(heap, (next_cost, next_link))
+ except IndexError:
+ # IndexError if heap is empty
+ # case where start / end of highway, dist = 99
+ return 99
+ return orig_link["length"] / 2.0 + link_cost
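+# Worked example: a 0.6 mile link whose nearest downstream interchange is
+# 1.2 miles past its far node returns 0.6 / 2.0 + 1.2 = 1.5 miles; links at
+# the start or end of a freeway exhaust the heap and return 99.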
diff --git a/sandag_abm/src/main/emme/toolbox/import/import_seed_demand.py b/sandag_abm/src/main/emme/toolbox/import/import_seed_demand.py
new file mode 100644
index 0000000..2b87f35
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/import/import_seed_demand.py
@@ -0,0 +1,193 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/import_seed_demand.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Imports the warm start demand matrices from specified OMX files for auto, truck and transit.
+#
+# Note the matrix name mapping from the OMX file names to the Emme database names.
+#
+# Inputs:
+# omx_file: source
+# demand_type: The type of demand in the provided OMX file, one of "AUTO", "TRUCK", "TRANSIT".
+# Used to determine the matrix mapping for the import.
+# period: The period for which to import the matrices, one of "EA", "AM", "MD", "PM", "EV"
+# scenario: traffic scenario to use for reference zone system
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# For AUTO (v is value of time, one of L, M, H):
+# pp_SOV_NT_v, pp_SOV_TR_v, pp_HOV2_v, pp_HOV3_v
+# For TRUCK:
+# pp_TRK_L, pp_TRK_M, pp_TRK_H
+# For TRANSIT:
+# pp_WLKBUS, pp_WLKPREM, pp_WLKALLPEN
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ period = "AM"
+ input_omx_file = os.path.join(main_directory, "input", "trip_%s.omx" % period)
+ demand_type = "TRUCK"
+ base_scenario = modeller.scenario
+ import_seed_demand = modeller.tool("sandag.import.import_seed_demand")
+ import_seed_demand(input_omx_file, demand_type, period, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 12
+
+
+import inro.modeller as _m
+import inro.emme.matrix as _matrix
+import traceback as _traceback
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+_omx = _m.Modeller().module("sandag.utilities.omxwrapper")
+
+
+class ImportMatrices(_m.Tool(), gen_utils.Snapshot):
+
+ omx_file = _m.Attribute(unicode)
+ demand_type = _m.Attribute(str)
+ period = _m.Attribute(str)
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self.attributes = ["omx_file", "demand_type", "period"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Import demand matrices"
+ pb.description = """Imports the seed demand matrices."""
+ pb.branding_text = "- SANDAG - Import"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('omx_file', 'file',
+ title='Select input OMX file')
+ options = [(x, x) for x in ["AUTO", "TRUCK", "TRANSIT"]]
+ pb.add_select("demand_type", keyvalues=options, title="Select corresponding demand type")
+ options = [(x, x) for x in ["EA", "AM", "MD", "PM", "EV"]]
+ pb.add_select("period", keyvalues=options, title="Select corresponding period")
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.omx_file, self.demand_type, self.period, scenario)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, omx_file, demand_type, period, scenario):
+ attributes = {
+ "omx_file": omx_file,
+ "demand_type": demand_type,
+ "period": period,
+ "scenario": scenario.id,
+ "self": str(self)
+ }
+ with _m.logbook_trace("Import %s matrices for period %s" % (demand_type, period), attributes=attributes):
+ gen_utils.log_snapshot("Import matrices", str(self), attributes)
+ demand_types = ["AUTO", "TRUCK", "TRANSIT"]
+ if demand_type not in demand_types:
+ raise Exception("Invalid demand_type, must be one of %s" % demand_types)
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ if period not in periods:
+ raise Exception("Invalid period, must be one of %s" % periods)
+
+ if demand_type == "AUTO":
+ # TODO: update for new seed matrices
+ matrices = {
+ '%s_SOV_NT_L': 'mf"%s_SOV_NT_L"',
+ '%s_SOV_TR_L': 'mf"%s_SOV_TR_L"',
+ '%s_HOV2_L': 'mf"%s_HOV2_L"',
+ '%s_HOV3_L': 'mf"%s_HOV3_L"',
+ '%s_SOV_NT_M': 'mf"%s_SOV_NT_M"',
+ '%s_SOV_TR_M': 'mf"%s_SOV_TR_M"',
+ '%s_HOV2_M': 'mf"%s_HOV2_M"',
+ '%s_HOV3_M': 'mf"%s_HOV3_M"',
+ '%s_SOV_NT_H': 'mf"%s_SOV_NT_H"',
+ '%s_SOV_TR_H': 'mf"%s_SOV_TR_H"',
+ '%s_HOV2_H': 'mf"%s_HOV2_H"',
+ '%s_HOV3_H': 'mf"%s_HOV3_H"'}
+ matrices = dict((k % period, v % period) for k, v in matrices.iteritems())
+ self._import_from_omx(omx_file, matrices, scenario)
+
+ if demand_type == "TRUCK":
+ # TODO: update for new seed matrices
+ matrices = {
+ '%s_TRK_H': 'mf"%s_TRK_H"',
+ '%s_TRK_L': 'mf"%s_TRK_L"',
+ '%s_TRK_M': 'mf"%s_TRK_M"'}
+ matrices = dict((k % period, v % period) for k, v in matrices.iteritems())
+ self._import_from_omx(omx_file, matrices, scenario)
+
+ if demand_type == "TRANSIT":
+ matrices = {
+ 'SET1': 'mf"%s_WLKBUS"',
+ 'SET2': 'mf"%s_WLKPREM"',
+ 'SET3': 'mf"%s_WLKALLPEN"',}
+ matrices = dict((k, v % period) for k, v in matrices.iteritems())
+ # special custom mapping from subset of TAPs to all TAPs
+ self._import_from_omx(omx_file, matrices, scenario)
+
+ def _import_from_omx(self, file_path, matrices, scenario):
+ matrices_to_write = {}
+ emme_zones = scenario.zone_numbers
+ emmebank = scenario.emmebank
+ omx_file_obj = _omx.open_file(file_path, 'r')
+ try:
+ zone_mapping = omx_file_obj.mapping(omx_file_obj.list_mappings()[0]).items()
+ zone_mapping.sort(key=lambda x: x[1])
+ omx_zones = [x[0] for x in zone_mapping]
+ for omx_name, emme_name in matrices.iteritems():
+ omx_data = omx_file_obj[omx_name].read()
+ if emme_name not in matrices_to_write:
+ matrices_to_write[emme_name] = omx_data
+ else:
+ # Allow multiple src matrices from OMX to sum to same matrix in Emme
+ matrices_to_write[emme_name] = omx_data + matrices_to_write[emme_name]
+ except Exception:
+ import traceback
+ print(traceback.format_exc())
+ finally:
+ omx_file_obj.close()
+
+ if omx_zones != emme_zones:
+ # special custom mapping from subset of TAPs to all TAPs
+ for emme_name, omx_data in matrices_to_write.iteritems():
+ matrix_data = _matrix.MatrixData(type='f', indices=[omx_zones, omx_zones])
+ matrix_data.from_numpy(omx_data)
+ expanded_matrix_data = matrix_data.expand([emme_zones, emme_zones])
+ matrix = emmebank.matrix(emme_name)
+ matrix.set_data(expanded_matrix_data, scenario)
+ else:
+ for emme_name, omx_data in matrices_to_write.iteritems():
+ matrix = emmebank.matrix(emme_name)
+ matrix.set_numpy_data(omx_data, scenario)
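+# Note: in the subset-of-TAPs branch above, MatrixData.expand pads the rows
+# and columns for zones absent from the OMX mapping with zeros before the
+# matrix is written to the emmebank.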
diff --git a/sandag_abm/src/main/emme/toolbox/import/import_transit_demand.py b/sandag_abm/src/main/emme/toolbox/import/import_transit_demand.py
new file mode 100644
index 0000000..827baff
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/import/import_transit_demand.py
@@ -0,0 +1,230 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/import_transit_demand.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Imports the transit demand generated from an iteration of the disaggregate
+# demand models (CT-RAMP) in preparation for the transit assignment
+#
+# Note the matrix name mapping from the OMX file names to the Emme database names.
+#
+# Inputs:
+# output_dir: output directory to read the OMX files from
+# scenario: transit scenario to use for reference zone system
+#
+# Files referenced:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# output/tranTrips_pp.omx
+# output/tranCrossBorderTrips_pp.omx
+# output/tranAirportTrips.SAN_pp.omx
+# output/tranAirportTrips.CBX_pp.omx (optional)
+# output/tranVisitorTrips_pp.omx
+# output/tranInternalExternalTrips_pp.omx
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# pp_WLKBUS, pp_WLKLRT, pp_WLKCMR, pp_WLKEXP, pp_WLKBRT,
+# pp_PNRBUS, pp_PNRLRT, pp_PNRCMR, pp_PNREXP, pp_PNRBRT,
+# pp_KNRBUS, pp_KNRLRT, pp_KNRCMR, pp_KNREXP, pp_KNRBRT
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ output_dir = os.path.join(main_directory, "output")
+ scenario = modeller.scenario
+ import_transit_demand = modeller.tool("sandag.import.import_transit_demand")
+ import_transit_demand(output_dir, scenario)
+"""
+
+
+TOOLBOX_ORDER = 14
+
+
+import inro.modeller as _m
+import inro.emme.matrix as _matrix
+import traceback as _traceback
+import os
+
+
+dem_utils = _m.Modeller().module('sandag.utilities.demand')
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ImportMatrices(_m.Tool(), gen_utils.Snapshot):
+
+ output_dir = _m.Attribute(unicode)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ main_dir = os.path.dirname(project_dir)
+ self.output_dir = os.path.join(main_dir, "output")
+ self.attributes = ["output_dir"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Import transit demand"
+ pb.description = """
+
+ Imports the trip matrices generated by CT-RAMP in OMX format.
+ A total of 30 OMX files are expected, for 5 time periods
+ EA, AM, MD, PM and EV, with internal matrices by 3 model segments
+ (assignment access sets) and 3 access modes (walk, PNR, KNR):
+
+ The input checker goes through the list of checks and evaluates each
+ one as True or False. A summary file is produced at the end with results
+ for each check. The input checker additionally outputs a report for
+ failed checks of severity type Logical with more than 25 failed records.
+ The additional summary report lists every failed record.
+ The following reports are output:
+
+
+ """
+ pb.branding_text = "SANDAG - Input Checker"
+
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self(path = self.path)
+ run_msg = "Input Checker Complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, path = ""):
+ _m.logbook_write("Started running input checker...")
+
+ self.path = path
+
+ self.input_checker_path = _join(self.path, 'input_checker')
+ self.inputs_list_path = _join(self.input_checker_path, 'config', 'inputs_list.csv')
+ self.inputs_checks_path = _join(self.input_checker_path, 'config', 'inputs_checks.csv')
+
+ file_paths = [self.inputs_list_path, self.inputs_checks_path]
+ for path in file_paths:
+ if not os.path.exists(path):
+ raise Exception("missing file '%s'" % (path))
+
+ _m.logbook_write("Reading inputs...")
+ self.read_inputs()
+
+ _m.logbook_write("Conducting checks...")
+ self.checks()
+
+ _m.logbook_write("Writing logical fail logs...")
+ self.write_logical_log()
+
+ _m.logbook_write("Writing logs...")
+ self.write_log()
+
+ _m.logbook_write("Checking for logical errors...")
+ self.check_logical()
+
+ _m.logbook_write("Checking for fatal errors...")
+ self.check_num_fatal()
+
+ _m.logbook_write("Finisehd running input checker")
+
+ def read_inputs(self):
+ # read list of inputs from CSV file
+ self.inputs_list = pd.read_csv(self.inputs_list_path)
+
+ # remove all commented inputs from the inputs list
+ self.inputs_list = self.inputs_list.loc[[not i for i in (self.inputs_list['Input_Table'].str.startswith('#'))]]
+
+ # obtain file paths from the sandag_abm.properties
+ self.prop_file_paths()
+
+ # load emmebank
+ eb_path = _join(self.path, "emme_project", "Database", "emmebank")
+ eb = _eb.Emmebank(eb_path)
+
+ # load emme network
+ network = eb.scenario(100).get_network()
+
+ # create extra network attributes (maybe temporary)
+
+ # link modes_str attribute
+ network.create_attribute("LINK", "mode_str")
+ for link in network.links():
+ link.mode_str = "".join([m.id for m in link.modes])
+
+ # link isTransit flag attribute
+ network.create_attribute("LINK", "isTransit")
+ transit_modes = set([m for m in network.modes() if m.type == "TRANSIT"])
+ for link in network.links():
+ link.isTransit = bool(link.modes.intersection(transit_modes))
+
+ # transit segment isFirst and isLast flags attributes
+ network.create_attribute("TRANSIT_SEGMENT", "isFirst", False)
+ network.create_attribute("TRANSIT_SEGMENT", "isLast", False)
+ for line in network.transit_lines():
+ first_seg = line.segment(0)
+ last_seg = line.segment(-2)
+ first_seg.isFirst = True
+ last_seg.isLast = True
+
+ # node isCentroid flag attribute
+ network.create_attribute("NODE", "isCentroid", False)
+ for node in network.nodes():
+ node.isCentroid = node.is_centroid
+
+ # node numInLinks and numOutLinks attributes
+ network.create_attribute("NODE", "numInLinks")
+ network.create_attribute("NODE", "numOutLinks")
+ for node in network.nodes():
+ node.numInLinks = len(list(node.incoming_links()))
+ node.numOutLinks = len(list(node.outgoing_links()))
+
+ # node hasLocalConnection flag attribute
+ class BreakLoop (Exception):
+ pass
+
+ network.create_attribute("NODE", "hasLocalConnection", False)
+ for node in network.centroids():
+ try:
+ for zone_connector in node.incoming_links():
+ for local_link in zone_connector.i_node.incoming_links():
+ if local_link["@lane_restriction"] == 1.0:
+ node.hasLocalConnection = True
+ raise BreakLoop("")
+ except BreakLoop:
+ pass
+
+ # transit line hasTAP flag attribute
+ network.create_attribute("TRANSIT_LINE", "hasTAP", False)
+ for line in network.transit_lines():
+ has_first_tap = False
+ has_last_tap = False
+ for link in line.segment(0).i_node.outgoing_links():
+ if link.j_node["@tap_id"] > 0:
+ has_first_tap = True
+ break
+ for link in line.segment(-2).j_node.outgoing_links():
+ if link.j_node["@tap_id"] > 0:
+ has_last_tap = True
+ break
+ line.hasTAP = has_first_tap and has_last_tap
+
+ # link names attribute
+ network.create_attribute("LINK", "linkNames")
+ for link in network.links():
+ link.linkNames = str(link['#name'] + "," + link['#name_from'] + "," + link['#name_to'])
+
+ def get_emme_object(emme_network, emme_network_object, fields_to_export):
+ # Emme network attribute and object names
+ net_attr = {
+ 'NODE':'nodes',
+ 'LINK':'links',
+ 'TRANSIT_SEGMENT':'transit_segments',
+ 'TRANSIT_LINE':'transit_lines',
+ 'CENTROID':'centroids'
+ }
+
+ # read in the entire list of emme network objects without string eval
+ uda = list(getattr(emme_network, net_attr[emme_network_object])())
+
+ # get list of network object attributes
+ obj_attr = []
+ if fields_to_export[0] in ['all','All','ALL']:
+ if emme_network_object == 'CENTROID':
+ obj_attr = emme_network.attributes('NODE')
+ else:
+ obj_attr = emme_network.attributes(emme_network_object)
+ else:
+ obj_attr = fields_to_export
+
+ # instantiate list of network objects
+ net_objs = []
+ for obj in uda:
+ obj_fields = [obj.id]
+ for attr in obj_attr:
+ obj_fields.append(obj[attr])
+ net_objs.append(obj_fields)
+ net_obj_df = pd.DataFrame(net_objs, columns = ['id'] + obj_attr)
+
+ return(net_obj_df)
+
+ for item, row in self.inputs_list.iterrows():
+
+ table_name = row['Input_Table']
+ emme_network_object = row['Emme_Object']
+ column_map = row['Column_Map']
+ fields_to_export = row['Fields'].split(',')
+
+ # obtain emme network object, csv or dbf input
+ if not (pd.isnull(emme_network_object)):
+ df = get_emme_object(network, emme_network_object, fields_to_export)
+ self.inputs[table_name] = df
+ else:
+ input_path = self.prop_input_paths[table_name]
+ input_ext = os.path.splitext(input_path)[1]
+ if input_ext == '.csv':
+ df = pd.read_csv(_join(self.path, input_path))
+ self.inputs[table_name] = df
+ else:
+ dbf_path = input_path
+ if '%project.folder%' in dbf_path:
+ dbf_path = dbf_path.replace('%project.folder%/', '')
+ dbf = Dbf5(_join(self.path, dbf_path))
+ df = dbf.to_dataframe()
+ self.inputs[table_name] = df
+
+ # add scenario year
+ self.inputs['scenario'] = self.scenario_df
+
+ def checks(self):
+ # read all input DFs into memory
+ for key, df in self.inputs.items():
+ expr = key + ' = df'
+ exec(expr)
+
+ # copy of locals(), a dictionary of all local variables
+ calc_dict = locals()
+
+ # read list of checks from CSV file
+ self.inputs_checks = pd.read_csv(self.inputs_checks_path)
+
+ # remove all commented checks from the checks list
+ self.inputs_checks = self.inputs_checks.loc[[not i for i in (self.inputs_checks['Test'].str.startswith('#'))]]
+
+ # perform calculations and add user-defined data frame subsets
+ for item, row in self.inputs_checks.iterrows():
+
+ test = row['Test']
+ table = row['Input_Table']
+ id_col = row['Input_ID_Column']
+ expr = row['Expression']
+ test_vals = row['Test_Vals']
+ if not (pd.isnull(row['Test_Vals'])):
+ test_vals = test_vals.split(',')
+ test_vals = [txt.strip() for txt in test_vals]
+ test_type = row['Type']
+ Severity = row['Severity']
+ stat_expr = row['Report_Statistic']
+
+ if test_type == 'Calculation':
+
+ try:
+ calc_expr = test + ' = ' + expr
+ exec(calc_expr, {}, calc_dict)
+ calc_out = eval(expr, calc_dict)
+ except Exception as error:
+ print('An error occurred with the calculation: {}'.format(test))
+ raise
+
+ if str(type(calc_out)) == "<class 'pandas.core.frame.DataFrame'>":
+ print('added '+ row['Test'] + ' as new DataFrame input')
+ self.inputs[row['Test']] = calc_out
+ self.inputs_list = self.inputs_list.append({'Input_Table': row['Test'],'Property_Token':'NA','Emme_Object':'NA', \
+ 'Fields':'NA','Column_Map':'NA','Input_Description':'NA'}, ignore_index = True)
+ self.inputs_checks = self.inputs_checks.append({'Test':test, 'Input_Table': table, 'Input_ID_Column':id_col, 'Severity':Severity, \
+ 'Type':test_type, 'Expression': expr, 'Test_Vals':test_vals, 'Report_Statistic':stat_expr, 'Test_Description': row['Test_Description']}, \
+ ignore_index = True)
+
+ # loop through list of checks and conduct all checks
+ # checks must evaluate to True if inputs are correct
+ for item, row in self.inputs_checks.iterrows():
+
+ test = row['Test']
+ table = row['Input_Table']
+ id_col = row['Input_ID_Column']
+ expr = row['Expression']
+ test_vals = row['Test_Vals']
+ if not (pd.isnull(row['Test_Vals'])):
+ test_vals = test_vals.split(',')
+ test_vals = [txt.strip() for txt in test_vals]
+ test_type = row['Type']
+ Severity = row['Severity']
+ stat_expr = row['Report_Statistic']
+
+ if test_type == 'Test':
+
+ if (pd.isnull(row['Test_Vals'])):
+
+ # perform test
+ try:
+ out = eval(expr, calc_dict)
+ except Exception as error:
+ print('An error occurred with the check: {}'.format(test))
+ raise
+
+ # check if test result is a series
+ if str(type(out)) == "<class 'pandas.core.series.Series'>":
+ # for series, the test must be evaluated across all items
+ # result is False if a single False is found
+ self.results[test] = not (False in out.values)
+
+ # reverse results list since we need all False IDs
+ reverse_results = [not i for i in out.values]
+ error_expr = table + "['" + id_col + "']" + "[reverse_results]"
+ error_id_list = eval(error_expr)
+
+ # report first 25 problem IDs in the log
+ self.problem_ids[test] = error_id_list if error_id_list.size > 0 else []
+
+ # compute report statistics
+ if (pd.isnull(stat_expr)):
+ self.report_stat[test] = ''
+ else:
+ stat_list = eval(stat_expr)
+ self.report_stat[test] = stat_list[reverse_results]
+ else:
+ self.results[test] = out
+ self.problem_ids[test] = []
+ if (pd.isnull(stat_expr)):
+ self.report_stat[test] = ''
+ else:
+ self.report_stat[test] = eval(stat_expr)
+ else:
+ # loop through test_vals and perform test for each item
+ self.result_list[test] = []
+ for test_val in test_vals:
+ # perform test (test result must not be of type Series)
+ try:
+ out = eval(expr)
+ except Exception as error:
+ print('An error occurred with the check: {}'.format(test))
+ raise
+
+ # compute report statistic
+ if (pd.isnull(stat_expr)):
+ self.report_stat[test] = ''
+ else:
+ self.report_stat[test] = eval(stat_expr)
+
+ # append to list
+ self.result_list[test].append(out)
+ self.results[test] = not (False in self.result_list[test])
+ self.problem_ids[test] = []
+ else:
+ # perform calculation
+ try:
+ calc_expr = test + ' = ' + expr
+ exec(calc_expr, {}, calc_dict)
+ except Exception as error:
+ print('An error occurred with the calculation: {}'.format(test))
+ raise
+
+ def prop_file_paths(self):
+ prop_files = self.inputs_list[['Input_Table','Property_Token']].dropna()
+
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(self.path, 'conf', 'sandag_abm.properties'))
+
+ for item, row in prop_files.iterrows():
+ input_table = row['Input_Table']
+ input_path = props[row['Property_Token']]
+ self.prop_input_paths[input_table] = input_path
+
+ # obtain scenario year
+ self.scenario_df['Year'] = [props['scenarioYear']]
+
+ def write_log(self):
+ # function to write out the input checker log file
+ # there are four blocks
+ # - Introduction
+ # - Summary of checks
+ # - Action Required: FATAL, LOGICAL, WARNINGS
+ # - List of passed checks
+
+ # create log file
+ now = datetime.datetime.now()
+
+ self.log_path = _join(self.input_checker_path, ('inputCheckerSummary_' + now.strftime("[%Y-%m-%d]") + '.txt'))
+ f = open(self.log_path, 'wb')
+
+ # define re-usable elements
+ seperator1 = '###########################################################'
+ seperator2 = '***********************************************************'
+
+ # write out Header
+ f.write(seperator1 + seperator1 + "\r\n")
+ f.write(seperator1 + seperator1 + "\r\n\r\n")
+ f.write("\t SANDAG ABM Input Checker Summary File \r\n")
+ f.write("\t _____________________________________ \r\n\r\n\r\n")
+ f.write("\t Created on: " + now.strftime("%Y-%m-%d %H:%M") + "\r\n\r\n")
+ f.write("\t Notes:-\r\n")
+ f.write("\t The SANDAG ABM Input Checker performs various QA/QC checks on SANDAG ABM inputs as specified by the user.\r\n")
+ f.write("\t The Input Checker allows the user to specify three severity levels for each QA/QC check:\r\n\r\n")
+ f.write("\t 1) FATAL 2) LOGICAL 3) WARNING\r\n\r\n")
+ f.write("\t FATAL Checks: The failure of these checks would result in a FATAL errors in the SANDAG ABM run.\r\n")
+ f.write("\t In case of FATAL failure, the Input Checker returns a return code of 1 to the\r\n")
+ f.write("\t main SANDAG ABM model, cauing the model run to halt.\r\n")
+ f.write("\t LOGICAL Checks: The failure of these checks indicate logical inconsistencies in the inputs.\r\n")
+ f.write("\t With logical errors in inputs, the SANDAG ABM outputs may not be meaningful.\r\n")
+ f.write("\t WARNING Checks: The failure of Warning checks would indicate problems in data that would not.\r\n")
+ f.write("\t halt the run or affect model outputs but might indicate an issue with inputs.\r\n\r\n\r\n")
+ f.write("\t The contents of this summary file are organized as follows: \r\n\r\n")
+ f.write("\t TALLY OF FAILED CHECKS:\r\n")
+ f.write("\t -----------------------\r\n")
+ f.write("\t A tally of all failed checks per severity level\r\n\r\n")
+ f.write("\t IMMEDIATE ACTION REQUIRED:\r\n")
+ f.write("\t -------------------------\r\n")
+ f.write("\t A log under this heading will be generated in case of failure of a FATAL check\r\n\r\n")
+ f.write("\t ACTION REQUIRED:\r\n")
+ f.write("\t ---------------\r\n")
+ f.write("\t A log under this heading will be generated in case of failure of a LOGICAL check\r\n\r\n")
+ f.write("\t WARNINGS:\r\n")
+ f.write("\t ---------\r\n")
+ f.write("\t A log under this heading will be generated in case of failure of a WARNING check\r\n\r\n")
+ f.write("\t SUMMARY OF ALL PASSED CHECKS:\r\n")
+ f.write("\t ----------------------------\r\n")
+ f.write("\t A complete listing of results of all passed checks\r\n\r\n")
+ f.write(seperator1 + seperator1 + "\r\n")
+ f.write(seperator1 + seperator1 + "\r\n\r\n\r\n\r\n")
+
+ # combine results, inputs_checks and inputs_list
+ self.inputs_checks['result'] = self.inputs_checks['Test'].map(self.results)
+ checks_df = pd.merge(self.inputs_checks, self.inputs_list, on='Input_Table')
+ checks_df = checks_df[checks_df.Type=='Test']
+ checks_df['reverse_result'] = [not i for i in checks_df.result]
+
+ # get count of all FATAL failures
+ self.num_fatal = checks_df.result[(checks_df.Severity=='Fatal') & (checks_df.reverse_result)].count()
+
+ # get count of all LOGICAL failures
+ self.num_logical = checks_df.result[(checks_df.Severity=='Logical') & (checks_df.reverse_result)].count()
+ self.logical_fails = checks_df[(checks_df.Severity=='Logical') & (checks_df.reverse_result)]
+
+ # get count of all WARNING failures
+ self.num_warning = checks_df.result[(checks_df.Severity=='Warning') & (checks_df.reverse_result)].count()
+
+ # write summary of failed checks
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "TALLY OF FAILED CHECKS \r\n")
+ f.write('\t' + "---------------------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n\t")
+ f.write(' Number of Fatal Errors: ' + str(self.num_fatal))
+ f.write('\r\n\t Number of Logical Errors: ' + str(self.num_logical))
+ f.write('\r\n\t Number of Warnings: ' + str(self.num_warning))
+
+ def write_check_log(self, fh, row):
+ # define constants
+ seperator2 = '-----------------------------------------------------------'
+
+ # integerize problem ID list
+ problem_ids = self.problem_ids[row['Test']]
+ #problem_ids = [int(x) for x in problem_ids]
+
+ # write check summary
+ fh.write('\r\n\r\n' + seperator2 + seperator2)
+ fh.write("\r\n\t Input File Name: " + ('NA' if not pd.isnull(row['Emme_Object']) else
+ (self.prop_input_paths[row['Input_Table']].rsplit('/', 1)[-1])))
+ fh.write("\r\n\t Input File Location: " + ('NA' if not pd.isnull(row['Emme_Object']) else
+ (_join(self.input_checker_path, self.prop_input_paths[row['Input_Table']].replace('/','\\')))))
+ fh.write("\r\n\t Emme Object: " + (row['Emme_Object'] if not pd.isnull(row['Emme_Object']) else 'NA'))
+ fh.write("\r\n\t Input Description: " + (row['Input_Description'] if not pd.isnull(row['Input_Description']) else ""))
+ fh.write("\r\n\t Test Name: " + row['Test'])
+ fh.write("\r\n\t Test_Description: " + (row['Test_Description'] if not pd.isnull(row['Test_Description']) else ""))
+ fh.write("\r\n\t Test Severity: " + row['Severity'])
+ fh.write("\r\n\r\n\t TEST RESULT: " + ('PASSED' if row['result'] else 'FAILED'))
+
+ # display problem IDs for failed column checks
+ wrapper = textwrap.TextWrapper(width = 70)
+ if (not row['result']) & (len(problem_ids)>0) :
+ fh.write("\r\n\t TEST failed for following values of ID Column: " + row['Input_ID_Column'] + " (only up to 25 IDs displayed)")
+ fh.write("\r\n\t " + row['Input_ID_Column'] + ": " + "\r\n\t " + "\r\n\t ".join(wrapper.wrap(text = ", ".join(map(str, problem_ids[0:25])))))
+ if not (pd.isnull(row['Report_Statistic'])):
+ this_report_stat = self.report_stat[row['Test']]
+ fh.write("\r\n\t Test Statistics: " + "\r\n\t " + "\r\n\t ".join(wrapper.wrap(text = ", ".join(map(str, this_report_stat[0:25])))))
+ fh.write("\r\n\t Total number of failures: " + str(len(self.problem_ids[row['Test']])))
+ if ((len(self.problem_ids[row['Test']])) > 25) and (row['Severity'] == 'Logical'):
+ fh.write("\r\n\t Open {} for complete list of failed Logical failures.".format(self.logical_log_path))
+ else:
+ if not (pd.isnull(row['Report_Statistic'])):
+ fh.write("\r\n\t Test Statistic: " + str(self.report_stat[row['Test']]))
+
+ # display result for each test val if it was specified
+ if not (pd.isnull(row['Test_Vals'])):
+ fh.write("\r\n\t TEST results for each test val")
+ result_tuples = zip(row['Test_Vals'].split(","), self.result_list[row['Test']])
+ fh.write("\r\n\t ")
+ fh.write(','.join('[{} - {}]'.format(x[0],x[1]) for x in result_tuples))
+
+ fh.write("\r\n" + seperator2 + seperator2 + "\r\n\r\n")
+
+ # write out IMMEDIATE ACTION REQUIRED section if needed
+ if self.num_fatal > 0:
+ fatal_checks = checks_df[(checks_df.Severity=='Fatal') & (checks_df.reverse_result)]
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "IMMEDIATE ACTION REQUIRED \r\n")
+ f.write('\t' + "------------------------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+
+ # write out log for each check
+ for item, row in fatal_checks.iterrows():
+ #self.write_check_log(f, row, self.problem_ids[row['Test']])
+ #write_check_log(self, f, row, self.problem_ids[row['Test']])
+ write_check_log(self, f, row)
+
+ # write out ACTION REQUIRED section if needed
+ if self.num_logical > 0:
+ logical_checks = checks_df[(checks_df.Severity=='Logical') & (checks_df.reverse_result)]
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "ACTION REQUIRED \r\n")
+ f.write('\t' + "--------------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+
+ #write out log for each check
+ for item, row in logical_checks.iterrows():
+ write_check_log(self, f, row)
+
+ # write out WARNINGS section if needed
+ if self.num_warning > 0:
+ warning_checks = checks_df[(checks_df.Severity=='Warning') & (checks_df.reverse_result)]
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "WARNINGS \r\n")
+ f.write('\t' + "-------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+
+ # write out log for each check
+ for item, row in warning_checks.iterrows():
+ write_check_log(self, f, row)
+
+ # write out the complete listing of all checks that passed
+ passed_checks = checks_df[(checks_df.result)]
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "LOG OF ALL PASSED CHECKS \r\n")
+ f.write('\t' + "------------------------ \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+
+ # write out log for each check
+ for item, row in passed_checks.iterrows():
+ write_check_log(self, f, row)
+
+ f.close()
+
+ def write_logical_log(self):
+ # function to write out the complete list of Logical failures
+
+ # combine results, inputs_checks and inputs_list
+ self.inputs_checks['result'] = self.inputs_checks['Test'].map(self.results)
+ checks_df = pd.merge(self.inputs_checks, self.inputs_list, on='Input_Table')
+ checks_df = checks_df[checks_df.Type=='Test']
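+ # 'reverse_result' is True where a check failed; used below to select
+ # failed checks by severity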
+ checks_df['reverse_result'] = [not i for i in checks_df.result]
+
+ # get count of all LOGICAL failures
+ self.num_logical = checks_df.result[(checks_df.Severity=='Logical') & (checks_df.reverse_result)].count()
+ self.logical_fails = checks_df[(checks_df.Severity=='Logical') & (checks_df.reverse_result)]
+
+ log_fail_id_tally = 0
+ if self.num_logical > 0:
+ for item, row in self.logical_fails.iterrows():
+ problem_ids = self.problem_ids[row['Test']]
+ if len(problem_ids) > 0:
+ log_fail_id_tally += 1
+
+ if log_fail_id_tally > 0:
+
+ # create log file
+ now = datetime.datetime.now()
+
+ self.logical_log_path = _join(self.input_checker_path, ('completeLogicalFails_' + now.strftime("[%Y-%m-%d]") + '.txt'))
+ f = open(self.logical_log_path, 'wb')
+
+ # define re-usable elements
+ seperator1 = '###########################################################'
+ seperator2 = '***********************************************************'
+
+ # write out Header
+ f.write(seperator1 + seperator1 + "\r\n")
+ f.write(seperator1 + seperator1 + "\r\n\r\n")
+ f.write("\t SANDAG ABM Input Checker Logical Failures Complete List \r\n")
+ f.write("\t _______________________________________________________ \r\n\r\n\r\n")
+ f.write("\t Created on: " + now.strftime("%Y-%m-%d %H:%M") + "\r\n\r\n")
+ f.write("\t Notes:-\r\n")
+ f.write("\t The SANDAG ABM Input Checker performs various QA/QC checks on SANDAG ABM inputs as specified by the user.\r\n")
+ f.write("\t The Input Checker allows the user to specify three severity levels for each QA/QC check:\r\n\r\n")
+ f.write("\t 1) FATAL 2) LOGICAL 3) WARNING\r\n\r\n")
+ f.write("\t This file provides the complete list of failed checks for checks of severity type Logical. \r\n")
+ f.write(seperator1 + seperator1 + "\r\n")
+ f.write(seperator1 + seperator1 + "\r\n\r\n\r\n\r\n")
+
+ # write total number of failed logical checks
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "TALLY OF FAILED CHECKS \r\n")
+ f.write('\t' + "---------------------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n\t")
+ f.write('\r\n\t Number of Logical Errors: ' + str(self.num_logical))
+
+ def write_logical_check_log(self, fh, row):
+ # define constants
+ seperator2 = '-----------------------------------------------------------'
+
+ # integerize problem ID list
+ problem_ids = self.problem_ids[row['Test']]
+ #problem_ids = [int(x) for x in problem_ids]
+
+ # write check summary
+ fh.write('\r\n\r\n' + seperator2 + seperator2)
+ fh.write("\r\n\t Input File Name: " + ('NA' if not pd.isnull(row['Emme_Object']) else
+ (self.prop_input_paths[row['Input_Table']].rsplit('/', 1)[-1])))
+ fh.write("\r\n\t Input File Location: " + ('NA' if not pd.isnull(row['Emme_Object']) else
+ (_join(self.input_checker_path, self.prop_input_paths[row['Input_Table']].replace('/','\\')))))
+ fh.write("\r\n\t Emme Object: " + (row['Emme_Object'] if not pd.isnull(row['Emme_Object']) else 'NA'))
+ fh.write("\r\n\t Input Description: " + (row['Input_Description'] if not pd.isnull(row['Input_Description']) else ""))
+ fh.write("\r\n\t Test Name: " + row['Test'])
+ fh.write("\r\n\t Test_Description: " + (row['Test_Description'] if not pd.isnull(row['Test_Description']) else ""))
+ fh.write("\r\n\t Test Severity: " + row['Severity'])
+ fh.write("\r\n\r\n\t TEST RESULT: " + ('PASSED' if row['result'] else 'FAILED'))
+
+ # display problem IDs for failed column checks
+ wrapper = textwrap.TextWrapper(width = 70)
+ if (not row['result']) & (len(problem_ids)>0) :
+ fh.write("\r\n\t TEST failed for following values of ID Column: " + row['Input_ID_Column'])
+ fh.write("\r\n\t " + row['Input_ID_Column'] + ": " + "\r\n\t " + "\r\n\t ".join(wrapper.wrap(text = ", ".join(map(str, problem_ids)))))
+ if not (pd.isnull(row['Report_Statistic'])):
+ this_report_stat = self.report_stat[row['Test']]
+ fh.write("\r\n\t Test Statistics: " + "\r\n\t " + "\r\n\t ".join(wrapper.wrap(text = ", ".join(map(str, this_report_stat)))))
+ fh.write("\r\n\t Total number of failures: " + str(len(self.problem_ids[row['Test']])))
+ else:
+ if not (pd.isnull(row['Report_Statistic'])):
+ fh.write("\r\n\t Test Statistic: " + str(self.report_stat[row['Test']]))
+
+ # display result for each test val if it was specified
+ if not (pd.isnull(row['Test_Vals'])):
+ fh.write("\r\n\t TEST results for each test val")
+ result_tuples = zip(row['Test_Vals'].split(","), self.result_list[row['Test']])
+ fh.write("\r\n\t ")
+ fh.write(','.join('[{} - {}]'.format(x[0],x[1]) for x in result_tuples))
+
+ fh.write("\r\n" + seperator2 + seperator2 + "\r\n\r\n")
+
+ # write out ACTION REQUIRED section if needed
+ if self.num_logical > 0:
+ logical_checks = checks_df[(checks_df.Severity=='Logical') & (checks_df.reverse_result)]
+ f.write('\r\n\r\n' + seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n\r\n")
+ f.write('\t' + "LOG OF ALL FAILED LOGICAL CHECKS \r\n")
+ f.write('\t' + "-------------------------------- \r\n\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+ f.write(seperator2 + seperator2 + "\r\n")
+
+ #write out log for each check
+ for item, row in logical_checks.iterrows():
+ if len(self.problem_ids[row['Test']]) > 25:
+ write_logical_check_log(self, f, row)
+
+ f.close()
+
+ def check_logical(self):
+ if self.num_logical > 0:
+ # raise exception for each logical check fail
+ for item, row in self.logical_fails.iterrows():
+ answer = dialog.alert_question(
+ message = "The following Logical check resulted in at least 1 error: {} \n Open {} for details. \
+ \n\n Click OK to continue or Cancel to stop run.".format(row['Test'], self.log_path),
+ title = "Logical Check Error",
+ answers = [("OK", dialog.YES_ROLE), ("Cancel", dialog.REJECT_ROLE)]
+ )
+
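+ # the answers are indexed in order, so 1 corresponds to Cancel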
+ if answer == 1:
+ raise Exception("Input checker was cancelled")
+
+ def check_num_fatal(self):
+ # return code to the main model based on input checks and results
+ if self.num_fatal > 0:
+ raise Exception("Input checker failed, {} fatal errors found. Open {} for details.".format(self.num_fatal, self.log_path))
\ No newline at end of file
diff --git a/sandag_abm/src/main/emme/toolbox/import/run4Ds.py b/sandag_abm/src/main/emme/toolbox/import/run4Ds.py
new file mode 100644
index 0000000..89ff0d0
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/import/run4Ds.py
@@ -0,0 +1,412 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright RSG, 2019-2020. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// import/run4Ds.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Generates density (4D) variables and appends them to the MGRA socio-economic file
+#
+#
+# Inputs:
+# path: path to the current scenario
+# ref_path: path to the comparison model scenario
+# int_radius: buffer radius for intersection counts
+# maps: default unchecked - if checked, generates spatial heat maps of
+# intersection counts; this functionality requires the
+# following packages: geopandas, folium, and branca
+#
+# File referenced:
+# input\mgra13_based_input2016.csv
+# input\SANDAG_Bike_Net.dbf
+# input\SANDAG_Bike_Node.dbf
+# output\walkMgraEquivMinutes.csv
+#
+# Script example
+# python C:\ABM_runs\maint_2019_RSG\Tasks\4ds\emme_toolbox\emme\toolbox\import\run4Ds.py
+# 0.65 r'C:\ABM_runs\maint_2019_RSG\Model\ABM2_14_2_0' r'C:\ABM_runs\maint_2019_RSG\Model\abm_test_fortran_4d'
+
+
+TOOLBOX_ORDER = 10
+
+#import modules
+import inro.modeller as _m
+from simpledbf import Dbf5
+import os
+import pandas as pd, numpy as np
+#import datetime
+import matplotlib.pyplot as plt
+import seaborn as sns
+import warnings
+import traceback as _traceback
+
+warnings.filterwarnings("ignore")
+
+_join = os.path.join
+_dir = os.path.dirname
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+class FourDs(_m.Tool()):
+
+ path = _m.Attribute(unicode)
+ ref_path = _m.Attribute(unicode)
+ int_radius = _m.Attribute(float)
+ maps = _m.Attribute(bool)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self._log = []
+ self._error = []
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ self.path = _dir(project_dir)
+ self.mgradata_file = ''
+ self.equivmins_file = ''
+ self.inNet = ''
+ self.inNode = ''
+ self.ref_path = ''
+ self.maps = False
+ self.int_radius = 0.65 #mile
+ self.oth_radius = self.int_radius #same as intersection radius
+ self.new_cols = ['totint','duden','empden','popden','retempden','totintbin','empdenbin','dudenbin','PopEmpDenPerMi']
+ self.continuous_fields = ['totint', 'popden', 'empden', 'retempden']
+ self.discrete_fields = ['totintbin', 'empdenbin', 'dudenbin']
+ self.mgra_shape_file = ''
+ self.base = pd.DataFrame()
+ self.build = pd.DataFrame()
+ self.mgra_data = pd.DataFrame()
+ self.base_cols = []
+ self.attributes = ["path", "int_radius", "ref_path"]
+
+ def page(self):
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(self.path, "conf", "sandag_abm.properties"))
+ self.ref_path = props["visualizer.reference.path"]
+
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Run 4Ds"
+ pb.description = """
+ Generate Density Variables.
+ Generated from MGRA socio economic file and active transportation (AT) network.
+
+
+ The following files are used:
+
+
+
input\mgra13_based_input2016.csv
+
input\SANDAG_Bike_Net.dbf
+
input\SANDAG_Bike_Node.dbf
+
output\walkMgraEquivMinutes.csv
+
+
+ """
+ pb.branding_text = "SANDAG - Run 4Ds"
+
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file("path", window_type="directory", file_filter="",
+ title="Source directory:",)
+
+ pb.add_text_box("int_radius", size=6, title="Buffer size (miles):")
+ #pb.add_checkbox("maps", title=" ", label="Generate 4D maps")
+ pb.add_select_file("ref_path", window_type="directory", file_filter="", title="Reference directory for comparison")
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self(path=self.path, int_radius=self.int_radius, ref_path=self.ref_path)
+ run_msg = "Run 4Ds complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, path= "",
+ int_radius = 0.65,
+ ref_path = ""):
+ _m.logbook_write("Started running 4Ds ...")
+
+ self.path = path
+ self.ref_path = ref_path
+ self.int_radius = int_radius
+ #self.maps = maps
+
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(self.path, "conf", "sandag_abm.properties"))
+
+
+ self.mgradata_file = props["mgra.socec.file"] #input/filename
+ self.syn_households_file = props["PopulationSynthesizer.InputToCTRAMP.HouseholdFile"] #input/filename
+ self.equivmins_file = props["active.logsum.matrix.file.walk.mgra"] #filename
+ self.inNet = os.path.basename(props["active.edge.file"]) #filename
+ self.inNode = os.path.basename(props["active.node.file"]) #filename
+
+ attributes = {
+ "path": self.path,
+ "ref_path": self.ref_path,
+ "int_radius": self.int_radius,
+ "maps": self.maps,
+ }
+ gen_utils.log_snapshot("Run 4Ds", str(self), attributes)
+
+ file_paths = [_join(self.path, self.mgradata_file),_join(self.path, self.syn_households_file),_join(self.path, "output", self.equivmins_file), _join(self.path, "input", self.inNet), _join(self.path, "input", self.inNode)]
+ for path in file_paths:
+ if not os.path.exists(path):
+ raise Exception("missing file '%s'" % (path))
+
+ self.mgra_data = pd.read_csv(os.path.join(self.path,self.mgradata_file))
+ self.base_cols = self.mgra_data.columns.tolist()
+
+ _m.logbook_write("Tagging intersections to mgra")
+ self.get_intersection_count()
+
+ _m.logbook_write("Generating density variables")
+ self.get_density()
+
+ _m.logbook_write("Creating comparison plots")
+ self.make_plots()
+
+ _m.logbook_write("Finished running 4Ds")
+
+ def get_intersection_count(self):
+ links = Dbf5(_join(self.path, "input", self.inNet))
+ links = links.to_dataframe()
+
+ nodes = Dbf5(_join(self.path, "input", self.inNode))
+ nodes = nodes.to_dataframe()
+
+ nodes_int = nodes.loc[(nodes.NodeLev_ID < 100000000)]
+
+ #links
+ #remove taz, mgra, and tap connectors
+ links = links.loc[(links.A <100000000) & (links.B <100000000)]
+
+ #remove freeways (Func_Class=1), ramps (Func_Class=2), and others (Func_Class =0 or -1)
+ links = links.loc[(links.Func_Class > 2)]
+ links['link_count'] = 1
+
+ #aggregate by Node A and Node B
+ links_nodeA = links[['A', 'link_count']].groupby('A').sum().reset_index()
+ links_nodeB = links[['B', 'link_count']].groupby('B').sum().reset_index()
+
+ #merge the two and keep all records from both dataframes (how='outer')
+ nodes_linkcount = pd.merge(links_nodeA, links_nodeB, left_on='A', right_on='B', how = 'outer')
+ nodes_linkcount = nodes_linkcount.fillna(0)
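+ #a node's total leg count is its count as an A node plus its count as a B node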
+ nodes_linkcount['link_count'] = nodes_linkcount['link_count_x'] + nodes_linkcount['link_count_y']
+
+ #get node id from both dataframes
+ nodes_linkcount['N'] = 0
+ nodes_linkcount.loc[nodes_linkcount.A > 0, 'N'] = nodes_linkcount['A']
+ nodes_linkcount.loc[nodes_linkcount.B > 0, 'N'] = nodes_linkcount['B']
+ nodes_linkcount['N']=nodes_linkcount['N'].astype(float)
+ nodes_linkcount = nodes_linkcount[['N','link_count']]
+
+ #keep nodes with 3+ link count
+ intersections_temp = nodes_linkcount.loc[nodes_linkcount.link_count>=3]
+
+ #get node X and Y
+ intersections = pd.merge(intersections_temp,nodes_int[['NodeLev_ID','XCOORD','YCOORD']], left_on = 'N', right_on = 'NodeLev_ID', how = 'left')
+ intersections = intersections[['N','XCOORD','YCOORD']]
+ intersections = intersections.rename(columns = {'XCOORD': 'X', 'YCOORD': 'Y'})
+
+ mgra_nodes = nodes[nodes.MGRA > 0][['MGRA','XCOORD','YCOORD']]
+ mgra_nodes.columns = ['mgra','x','y']
+ int_dict = {}
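+ #assign each intersection node to the nearest MGRA centroid by straight-line distance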
+ for _, int_row in intersections.iterrows():
+ mgra_nodes['dist'] = np.sqrt((int_row['X'] - mgra_nodes['x'])**2+(int_row['Y'] - mgra_nodes['y'])**2)
+ int_dict[int_row['N']] = mgra_nodes.loc[mgra_nodes['dist'] == mgra_nodes['dist'].min()]['mgra'].values[0]
+
+ intersections['near_mgra'] = intersections['N'].map(int_dict)
+ intersections = intersections.groupby('near_mgra', as_index = False).count()[['near_mgra','N']].rename(columns = {'near_mgra':'mgra','N':'icnt'})
+ #drop any existing intersection count column before merging the new counts
+ if 'icnt' in self.mgra_data.columns:
+ self.mgra_data = self.mgra_data.drop('icnt', axis = 1)
+ self.mgra_data = self.mgra_data.merge(intersections, how = 'outer', on = "mgra")
+
+ def get_density(self):
+ if len(self.mgra_data) == 0:
+ mgra_landuse = pd.read_csv(os.path.join(self.path, self.mgradata_file))
+ else:
+ mgra_landuse = self.mgra_data
+
+ # get population from synthetic population instead of mgra data file
+ syn_pop = pd.read_csv(os.path.join(self.path, self.syn_households_file))
+ syn_pop = syn_pop.rename(columns = {'MGRA':'mgra'})[['persons','mgra']].groupby('mgra',as_index = False).sum()
+ #remove if 4D columns exist
+ for col in self.new_cols:
+ if col in self.base_cols:
+ self.base_cols.remove(col)
+ mgra_landuse = mgra_landuse.drop(col,axis=1)
+
+ #merge synthetic population to landuse
+ mgra_landuse = mgra_landuse.merge(syn_pop, how = 'left', on = 'mgra')
+ #all street distance
+ equiv_min = pd.read_csv(_join(self.path, "output", self.equivmins_file))
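+ #convert walk time in minutes to distance in miles, assuming a 3 mph walk speed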
+ equiv_min['dist'] = equiv_min['actual']/60*3
+ print("MGRA input landuse: " + self.mgradata_file)
+
+ def density_function(mgra_in):
+ eqmn = equiv_min[equiv_min['i'] == mgra_in]
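+ #MGRAs within the intersection / other buffer radii of the subject MGRA,
+ #by all-street walk distance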
+ mgra_circa_int = eqmn[eqmn['dist'] < self.int_radius]['j'].unique()
+ mgra_circa_oth = eqmn[eqmn['dist'] < self.oth_radius]['j'].unique()
+ totEmp = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['emp_total'].sum()
+ totRet = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['emp_retail'].sum() + mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['emp_personal_svcs_retail'].sum() + mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['emp_restaurant_bar'].sum()
+ totHH = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['hh'].sum()
+ totPop = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['persons'].sum()
+ totAcres = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_oth)]['land_acres'].sum()
+ totInt = mgra_landuse[mgra_landuse.mgra.isin(mgra_circa_int)]['icnt'].sum()
+ if(totAcres>0):
+ empDen = totEmp/totAcres
+ retDen = totRet/totAcres
+ duDen = totHH/totAcres
+ popDen = totPop/totAcres
+ popEmpDenPerMi = (totEmp+totPop)/(totAcres/640) #acres to square miles
+ tot_icnt = totInt
+ else:
+ empDen = 0
+ retDen = 0
+ duDen = 0
+ popDen = 0
+ popEmpDenPerMi = 0
+ tot_icnt = 0
+ return tot_icnt,duDen,empDen,popDen,retDen,popEmpDenPerMi
+
+ #new_cols = [0-'totint',1-'duden',2-'empden',3-'popden',4-'retempden',5-'totintbin',6-'empdenbin',7-'dudenbin',8-'PopEmpDenPerMi']
+ mgra_landuse[self.new_cols[0]],mgra_landuse[self.new_cols[1]],mgra_landuse[self.new_cols[2]],mgra_landuse[self.new_cols[3]],mgra_landuse[self.new_cols[4]],mgra_landuse[self.new_cols[8]] = zip(*mgra_landuse['mgra'].map(density_function))
+
+ mgra_landuse = mgra_landuse.fillna(0)
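+ #bin the 4D measures into three classes (1=low, 2=medium, 3=high) using
+ #fixed thresholds: totint 80/130, empden 10/30, duden 5/10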
+ mgra_landuse[self.new_cols[5]] = np.where(mgra_landuse[self.new_cols[0]] < 80, 1, np.where(mgra_landuse[self.new_cols[0]] < 130, 2, 3))
+ mgra_landuse[self.new_cols[6]] = np.where(mgra_landuse[self.new_cols[2]] < 10, 1, np.where(mgra_landuse[self.new_cols[2]] < 30, 2,3))
+ mgra_landuse[self.new_cols[7]] = np.where(mgra_landuse[self.new_cols[1]] < 5, 1, np.where(mgra_landuse[self.new_cols[1]] < 10, 2,3))
+
+ mgra_landuse[self.base_cols+self.new_cols].to_csv(os.path.join(self.path, self.mgradata_file), index = False, float_format='%.4f' )
+
+ self.mgra_data = mgra_landuse
+ print( "*** Finished ***")
+
+ #plot comparisons of build and old density values and create heat maps
+ def make_plots(self):
+ if len(self.mgra_data) == 0:
+ self.build = pd.read_csv(os.path.join(self.path, self.mgradata_file))
+ else:
+ self.build = self.mgra_data
+
+ def plot_continuous(field):
+ #colors
+ rsg_orange = '#f68b1f'
+ rsg_marine = '#006fa1'
+ #rsg_leaf = '#63af5e'
+ #rsg_grey = '#48484a'
+ #rsg_mist = '#dcddde'
+
+ max_val = self.base[field].max() + self.base[field].max()%5
+ div = max_val/5 if max_val/5 >= 10 else max_val/2
+ bins = np.linspace(0,max_val,div)
+ plt.hist(self.base[field], bins, normed = True, alpha = 0.5, label = 'Base', color = rsg_marine)
+ plt.hist(self.build[field], bins, normed = True, alpha = 0.5, label = 'Build', color = rsg_orange)
+ mean_base = self.base[field].mean()
+ mean = self.build[field].mean()
+ median_base = self.base[field].median()
+ median = self.build[field].median()
+ plt.axvline(mean_base, color = 'b', linestyle = '-', label = 'Base Mean')
+ plt.axvline(median_base, color = 'b', linestyle = '--', label = 'Base Median')
+ plt.axvline(mean, color = 'r', linestyle = '-', label = 'Build Mean')
+ plt.axvline(median, color = 'r', linestyle = '--',label = 'Build Median')
+ plt.legend(loc = 'upper right')
+ ylims = plt.ylim()[1]
+ plt.text(mean_base + div/4, ylims-ylims/32, "mean: {:0.2f}".format(mean_base), color = 'b')
+ plt.text(mean_base + div/4, ylims - 5*ylims/32, "median: {:0.0f}".format(median_base), color = 'b')
+ plt.text(mean_base + div/4, ylims-2*ylims/32, "mean: {:0.2f}".format(mean), size = 'medium',color = 'r')
+ plt.text(mean_base + div/4, ylims-6*ylims/32, "median: {:0.0f}".format(median), color = 'r')
+ plt.text(self.base[field].min() , ylims/32, "min: {:0.0f}".format(self.base[field].min()), color = 'b')
+ plt.text(self.base[field].max()-div , ylims/32, "max: {:0.0f}".format(self.base[field].max()), color = 'b')
+ plt.text(self.build[field].min() , 2*ylims/32, "min: {:0.0f}".format(self.build[field].min()), color = 'r')
+ plt.text(self.base[field].max()-div , 2*ylims/32, "max: {:0.0f}".format(self.build[field].max()), color = 'r')
+
+ plt.xlabel(field)
+ plt.ylabel("MGRA's")
+ plt.title(field.replace('den','') + ' Density')
+ outfile = _join(self.path, "output", '4Ds_{}_plot.png'.format(field))
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ plt.savefig(outfile)
+ plt.clf()
+
+ def plot_discrete(field):
+ fig, ax = plt.subplots()
+ df1 = discretedf_base.groupby(field, as_index = False).agg({'mgra':'count','type':'first'})
+ df2 = discretedf_build.groupby(field, as_index = False).agg({'mgra':'count','type':'first'})
+ df = df1.append(df2)
+ ax = sns.barplot(x=field, y = 'mgra', hue = 'type', data = df)
+ ax.set_title(field)
+ outfile = _join(self.path, "output", '4Ds_{}_plot.png'.format(field))
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ ax.get_figure().savefig(outfile)
+
+ self.base = pd.read_csv(self.ref_path)
+ self.base['type'] = 'base'
+ self.build['type'] = 'build'
+
+ discretedf_base = self.base[['mgra','type']+self.discrete_fields]
+ discretedf_build = self.build[['mgra','type']+self.discrete_fields]
+
+ for f in self.continuous_fields:
+ plot_continuous(f)
+ for f in self.discrete_fields:
+ plot_discrete(f)
+
+ if self.maps:
+ import geopandas as gpd
+ import folium
+ from branca.colormap import linear
+ compare_int = self.base.merge(self.build, how = 'outer', on = 'mgra', suffixes = ['_base','_build'])
+ compare_int['diff'] = compare_int['TotInt'] - compare_int['totint']
+
+ compare_int = gpd.read_file(self.mgra_shape_file).rename(columns = {'MGRA':'mgra'}).merge(compare_int, how = 'left', on = 'mgra')
+ compare_int = compare_int.to_crs({'init': 'epsg:4326'})
+
+ colormap = linear.OrRd_09.scale(
+ compare_int.TotInt.min(),
+ compare_int.TotInt.max())
+ colormapA = linear.RdBu_04.scale(
+ compare_int['diff'].min(),
+ compare_int['diff'].min()*-1)
+
+ compare_int['colordiff'] = compare_int['diff'].map(lambda n: colormapA(n))
+ compare_int['colororig'] = compare_int['TotInt'].map(lambda n: colormap(n))
+ compare_int['colornew'] = compare_int['totint'].map(lambda n: colormap(n))
+
+ def makeheatmap(df, colormp, color_field, caption):
+ mapname = folium.Map(location=[32.76, -117.15], zoom_start = 13.459)
+ folium.GeoJson(df,
+ style_function=lambda feature: {
+ 'fillColor': feature['properties'][color_field],
+ 'color' : '#006fa1', #rsg_marine
+ 'weight' : 0,
+ 'fillOpacity' : 0.75,
+ }).add_to(mapname)
+
+ colormp.caption = caption
+ colormp.add_to(mapname)
+ return mapname
+
+ makeheatmap(compare_int,colormapA,'colordiff','Intersection Diff (base - build)').save('diff_intersections.html')
+ makeheatmap(compare_int,colormap,'colororig','Intersections').save('base_intersections.html')
+ makeheatmap(compare_int,colormap,'colornew','Intersections').save('build_intersections.html')
diff --git a/sandag_abm/src/main/emme/toolbox/initialize/initialize_matrices.py b/sandag_abm/src/main/emme/toolbox/initialize/initialize_matrices.py
new file mode 100644
index 0000000..f4e880a
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/initialize/initialize_matrices.py
@@ -0,0 +1,426 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// initialize_matrices.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Coordinates the initialization of all matrices.
+# The matrix names are listed for each of the model components / steps,
+# and the matrix IDs are assigned consistently from the set of matrices.
+# In each of the model steps the matrices are only referenced by name,
+# never by ID.
+#
+#
+# Inputs:
+# components: A list of the model components / steps for which to initialize matrices
+# One or more of "traffic_demand", "transit_demand",
+# "traffic_skims", "transit_skims", "external_internal_model",
+# "external_external_model", "truck_model", "commercial_vehicle_model"
+# periods: A list of periods for which to initialize matrices, "EA", "AM", "MD", "PM", "EV"
+# scenario: scenario to use for reference zone system and the emmebank in which
+# the matrices will be created
+#
+# Script example:
+"""
+ import os
+ import inro.emme.database.emmebank as _eb
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ main_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database", "emmebank"))
+ transit_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database_transit", "emmebank"))
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ traffic_components = [
+ "traffic_demand", "traffic_skims", "external_internal_model",
+ "external_external_model", "truck_model", "commercial_vehicle_model"]
+ transit_components = ["transit_demand", "transit_skims"]
+ base_scenario = main_emmebank.scenario(100)
+ transit_scenario = transit_emmebank.scenario(100)
+ initialize_matrices = modeller.tool("sandag.initialize.initialize_matrices")
+ # Create / initialize matrices in the base, traffic emmebank
+ initialize_matrices(traffic_components, periods, base_scenario)
+ # Create / initialize matrices in the transit emmebank
+ initialize_matrices(transit_components, periods, transit_scenario)
+"""
+
+
+TOOLBOX_ORDER = 9
+
+
+import inro.modeller as _m
+import traceback as _traceback
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class Initialize(_m.Tool(), gen_utils.Snapshot):
+
+ components = _m.Attribute(_m.ListType)
+ periods = _m.Attribute(_m.ListType)
+ delete_all_existing = _m.Attribute(bool)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ self._all_components = [
+ "traffic_demand",
+ "transit_demand",
+ "traffic_skims",
+ "transit_skims",
+ "external_internal_model",
+ "external_external_model",
+ "truck_model",
+ "commercial_vehicle_model",
+ ]
+ self._all_periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ self.components = self._all_components[:]
+ self.periods = self._all_periods[:]
+ self.attributes = ["components", "periods", "delete_all_existing"]
+ self._matrices = {}
+ self._count = {}
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Initialize matrices"
+ pb.description = """Creates and initializes the required matrices
+ for the selected components / sub-models.
+ Includes all components by default."""
+ pb.branding_text = "- SANDAG"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select("components", keyvalues=[(k,k) for k in self._all_components],
+ title="Select components:")
+ pb.add_select("periods", keyvalues=[(k,k) for k in self._all_periods],
+ title="Select periods:")
+ pb.add_checkbox("delete_all_existing", label="Delete all existing matrices")
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.components, self.periods, scenario, self.delete_all_existing)
+ run_msg = "Tool completed"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace("Create and initialize matrices", save_arguments=True)
+ def __call__(self, components, periods, scenario, delete_all_existing=False):
+ attributes = {
+ "components": components,
+ "periods": periods,
+ "delete_all_existing": delete_all_existing
+ }
+ gen_utils.log_snapshot("Initialize matrices", str(self), attributes)
+
+ self.scenario = scenario
+ emmebank = scenario.emmebank
+ self._create_matrix_tool = _m.Modeller().tool(
+ "inro.emme.data.matrix.create_matrix")
+ if components == "all":
+ components = self._all_components[:]
+ if periods == "all":
+ periods = self._all_periods[:]
+ if delete_all_existing:
+ with _m.logbook_trace("Delete all existing matrices"):
+ for matrix in emmebank.matrices():
+ emmebank.delete_matrix(matrix)
+ self.generate_matrix_list(self.scenario)
+ matrices = []
+ for component in components:
+ matrices.extend(self.create_matrices(component, periods))
+ # Note: matrix is also created in import_network
+ self._create_matrix_tool("ms1", "zero", "zero", scenario=self.scenario, overwrite=True)
+ return matrices
+
+ def generate_matrix_list(self, scenario):
+ self._matrices = dict(
+ (name, dict((k, []) for k in self._all_periods + ["ALL"]))
+ for name in self._all_components)
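+ # matrix ID counters by type: "ms" starts at 2 because ms1 is reserved for
+ # the "zero" matrix created in __call__; other types start at 100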
+ self._count = {"ms": 2, "md": 100, "mo": 100, "mf": 100}
+
+ for component in self._all_components:
+ fcn = getattr(self, component)
+ fcn()
+ # check dimensions can fit full set of matrices
+ type_names = [
+ ('mf', 'full_matrices'),
+ ('mo', 'origin_matrices'),
+ ('md', 'destination_matrices'),
+ ('ms', 'scalar_matrices')]
+ dims = scenario.emmebank.dimensions
+ for prefix, name in type_names:
+ if self._count[prefix] > dims[name]:
+ raise Exception("emmebank capacity error, increase %s to at least %s" % (name, self._count[prefix]))
+
+ def traffic_demand(self):
+ tmplt_matrices = [
+ ("SOV_NT_L", "SOV non-transponder demand low VOT"),
+ ("SOV_TR_L", "SOV transponder demand low VOT"),
+ ("HOV2_L", "HOV2 demand low VOT"),
+ ("HOV3_L", "HOV3+ demand low VOT"),
+ ("SOV_NT_M", "SOV non-transponder demand medium VOT"),
+ ("SOV_TR_M", "SOV transponder demand medium VOT"),
+ ("HOV2_M", "HOV2 demand medium VOT"),
+ ("HOV3_M", "HOV3+ demand medium VOT"),
+ ("SOV_NT_H", "SOV non-transponder demand high VOT"),
+ ("SOV_TR_H", "SOV transponder demand high VOT"),
+ ("HOV2_H", "HOV2 demand high VOT"),
+ ("HOV3_H", "HOV3+ demand high VOT"),
+ ("TRK_H", "Truck Heavy PCE demand"),
+ ("TRK_L", "Truck Light PCE demand"),
+ ("TRK_M", "Truck Medium PCE demand"),
+ ]
+ for period in self._all_periods:
+ self.add_matrices("traffic_demand", period,
+ [("mf", period + "_" + name, period + " " + desc)
+ for name, desc in tmplt_matrices])
+
+ def transit_demand(self):
+ tmplt_matrices = [
+ ("BUS", "local bus demand"),
+ ("PREM", "Premium modes demand"),
+ ("ALLPEN", "all modes xfer pen demand"),
+ ]
+ for period in self._all_periods:
+ for a_name in ["WLK", "PNR", "KNR"]:
+ self.add_matrices("transit_demand", period,
+ [("mf", "%s_%s%s" % (period, a_name, name), "%s %s access %s" % (period, a_name, desc))
+ for name, desc in tmplt_matrices])
+
+ def traffic_skims(self):
+ tp_desc = {"TR": "transponder", "NT": "non-transponder"}
+ vot_desc = {"L": "low", "M": "medium", "H": "high"}
+ truck_desc = {"L": "light", "M": "medium", "H": "heavy"}
+
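+ # skim matrix names follow the pattern <period>_<class>_<skim>,
+ # e.g. "AM_SOV_TR_M_TIME" or "MD_TRK_L_TOLLCOST"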
+ sov_tmplt_matrices = [
+ ("TIME", "SOV %s travel time"),
+ ("DIST", "SOV %s distance"),
+ ("REL", "SOV %s reliability skim"),
+ ("TOLLCOST", "SOV %s toll cost $0.01"),
+ ("TOLLDIST", "SOV %s distance on toll facility"),
+ ]
+ hov_tmplt_matrices = [
+ ("TIME", "HOV%s travel time"),
+ ("DIST", "HOV%s distance"),
+ ("REL", "HOV%s reliability skim"),
+ ("TOLLCOST", "HOV%s toll cost $0.01"),
+ ("TOLLDIST", "HOV%s distance on toll facility"),
+ ("HOVDIST", "HOV%s HOV distance on HOV facility")
+ ]
+ truck_tmplt_matrices = [
+ ("TIME", "Truck %s travel time"),
+ ("DIST", "Truck %s distance"),
+ ("TOLLCOST", "Truck %s toll cost $0.01")
+ ]
+ for period in self._all_periods:
+ for vot_type in "L", "M", "H":
+ for tp_type in "NT", "TR":
+ cls_name = "SOV_" + tp_type + "_" + vot_type
+ cls_desc = tp_desc[tp_type] + " " + vot_desc[vot_type] + " VOT"
+ self.add_matrices("traffic_skims", period,
+ [("mf", period + "_" + cls_name + "_" + name, period + " " + desc % cls_desc) for name, desc in sov_tmplt_matrices])
+ for hov_type in "2", "3":
+ cls_name = "HOV" + hov_type + "_" + vot_type
+ cls_desc = hov_type + " " + vot_desc[vot_type] + " VOT"
+ self.add_matrices("traffic_skims", period,
+ [("mf", period + "_" + cls_name + "_" + name,
+ period + " " + desc % cls_desc)
+ for name, desc in hov_tmplt_matrices])
+ for truck_type in "L", "M", "H":
+ cls_name = "TRK" + "_" + truck_type
+ cls_desc = truck_desc[truck_type]
+ self.add_matrices("traffic_skims", period,
+ [("mf", period + "_" + cls_name + "_" + name,
+ period + " " + desc % cls_desc)
+ for name, desc in truck_tmplt_matrices])
+
+ self.add_matrices("traffic_skims", "MD",
+ [("mf", "MD_TRK_TIME", "MD Truck generic travel time")])
+
+ def transit_skims(self):
+ tmplt_matrices = [
+ ("GENCOST", "total impedance"),
+ ("FIRSTWAIT", "first wait time"),
+ ("XFERWAIT", "transfer wait time"),
+ ("TOTALWAIT", "total wait time"),
+ ("FARE", "fare"),
+ ("XFERS", "num transfers"),
+ ("ACCWALK", "access walk time"),
+ ("XFERWALK", "transfer walk time"),
+ ("EGRWALK", "egress walk time"),
+ ("TOTALWALK", "total walk time"),
+ ("TOTALIVTT", "in-vehicle time"),
+ ("DWELLTIME", "dwell time"),
+ ("BUSIVTT", "local bus in-vehicle time"),
+ ("LRTIVTT", "LRT in-vehicle time"),
+ ("CMRIVTT", "Rail in-vehicle time"),
+ ("EXPIVTT", "Express in-vehicle time"),
+ ("LTDEXPIVTT", "Ltd exp bus in-vehicle time"),
+ ("BRTREDIVTT", "BRT red in-vehicle time"),
+ ("BRTYELIVTT", "BRT yellow in-vehicle time"),
+ ("TIER1IVTT", "Tier1 in-vehicle time"),
+ ("BUSDIST", "Bus IV distance"),
+ ("LRTDIST", "LRT IV distance"),
+ ("CMRDIST", "Rail IV distance"),
+ ("EXPDIST", "Express and Ltd IV distance"),
+ ("BRTDIST", "BRT red and yel IV distance"),
+ ("TIER1DIST", "Tier1 distance"),
+ ("TOTDIST", "Total transit distance")
+ ]
+ skim_sets = [
+ ("BUS", "Local bus only"),
+ ("PREM", "Premium modes only"),
+ ("ALLPEN", "All w/ xfer pen")
+ ]
+ for period in self._all_periods:
+ for set_name, set_desc in skim_sets:
+ self.add_matrices("transit_skims", period,
+ [("mf", period + "_" + set_name + "_" + name,
+ period + " " + set_desc + ": " + desc)
+ for name, desc in tmplt_matrices])
+
+ def truck_model(self):
+ tmplt_matrices = [
+ ("TRKL", "Truck Light"),
+ ("TRKM", "Truck Medium"),
+ ("TRKH", "Truck Heavy"),
+ ("TRKEI", "Truck external-internal"),
+ ("TRKIE", "Truck internal-external"),
+ ]
+ self.add_matrices("truck_model", "ALL",
+ [("mo", name + '_PROD', desc + ' production')
+ for name, desc in tmplt_matrices])
+ self.add_matrices("truck_model", "ALL",
+ [("md", name + '_ATTR', desc + ' attraction')
+ for name, desc in tmplt_matrices])
+
+ tmplt_matrices = [
+ ("TRKEE_DEMAND", "Truck total external-external demand"),
+ ("TRKL_FRICTION", "Truck Light friction factors"),
+ ("TRKM_FRICTION", "Truck Medium friction factors"),
+ ("TRKH_FRICTION", "Truck Heavy friction factors"),
+ ("TRKIE_FRICTION", "Truck internal-external friction factors"),
+ ("TRKEI_FRICTION", "Truck external-internal friction factors"),
+ ("TRKL_DEMAND", "Truck Light total demand"),
+ ("TRKM_DEMAND", "Truck Medium total demand"),
+ ("TRKH_DEMAND", "Truck Heavy total demand"),
+ ("TRKIE_DEMAND", "Truck internal-external total demand"),
+ ("TRKEI_DEMAND", "Truck external-internal total demand"),
+ ]
+ self.add_matrices("truck_model", "ALL",
+ [("mf", name, desc) for name, desc in tmplt_matrices])
+
+ # TODO: remove GP and TOLL matrices, no longer used
+ tmplt_matrices = [
+ ("TRK_L_VEH", "Truck Light demand"),
+ ("TRKLGP_VEH", "Truck Light GP-only vehicle demand"),
+ ("TRKLTOLL_VEH", "Truck Light toll vehicle demand"),
+ ("TRK_M_VEH", "Truck Medium demand"),
+ ("TRKMGP_VEH", "Truck Medium GP-only vehicle demand"),
+ ("TRKMTOLL_VEH", "Truck Medium toll vehicle demand"),
+ ("TRK_H_VEH", "Truck Heavy demand"),
+ ("TRKHGP_VEH", "Truck Heavy GP-only vehicle demand"),
+ ("TRKHTOLL_VEH", "Truck Heavy toll vehicle demand"),
+ ]
+ for period in self._all_periods:
+ self.add_matrices("truck_model", period,
+ [("mf", period + "_" + name, period + " " + desc)
+ for name, desc in tmplt_matrices])
+
+ def commercial_vehicle_model(self):
+ # TODO : remove commercial vehicle matrices, no longer used
+ tmplt_matrices = [
+ ('mo', 'COMVEH_PROD', 'Commercial vehicle production'),
+ ('md', 'COMVEH_ATTR', 'Commercial vehicle attraction'),
+ ('mf', 'COMVEH_BLENDED_SKIM', 'Commercial vehicle blended skim'),
+ ('mf', 'COMVEH_FRICTION', 'Commercial vehicle friction factors'),
+ ('mf', 'COMVEH_TOTAL_DEMAND', 'Commercial vehicle total demand all periods'),
+ ]
+ self.add_matrices("commercial_vehicle_model", "ALL",
+ [(ident, name, desc) for ident, name, desc in tmplt_matrices])
+
+ tmplt_matrices = [
+ ('COMVEH', 'Commercial vehicle total demand'),
+ ('COMVEHGP', 'Commercial vehicle GP demand'),
+ ('COMVEHTOLL', 'Commercial vehicle Toll demand'),
+ ]
+ for period in self._all_periods:
+ self.add_matrices("commercial_vehicle_model", period,
+ [("mf", period + "_" + name, period + " " + desc)
+ for name, desc in tmplt_matrices])
+
+ def external_internal_model(self):
+ tmplt_matrices = [
+ ('SOVTOLL_EIWORK', 'US to SD SOV Work TOLL demand'),
+ ('HOV2TOLL_EIWORK', 'US to SD HOV2 Work TOLL demand'),
+ ('HOV3TOLL_EIWORK', 'US to SD HOV3 Work TOLL demand'),
+ ('SOVGP_EIWORK', 'US to SD SOV Work GP demand'),
+ ('HOV2HOV_EIWORK', 'US to SD HOV2 Work HOV demand'),
+ ('HOV3HOV_EIWORK', 'US to SD HOV3 Work HOV demand'),
+ ('SOVTOLL_EINONWORK', 'US to SD SOV Non-Work TOLL demand'),
+ ('HOV2TOLL_EINONWORK', 'US to SD HOV2 Non-Work TOLL demand'),
+ ('HOV3TOLL_EINONWORK', 'US to SD HOV3 Non-Work TOLL demand'),
+ ('SOVGP_EINONWORK', 'US to SD SOV Non-Work GP demand'),
+ ('HOV2HOV_EINONWORK', 'US to SD HOV2 Non-Work HOV demand'),
+ ('HOV3HOV_EINONWORK', 'US to SD HOV3 Non-Work HOV demand'),
+ ]
+ for period in self._all_periods:
+ self.add_matrices("external_internal_model", period,
+ [("mf", period + "_" + name, period + " " + desc)
+ for name, desc in tmplt_matrices])
+
+ def external_external_model(self):
+ self.add_matrices("external_external_model", "ALL",
+ [("mf", "ALL_TOTAL_EETRIPS", "All periods Total for all modes external-external trips")])
+ tmplt_matrices = [
+ ('SOV_EETRIPS', 'SOV external-external demand'),
+ ('HOV2_EETRIPS', 'HOV2 external-external demand'),
+ ('HOV3_EETRIPS', 'HOV3 external-external demand'),
+ ]
+ for period in self._all_periods:
+ self.add_matrices("external_external_model", period,
+ [("mf", period + "_" + name, period + " " + desc)
+ for name, desc in tmplt_matrices])
+
+ def add_matrices(self, component, period, matrices):
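+ # assign the next sequential ID for each matrix type (e.g. "mf102") and
+ # advance the per-type counter so IDs remain consistent across components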
+ for ident, name, desc in matrices:
+ self._matrices[component][period].append([ident+str(self._count[ident]), name, desc])
+ self._count[ident] += 1
+
+ def create_matrices(self, component, periods):
+ with _m.logbook_trace("Create matrices for component %s" % (component.replace("_", " "))):
+ emmebank = self.scenario.emmebank
+ matrices = []
+ for period in periods + ["ALL"]:
+ with _m.logbook_trace("For period %s" % (period)):
+ for ident, name, desc in self._matrices[component][period]:
+ existing_matrix = emmebank.matrix(name)
+ if existing_matrix and (existing_matrix.id != ident):
+ raise Exception("Matrix name conflict '%s', with id %s instead of %s. Delete all matrices first."
+ % (name, existing_matrix.id, ident))
+ matrices.append(self._create_matrix_tool(ident, name, desc, scenario=self.scenario, overwrite=True))
+ return matrices
+
+ def get_matrix_names(self, component, periods, scenario):
+ self.generate_matrix_list(scenario)
+ matrices = []
+ for period in periods:
+ matrices.extend([m[1] for m in self._matrices[component][period]])
+ return matrices
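+
+ # example (sketch): list the AM-period transit skim names without creating them
+ # names = initialize_matrices.get_matrix_names("transit_skims", ["AM"], scenario)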
+
+ @_m.method(return_type=unicode)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
diff --git a/sandag_abm/src/main/emme/toolbox/initialize/initialize_transit_database.py b/sandag_abm/src/main/emme/toolbox/initialize/initialize_transit_database.py
new file mode 100644
index 0000000..513dba3
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/initialize/initialize_transit_database.py
@@ -0,0 +1,168 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// initialize_transit_database.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Creates and sets up the database for transit assignments under the
+# 'Database_transit' directory. An existing database is overwritten;
+# the TAZs are removed and the TAP nodes are converted to zones.
+#
+#
+# Inputs:
+# base_scenario: the base traffic and transit scenario with TAZs, used as
+# the reference zone system and source network, and whose emmebank
+# provides the database dimensions. Defaults to the current primary scenario.
+#
+# Script example:
+"""
+ import os
+ import inro.emme.database.emmebank as _eb
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ main_emmebank = _eb.Emmebank(os.path.join(main_directory, "emme_project", "Database", "emmebank"))
+ base_scenario = main_emmebank.scenario(100)
+ initialize_transit_db = modeller.tool("sandag.initialize.initialize_transit_database")
+ initialize_transit_db(base_scenario)
+"""
+TOOLBOX_ORDER = 8
+
+
+import inro.modeller as _m
+import inro.emme.network as _network
+import inro.emme.database.emmebank as _eb
+from inro.emme.desktop.exception import AddDatabaseError
+import traceback as _traceback
+import shutil as _shutil
+import time
+import os
+
+join = os.path.join
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class InitializeTransitDatabase(_m.Tool(), gen_utils.Snapshot):
+
+ base_scenario = _m.Attribute(_m.InstanceType)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ self.base_scenario = _m.Modeller().scenario
+ self.attributes = ["base_scenario"]
+
+ def from_snapshot(self, snapshot):
+ super(InitializeTransitDatabase, self).from_snapshot(snapshot)
+ # custom from_snapshot to load scenario object
+ self.base_scenario = _m.Modeller().emmebank.scenario(self.base_scenario)
+ return self
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Initialize transit database"
+ pb.description = """Create and setup database for transit assignments under 'Database_transit' directory.
+ Will overwrite an existing database. The TAZs will be removed and TAP nodes converted to zones."""
+ pb.branding_text = "- SANDAG"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_scenario("base_scenario",
+ title="Base scenario:", note="Base traffic and transit scenario with TAZs.")
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self(self.base_scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Initialize transit database', save_arguments=True)
+ def __call__(self, base_scenario, add_database=True):
+ attributes = {"base_scenario": base_scenario.id}
+ gen_utils.log_snapshot("Initialize transit database", str(self), attributes)
+ create_function = _m.Modeller().tool("inro.emme.data.function.create_function")
+ build_transit_scen = _m.Modeller().tool("sandag.assignment.build_transit_scenario")
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+
+ base_eb = base_scenario.emmebank
+ project_dir = os.path.dirname(os.path.dirname(base_eb.path))
+ main_directory = os.path.dirname(project_dir)
+ props = load_properties(os.path.join(main_directory, "conf", "sandag_abm.properties"))
+ scenarioYear = props["scenarioYear"]
+
+ transit_db_dir = join(project_dir, "Database_transit")
+ transit_db_path = join(transit_db_dir, "emmebank")
+ network = base_scenario.get_partial_network(["NODE"], include_attributes=True)
+ num_zones = sum([1 for n in network.nodes() if n["@tap_id"] > 0])
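+ # re-use the base emmebank dimensions, with the centroids resized to the
+ # number of TAP nodes (which become the transit zones) and 10 scenarios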
+ dimensions = base_eb.dimensions
+ dimensions["centroids"] = num_zones
+ dimensions["scenarios"] = 10
+ if not os.path.exists(transit_db_dir):
+ os.mkdir(transit_db_dir)
+ if os.path.exists(transit_db_path):
+ transit_eb = _eb.Emmebank(transit_db_path)
+ for scenario in transit_eb.scenarios():
+ transit_eb.delete_scenario(scenario.id)
+ for function in transit_eb.functions():
+ transit_eb.delete_function(function.id)
+ if transit_eb.dimensions != dimensions:
+ _eb.change_dimensions(transit_db_path, dimensions, keep_backup=False)
+ else:
+ transit_eb = _eb.create(transit_db_path, dimensions)
+
+ transit_eb.title = base_eb.title[:65] + "-transit"
+ transit_eb.coord_unit_length = base_eb.coord_unit_length
+ transit_eb.unit_of_length = base_eb.unit_of_length
+ transit_eb.unit_of_cost = base_eb.unit_of_cost
+ transit_eb.unit_of_energy = base_eb.unit_of_energy
+ transit_eb.use_engineering_notation = base_eb.use_engineering_notation
+ transit_eb.node_number_digits = base_eb.node_number_digits
+
+ zone_scenario = build_transit_scen(
+ period="AM", base_scenario=base_scenario, transit_emmebank=transit_eb,
+ scenario_id=base_scenario.id, scenario_title="%s transit zones" % (base_scenario.title),
+ data_table_name=scenarioYear, overwrite=True)
+ for function in base_scenario.emmebank.functions():
+ create_function(function.id, function.expression, transit_eb)
+ if add_database:
+ self.add_database(transit_eb)
+ return zone_scenario
+
+ def add_database(self, emmebank):
+ modeller = _m.Modeller()
+ desktop = modeller.desktop
+ data_explorer = desktop.data_explorer()
+ for db in data_explorer.databases():
+ if os.path.normpath(db.path) == os.path.normpath(emmebank.path):
+ return
+ try:
+ data_explorer.add_database(emmebank.path)
+ except AddDatabaseError:
+ pass # database has already been added to the project
diff --git a/sandag_abm/src/main/emme/toolbox/master_run.py b/sandag_abm/src/main/emme/toolbox/master_run.py
new file mode 100644
index 0000000..de0a8ad
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/master_run.py
@@ -0,0 +1,1220 @@
+# //////////////////////////////////////////////////////////////////////////////
+# //// ///
+# //// Copyright INRO, 2016-2017. ///
+# //// Rights to use and modify are granted to the ///
+# //// San Diego Association of Governments and partner agencies. ///
+# //// This copyright notice must be preserved. ///
+# //// ///
+# //// model/master_run.py ///
+# //// ///
+# //// ///
+# //// ///
+# //// ///
+# //////////////////////////////////////////////////////////////////////////////
+#
+# The Master run tool is the primary method to operate the SANDAG
+# travel demand model. It operates all the model components.
+#
+# main_directory: Main ABM directory: directory which contains all of the
+# ABM scenario data, including this project. The default is the parent
+# directory of the current Emme project.
+# scenario_id: Scenario ID for the base imported network data. The result
+# scenarios are indexed in the next five scenarios by time period.
+# scenario_title: title to use for the scenario.
+# emmebank_title: title to use for the Emmebank (Emme database)
+# num_processors: the number of processors to use for traffic and transit
+# assignments and skims, aggregate demand models (where required) and
+# other parallelized procedures in Emme. Default is Max available - 1.
+# Properties loaded from conf/sandag_abm.properties:
+# When using the tool UI, the sandag_abm.properties file is read,
+# the values are cached, and the inputs below are pre-set. When the
+# tool's Run button is clicked, this file is written out with the
+# values specified.
+# Sample rate by iteration: three values for the sample rates for each iteration
+# Start from iteration: iteration from which to start the model run
+# Skip steps: optional checkboxes to skip model steps.
+# Note that most steps are dependent upon the results of the previous steps.
+# Select link: add select link analyses for traffic.
+# See the Select link analysis section under the Traffic assignment tool.
+#
+# Also reads and processes the per-scenario
+# vehicle_class_availability.csv (optional): 0 or 1 indicators by vehicle
+# class for specified facilities, indicating which classes may use each facility
+#
+# Script example:
+"""
+import inro.modeller as _m
+import os
+modeller = _m.Modeller()
+desktop = modeller.desktop
+
+master_run = modeller.tool("sandag.master_run")
+main_directory = os.path.dirname(os.path.dirname(desktop.project_path()))
+scenario_id = 100
+scenario_title = "Base 2015 scenario"
+emmebank_title = "Base 2015 with updated landuse"
+num_processors = "MAX-1"
+master_run(main_directory, scenario_id, scenario_title, emmebank_title, num_processors)
+"""
+
+TOOLBOX_ORDER = 1
+VIRUTALENV_PATH = "C:\\python_virtualenv\\abm14_2_0"
+
+import inro.modeller as _m
+import inro.emme.database.emmebank as _eb
+
+import traceback as _traceback
+import glob as _glob
+import subprocess as _subprocess
+import ctypes as _ctypes
+import json as _json
+import shutil as _shutil
+import tempfile as _tempfile
+from copy import deepcopy as _copy
+from collections import defaultdict as _defaultdict
+import time as _time
+import socket as _socket
+import sys
+import os
+
+import pandas as pd
+import numpy as np
+import csv
+import datetime
+import pyodbc
+import win32com.client as win32
+
+_join = os.path.join
+_dir = os.path.dirname
+_norm = os.path.normpath
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+props_utils = _m.Modeller().module("sandag.utilities.properties")
+
+
+class MasterRun(props_utils.PropertiesSetter, _m.Tool(), gen_utils.Snapshot):
+ main_directory = _m.Attribute(unicode)
+ scenario_id = _m.Attribute(int)
+ scenario_title = _m.Attribute(unicode)
+ emmebank_title = _m.Attribute(unicode)
+ num_processors = _m.Attribute(str)
+ select_link = _m.Attribute(unicode)
+ username = _m.Attribute(unicode)
+ password = _m.Attribute(unicode)
+
+ properties_path = _m.Attribute(unicode)
+
+ tool_run_msg = ""
+
+ def __init__(self):
+ super(MasterRun, self).__init__()
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ self.main_directory = _dir(project_dir)
+ self.properties_path = _join(_dir(project_dir), "conf", "sandag_abm.properties")
+ self.scenario_id = 100
+ self.scenario_title = ""
+ self.emmebank_title = ""
+ self.num_processors = "MAX-1"
+ self.select_link = '[]'
+ self.username = os.environ.get("USERNAME")
+ self.attributes = [
+ "main_directory", "scenario_id", "scenario_title", "emmebank_title",
+ "num_processors", "select_link"
+ ]
+ self._log_level = "ENABLED"
+ self.LOCAL_ROOT = "C:\\abm_runs"
+
+ def page(self):
+ self.load_properties()
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Master run ABM"
+ pb.description = """Runs the SANDAG ABM, assignments, and other demand model tools."""
+ pb.branding_text = "- SANDAG - Model"
+ tool_proxy_tag = pb.tool_proxy_tag
+
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('main_directory', 'directory',
+ title='Select main ABM directory', note='')
+ pb.add_text_box('scenario_id', title="Scenario ID:")
+ pb.add_text_box('scenario_title', title="Scenario title:", size=80)
+ pb.add_text_box('emmebank_title', title="Emmebank title:", size=60)
+ dem_utils.add_select_processors("num_processors", pb, self)
+
+ # username and password input for distributed assignment
+ # username also used in the folder name for the local drive operation
+ pb.add_html('''
+ <div>
+ <strong>Credentials for remote run</strong><br>
+ Username: <input type="text" size="20"><br>
+ Password: <input type="password" size="20"><br>
+ <br>
+ Note: required for running distributed traffic assignments using PsExec.<br>
+ Distributed / single node modes are configured in "config/server-config.csv".
+ The username is also used for the folder name when running on the local drive.
+ </div>
+ ''' % {"tool_proxy_tag": tool_proxy_tag})
+
+ # defined in properties utilities
+ self.add_properties_interface(pb, disclosure=True)
+ # redirect properties file after browse of main_directory
+ pb.add_html("""
+""" % {"tool_proxy_tag": tool_proxy_tag})
+
+ traffic_assign = _m.Modeller().tool("sandag.assignment.traffic_assignment")
+ traffic_assign._add_select_link_interface(pb)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self.save_properties()
+ self(self.main_directory, self.scenario_id, self.scenario_title, self.emmebank_title,
+ self.num_processors, self.select_link, username=self.username, password=self.password)
+ run_msg = "Model run complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(error, _traceback.format_exc())
+
+ raise
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ @_m.logbook_trace("Master run model", save_arguments=True)
+ def __call__(self, main_directory, scenario_id, scenario_title, emmebank_title, num_processors,
+ select_link=None, periods=["EA", "AM", "MD", "PM", "EV"], username=None, password=None):
+ attributes = {
+ "main_directory": main_directory,
+ "scenario_id": scenario_id,
+ "scenario_title": scenario_title,
+ "emmebank_title": emmebank_title,
+ "num_processors": num_processors,
+ "select_link": select_link,
+ "periods": periods,
+ "username": username,
+ }
+ gen_utils.log_snapshot("Master run model", str(self), attributes)
+
+ modeller = _m.Modeller()
+ # Checking that the virtualenv path is set and the folder is installed
+ if not os.path.exists(VIRUTALENV_PATH):
+ raise Exception("Python virtual environment not installed at expected location %s" % VIRUTALENV_PATH)
+ venv_path = os.environ.get("PYTHON_VIRTUALENV")
+ if not venv_path:
+ raise Exception("Environment variable PYTHON_VIRTUALENV not set, start Emme from 'start_emme_with_virtualenv.bat'")
+ if not venv_path == VIRUTALENV_PATH:
+ raise Exception("PYTHON_VIRTUALENV is not the expected value (%s instead of %s)" % (venv_path, VIRUTALENV_PATH))
+ venv_path_found = False
+ for path in sys.path:
+ if VIRUTALENV_PATH in path:
+ venv_path_found = True
+ break
+ if not venv_path_found:
+ raise Exception("Python virtual environment not found in system path %s" % VIRUTALENV_PATH)
+ copy_scenario = modeller.tool("inro.emme.data.scenario.copy_scenario")
+ run4Ds = modeller.tool("sandag.import.run4Ds")
+ import_network = modeller.tool("sandag.import.import_network")
+ input_checker = modeller.tool("sandag.import.input_checker")
+ init_transit_db = modeller.tool("sandag.initialize.initialize_transit_database")
+ init_matrices = modeller.tool("sandag.initialize.initialize_matrices")
+ import_demand = modeller.tool("sandag.import.import_seed_demand")
+ build_transit_scen = modeller.tool("sandag.assignment.build_transit_scenario")
+ transit_assign = modeller.tool("sandag.assignment.transit_assignment")
+ run_truck = modeller.tool("sandag.model.truck.run_truck_model")
+ external_internal = modeller.tool("sandag.model.external_internal")
+ external_external = modeller.tool("sandag.model.external_external")
+ import_auto_demand = modeller.tool("sandag.import.import_auto_demand")
+ import_transit_demand = modeller.tool("sandag.import.import_transit_demand")
+ export_transit_skims = modeller.tool("sandag.export.export_transit_skims")
+ export_for_transponder = modeller.tool("sandag.export.export_for_transponder")
+ export_network_data = modeller.tool("sandag.export.export_data_loader_network")
+ export_matrix_data = modeller.tool("sandag.export.export_data_loader_matrices")
+ export_tap_adjacent_lines = modeller.tool("sandag.export.export_tap_adjacent_lines")
+ export_for_commercial_vehicle = modeller.tool("sandag.export.export_for_commercial_vehicle")
+ validation = modeller.tool("sandag.validation.validation")
+ file_manager = modeller.tool("sandag.utilities.file_manager")
+ utils = modeller.module('sandag.utilities.demand')
+ load_properties = modeller.tool('sandag.utilities.properties')
+ run_summary = modeller.tool("sandag.utilities.run_summary")
+
+ self.username = username
+ self.password = password
+
+ props = load_properties(_join(main_directory, "conf", "sandag_abm.properties"))
+ props.set_year_specific_properties(_join(main_directory, "input", "parametersByYears.csv"))
+ props.set_year_specific_properties(_join(main_directory, "input", "filesByYears.csv"))
+ props.save()
+ # Log current state of props file for debugging of UI / file sync issues
+ attributes = dict((name, props["RunModel." + name]) for name in self._run_model_names)
+ _m.logbook_write("SANDAG properties file", attributes=attributes)
+ if self._properties: # Tool has been called via the UI
+ # Compare UI values and file values to make sure they are the same
+ error_text = ("Different value found in sandag_abm.properties than specified in UI for '%s'. "
+ "Close sandag_abm.properties if open in any text editor, check UI and re-run.")
+ for name in self._run_model_names:
+ if getattr(self, name) != props["RunModel." + name]:
+ raise Exception(error_text % name)
+
+ scenarioYear = str(props["scenarioYear"])
+ startFromIteration = props["RunModel.startFromIteration"]
+ precision = props["RunModel.MatrixPrecision"]
+ minSpaceOnC = props["RunModel.minSpaceOnC"]
+ sample_rate = props["sample_rates"]
+ end_iteration = len(sample_rate)
+ scale_factor = props["cvm.scale_factor"]
+ visualizer_reference_path = props["visualizer.reference.path"]
+ visualizer_output_file = props["visualizer.output"]
+ visualizer_reference_label = props["visualizer.reference.label"]
+ visualizer_build_label = props["visualizer.build.label"]
+ mgraInputFile = props["mgra.socec.file"]
+
+ # for zone restructuring in network files
+ taz_cwk_file = props["taz.to.cluster.crosswalk.file"]
+ cluster_zone_file = props["cluster.zone.centroid.file"]
+
+ period_ids = list(enumerate(periods, start=int(scenario_id) + 1))
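+ # e.g. scenario_id "100" with the default periods yields
+ # [(101, "EA"), (102, "AM"), (103, "MD"), (104, "PM"), (105, "EV")]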
+
+ useLocalDrive = props["RunModel.useLocalDrive"]
+
+ skip4Ds = props["RunModel.skip4Ds"]
+ skipInputChecker = props["RunModel.skipInputChecker"]
+ skipInitialization = props["RunModel.skipInitialization"]
+ deleteAllMatrices = props["RunModel.deleteAllMatrices"]
+ skipCopyWarmupTripTables = props["RunModel.skipCopyWarmupTripTables"]
+ skipCopyBikeLogsum = props["RunModel.skipCopyBikeLogsum"]
+ skipCopyWalkImpedance = props["RunModel.skipCopyWalkImpedance"]
+ skipWalkLogsums = props["RunModel.skipWalkLogsums"]
+ skipBikeLogsums = props["RunModel.skipBikeLogsums"]
+ skipBuildNetwork = props["RunModel.skipBuildNetwork"]
+ skipHighwayAssignment = props["RunModel.skipHighwayAssignment"]
+ skipTransitSkimming = props["RunModel.skipTransitSkimming"]
+ skipTransponderExport = props["RunModel.skipTransponderExport"]
+ skipCoreABM = props["RunModel.skipCoreABM"]
+ skipOtherSimulateModel = props["RunModel.skipOtherSimulateModel"]
+ skipMAASModel = props["RunModel.skipMAASModel"]
+ skipCTM = props["RunModel.skipCTM"]
+ skipEI = props["RunModel.skipEI"]
+ skipExternal = props["RunModel.skipExternalExternal"]
+ skipTruck = props["RunModel.skipTruck"]
+ skipTripTableCreation = props["RunModel.skipTripTableCreation"]
+ skipFinalHighwayAssignment = props["RunModel.skipFinalHighwayAssignment"]
+ skipFinalHighwayAssignmentStochastic = props["RunModel.skipFinalHighwayAssignmentStochastic"]
+ makeFinalHighwayAssignmentStochastic = not skipFinalHighwayAssignmentStochastic
+ skipFinalTransitAssignment = props["RunModel.skipFinalTransitAssignment"]
+ skipVisualizer = props["RunModel.skipVisualizer"]
+ skipDataExport = props["RunModel.skipDataExport"]
+ skipDataLoadRequest = props["RunModel.skipDataLoadRequest"]
+ skipDeleteIntermediateFiles = props["RunModel.skipDeleteIntermediateFiles"]
+ skipTransitShed = props["RunModel.skipTransitShed"]
+ transitShedThreshold = props["transitShed.threshold"]
+ transitShedTOD = props["transitShed.TOD"]
+
+ # check that visualizer.reference.path from filesbyyears.csv exists
+ if not os.path.exists(visualizer_reference_path):
+ raise Exception("Visualizer reference %s does not exist. Check filesbyyears.csv." % visualizer_reference_path)
+
+ if useLocalDrive:
+ folder_name = os.path.basename(main_directory)
+ if not os.path.exists(_join(self.LOCAL_ROOT, username, folder_name, "report")): # check free space only if it is a new run
+ self.check_free_space(minSpaceOnC)
+ # if initialization copy ALL files from remote
+ # else check file meta data and copy those that have changed
+ initialize = (not skipInitialization and startFromIteration == 1)
+ local_directory = file_manager(
+ "DOWNLOAD", main_directory, username, scenario_id, initialize=initialize)
+ self._path = local_directory
+ else:
+ self._path = main_directory
+
+ drive, path_no_drive = os.path.splitdrive(self._path)
+ path_forward_slash = path_no_drive.replace("\\", "/")
+ input_dir = _join(self._path, "input")
+ input_truck_dir = _join(self._path, "input_truck")
+ output_dir = _join(self._path, "output")
+ validation_dir = _join(self._path, "analysis/validation")
+ main_emmebank = _eb.Emmebank(_join(self._path, "emme_project", "Database", "emmebank"))
+ if emmebank_title:
+ main_emmebank.title = emmebank_title
+ external_zones = "1-12"
+
+ travel_modes = ["auto", "tran", "nmot", "othr"]
+ core_abm_files = ["Trips*.omx", "InternalExternalTrips*.omx"]
+ core_abm_files = [mode + name for name in core_abm_files for mode in travel_modes]
+ smm_abm_files = ["AirportTrips*.omx", "CrossBorderTrips*.omx", "VisitorTrips*.omx"]
+ smm_abm_files = [mode + name for name in smm_abm_files for mode in travel_modes]
+ maas_abm_files = ["EmptyAVTrips.omx", "TNCVehicleTrips*.omx"]
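+ # the comprehensions above expand each trip table name per travel mode, e.g.
+ # core_abm_files -> ["autoTrips*.omx", "tranTrips*.omx", "nmotTrips*.omx",
+ # "othrTrips*.omx", "autoInternalExternalTrips*.omx", ...]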
+
+ relative_gap = props["convergence"]
+ max_assign_iterations = 1000
+ mgra_lu_input_file = props["mgra.socec.file"]
+
+ with _m.logbook_trace("Setup and initialization"):
+ self.set_global_logbook_level(props)
+
+ # Swap Server Configurations
+ self.run_proc("serverswap.bat", [drive, path_no_drive, path_forward_slash], "Run ServerSwap")
+ self.check_for_fatal(_join(self._path, "logFiles", "serverswap.log"),
+ "ServerSwap failed! Open logFiles/serverswap.log for details.")
+ self.run_proc("checkAtTransitNetworkConsistency.cmd", [drive, path_forward_slash],
+ "Checking if AT and Transit Networks are consistent")
+ self.check_for_fatal(_join(self._path, "logFiles", "AtTransitCheck_event.log"),
+ "AT and Transit network consistency checking failed! Open AtTransitCheck_event.log for details.")
+
+ if startFromIteration == 1: # only run the setup / init steps if starting from iteration 1
+ if not skipWalkLogsums:
+ self.run_proc("runSandagWalkLogsums.cmd", [drive, path_forward_slash],
+ "Walk - create AT logsums and impedances")
+ if not skipCopyWalkImpedance:
+ self.copy_files(["walkMgraEquivMinutes.csv", "walkMgraTapEquivMinutes.csv", "microMgraEquivMinutes.csv", "microMgraTapEquivMinutes.csv"],
+ input_dir, output_dir)
+
+ if not skip4Ds:
+ run4Ds(path=self._path, int_radius=0.65, ref_path=visualizer_reference_path)
+
+
+ mgraFile = 'mgra13_based_input' + str(scenarioYear) + '.csv'
+ self.complete_work(scenarioYear, input_dir, output_dir, mgraFile, "walkMgraEquivMinutes.csv")
+
+ if not skipBuildNetwork:
+ base_scenario = import_network(
+ source=input_dir,
+ merged_scenario_id=scenario_id,
+ title=scenario_title,
+ data_table_name=scenarioYear,
+ overwrite=True,
+ emmebank=main_emmebank)
+
+ if "modify_network.py" in os.listdir(os.getcwd()):
+ try:
+ with _m.logbook_trace("Modify network script"):
+ import modify_network
+ reload(modify_network)
+ modify_network.run(base_scenario)
+ except ImportError as e:
+ # optional script failed to import; continue with the network as built
+ pass
+
+ hwy_network = self.update_centroid_connectors(
+ input_dir,
+ base_scenario,
+ main_emmebank,
+ external_zones,
+ taz_cwk_file,
+ cluster_zone_file)
+
+ base_scenario.publish_network(hwy_network)
+
+ if not skipInputChecker:
+ input_checker(path=self._path)
+
+ export_tap_adjacent_lines(_join(output_dir, "tapLines.csv"), base_scenario)
+ # parse vehicle availability file by time-of-day
+ availability_file = "vehicle_class_availability.csv"
+ availabilities = self.parse_availability_file(_join(input_dir, availability_file), periods)
+ # initialize per time-period scenarios
+ for number, period in period_ids:
+ title = "%s - %s assign" % (base_scenario.title, period)
+ # copy_scenario(base_scenario, number, title, overwrite=True)
+ _m.logbook_write(
+ name="Copy scenario %s to %s" % (base_scenario.number, number),
+ attributes={
+ 'from_scenario': base_scenario.number,
+ 'scenario_id': number,
+ 'overwrite': True,
+ 'scenario_title': title
+ }
+ )
+ if main_emmebank.scenario(number):
+ main_emmebank.delete_scenario(number)
+ scenario = main_emmebank.copy_scenario(base_scenario.number, number)
+ scenario.title = title
+ # Apply availabilities by facility and vehicle class to this time period
+ self.apply_availabilities(period, scenario, availabilities)
+ else:
+ base_scenario = main_emmebank.scenario(scenario_id)
+
+ if not skipInitialization:
+ # initialize traffic demand, skims, truck, CV, EI, EE matrices
+ traffic_components = [
+ "traffic_skims",
+ "truck_model",
+ "external_internal_model", "external_external_model"]
+ if not skipCopyWarmupTripTables:
+ traffic_components.append("traffic_demand")
+ init_matrices(traffic_components, periods, base_scenario, deleteAllMatrices)
+
+ transit_scenario = init_transit_db(base_scenario, add_database=not useLocalDrive)
+ transit_emmebank = transit_scenario.emmebank
+ transit_components = ["transit_skims"]
+ if not skipCopyWarmupTripTables:
+ transit_components.append("transit_demand")
+ init_matrices(transit_components, periods, transit_scenario, deleteAllMatrices)
+ else:
+ transit_emmebank = _eb.Emmebank(_join(self._path, "emme_project", "Database_transit", "emmebank"))
+ transit_scenario = transit_emmebank.scenario(base_scenario.number)
+
+ if not skipCopyWarmupTripTables:
+ # import seed auto demand and seed truck demand
+ for period in periods:
+ omx_file = _join(input_dir, "trip_%s.omx" % period)
+ import_demand(omx_file, "AUTO", period, base_scenario)
+ import_demand(omx_file, "TRUCK", period, base_scenario)
+
+ if not skipBikeLogsums:
+ self.run_proc("runSandagBikeLogsums.cmd", [drive, path_forward_slash],
+ "Bike - create AT logsums and impedances")
+ if not skipCopyBikeLogsum:
+ self.copy_files(["bikeMgraLogsum.csv", "bikeTazLogsum.csv"], input_dir, output_dir)
+
+ else:
+ base_scenario = main_emmebank.scenario(scenario_id)
+ transit_emmebank = _eb.Emmebank(_join(self._path, "emme_project", "Database_transit", "emmebank"))
+ transit_scenario = transit_emmebank.scenario(base_scenario.number)
+
+ # Check that setup files were generated
+ self.run_proc("CheckOutput.bat", [drive + path_no_drive, 'Setup'], "Check for outputs")
+
+ # Note: iteration indexes from 0, msa_iteration indexes from 1
+ for iteration in range(startFromIteration - 1, end_iteration):
+ msa_iteration = iteration + 1
+ with _m.logbook_trace("Iteration %s" % msa_iteration):
+ if not skipCoreABM[iteration] or not skipOtherSimulateModel[iteration] or not skipMAASModel[iteration]:
+ self.run_proc("runMtxMgr.cmd", [drive, drive + path_no_drive], "Start matrix manager")
+ self.run_proc("runHhMgr.cmd", [drive, drive + path_no_drive], "Start Hh manager")
+
+ if not skipHighwayAssignment[iteration]:
+ # run traffic assignment
+ # export traffic skims
+ with _m.logbook_trace("Traffic assignment and skims"):
+ self.run_traffic_assignments(
+ base_scenario, period_ids, msa_iteration, relative_gap,
+ max_assign_iterations, num_processors)
+ self.run_proc("CreateD2TAccessFile.bat", [drive, path_forward_slash],
+ "Create drive to transit access file", capture_output=True)
+
+ if not skipTransitSkimming[iteration]:
+ # run transit assignment
+ # export transit skims
+ with _m.logbook_trace("Transit assignments and skims"):
+ for number, period in period_ids:
+ src_period_scenario = main_emmebank.scenario(number)
+ transit_assign_scen = build_transit_scen(
+ period=period, base_scenario=src_period_scenario,
+ transit_emmebank=transit_emmebank,
+ scenario_id=src_period_scenario.id,
+ scenario_title="%s %s transit assign" % (base_scenario.title, period),
+ data_table_name=scenarioYear, overwrite=True)
+ transit_assign(period, transit_assign_scen, data_table_name=scenarioYear,
+ skims_only=True, num_processors=num_processors)
+
+ omx_file = _join(output_dir, "transit_skims.omx")
+ export_transit_skims(omx_file, periods, transit_scenario)
+
+ if not skipTransponderExport[iteration]:
+ am_scenario = main_emmebank.scenario(base_scenario.number + 2)
+ export_for_transponder(output_dir, num_processors, am_scenario)
+
+ # For each step, move trip matrices so the run will stop if the CT-RAMP
+ # model doesn't produce csv/omx files for assignment;
+ # also needed as CT-RAMP does not overwrite existing files
+ if not skipCoreABM[iteration]:
+ self.remove_prev_iter_files(core_abm_files, output_dir, iteration)
+ self.run_proc(
+ "runSandagAbm_SDRM.cmd",
+ [drive, drive + path_forward_slash, sample_rate[iteration], msa_iteration],
+ "Java-Run CT-RAMP", capture_output=True)
+ if not skipOtherSimulateModel[iteration]:
+ self.remove_prev_iter_files(smm_abm_files, output_dir, iteration)
+ self.run_proc(
+ "runSandagAbm_SMM.cmd",
+ [drive, drive + path_forward_slash, sample_rate[iteration], msa_iteration],
+ "Java-Run airport model, visitor model, cross-border model", capture_output=True)
+
+ if not skipMAASModel[iteration]:
+ self.remove_prev_iter_files(maas_abm_files, output_dir, iteration)
+ self.run_proc(
+ "runSandagAbm_MAAS.cmd",
+ [drive, drive + path_forward_slash, sample_rate[iteration], msa_iteration],
+ "Java-Run AV allocation model and TNC routing model", capture_output=True)
+
+ if not skipCTM[iteration]:
+ export_for_commercial_vehicle(output_dir, base_scenario)
+ self.run_proc(
+ "cvm.bat",
+ [drive, path_no_drive, path_forward_slash, scale_factor, mgra_lu_input_file,
+ "tazcentroids_cvm.csv"],
+ "Commercial vehicle model", capture_output=True)
+ if msa_iteration == startFromIteration:
+ external_zones = "1-12"
+ if not skipTruck[iteration]:
+ # run truck model (generate truck trips)
+ run_truck(True, input_dir, input_truck_dir, num_processors, base_scenario)
+ # run EI model "US to SD External Trip Model"
+ if not skipEI[iteration]:
+ external_internal(input_dir, base_scenario)
+ # run EE model
+ if not skipExternal[iteration]:
+ external_external(input_dir, external_zones, base_scenario)
+
+ # import demand from all sub-market models from CT-RAMP and
+ # add CV trips to auto demand
+ # add EE and EI trips to auto demand
+ if not skipTripTableCreation[iteration]:
+ import_auto_demand(output_dir, external_zones, num_processors, base_scenario)
+
+ if not skipFinalHighwayAssignment:
+ with _m.logbook_trace("Final traffic assignments"):
+ # Final iteration is assignment only, no skims
+ final_iteration = 4
+ self.run_traffic_assignments(
+ base_scenario, period_ids, final_iteration, relative_gap, max_assign_iterations,
+ num_processors, select_link, makeFinalHighwayAssignmentStochastic, input_dir)
+
+ if not skipFinalTransitAssignment:
+ import_transit_demand(output_dir, transit_scenario)
+ with _m.logbook_trace("Final transit assignments"):
+ # Final iteration includes the transit skims per ABM-1072
+ for number, period in period_ids:
+ src_period_scenario = main_emmebank.scenario(number)
+ transit_assign_scen = build_transit_scen(
+ period=period, base_scenario=src_period_scenario,
+ transit_emmebank=transit_emmebank, scenario_id=src_period_scenario.id,
+ scenario_title="%s - %s transit assign" % (base_scenario.title, period),
+ data_table_name=scenarioYear, overwrite=True)
+ transit_assign(period, transit_assign_scen, data_table_name=scenarioYear,
+ num_processors=num_processors)
+ omx_file = _join(output_dir, "transit_skims.omx")
+ export_transit_skims(omx_file, periods, transit_scenario, big_to_zero=True)
+
+ if not skipTransitShed:
+ # write walk and drive transit sheds
+ self.run_proc("runtransitreporter.cmd", [drive, path_forward_slash, transitShedThreshold, transitShedTOD],
+ "Create walk and drive transit sheds",
+ capture_output=True)
+
+ if not skipVisualizer:
+ self.run_proc("RunViz.bat",
+ [drive, path_no_drive, visualizer_reference_path, visualizer_output_file, "NO", visualizer_reference_label, visualizer_build_label, mgraInputFile],
+ "HTML Visualizer", capture_output=True)
+
+ if not skipDataExport:
+ # export network and matrix results from Emme directly to T if using local drive
+ output_directory = _join(self._path, "output")
+ export_network_data(self._path, scenario_id, main_emmebank, transit_emmebank, num_processors)
+ export_matrix_data(output_directory, base_scenario, transit_scenario)
+ # export core ABM data
+ # Note: uses relative project structure, so cannot redirect to T drive
+ self.run_proc("DataExporter.bat", [drive, path_no_drive], "Export core ABM data",capture_output=True)
+
+ #Validation for 2016 scenario
+ if scenarioYear == "2016":
+ validation(self._path, main_emmebank, base_scenario) # to create source_EMME.xlsx
+
+ # #Create Worksheet for ABM Validation using PowerBI Visualization #JY: can be uncommented if deciding to incorporate PowerBI vis in ABM workflow
+ # self.run_proc("VisPowerBI.bat", # forced to update excel links
+ # [drive, path_no_drive, scenarioYear, 0],
+ # "VisPowerBI",
+ # capture_output=True)
+
+ ### CL: Below step is temporarily used to update validation output files. When Gregor complete Upload procedure, below step should be removed. 05/31/20
+ # self.run_proc("ExcelUpdate.bat", # forced to update excel links
+ # [drive, path_no_drive, scenarioYear, 0],
+ # "ExcelUpdate",
+ # capture_output=True)
+
+ ### ES: Commented out until this segment is updated to reference new database. 9/10/20 ###
+ # add segments below for auto-reporting, YMA, 1/23/2019
+ # add this loop to find the scenario_id in the [dimension].[scenario] table
+
+ #database_scenario_id = 0
+ #int_hour = 0
+ #while int_hour <= 96:
+
+ # database_scenario_id = self.sql_select_scenario(scenarioYear, end_iteration,
+ # sample_rate[end_iteration - 1], path_no_drive,
+ # start_db_time)
+ # if database_scenario_id > 0:
+ # break
+
+ # int_hour = int_hour + 1
+ # _time.sleep(900) # wait for 15 mins
+
+ # if load failed, then send notification email
+ #if database_scenario_id == 0 and int_hour > 96:
+ # str_request_check_result = self.sql_check_load_request(scenarioYear, path_no_drive, username,
+ # start_db_time)
+ # print(str_request_check_result)
+ # sys.exit(0)
+ # self.send_notification(str_request_check_result,username) #not working in server
+ #else:
+ # print(database_scenario_id)
+ # self.run_proc("DataSummary.bat", # get summary from database, added for auto-reporting
+ # [drive, path_no_drive, scenarioYear, database_scenario_id],
+ # "Data Summary")
+
+ # self.run_proc("ExcelUpdate.bat", # forced to update excel links
+ # [drive, path_no_drive, scenarioYear, database_scenario_id],
+ # "Excel Update",
+ # capture_output=True)
+
+ # terminate all java processes
+ _subprocess.call("taskkill /F /IM java.exe")
+
+ # close all DOS windows
+ _subprocess.call("taskkill /F /IM cmd.exe")
+
+ # UPLOAD DATA AND SWITCH PATHS
+ if useLocalDrive:
+ file_manager("UPLOAD", main_directory, username, scenario_id,
+ delete_local_files=not skipDeleteIntermediateFiles)
+ self._path = main_directory
+ drive, path_no_drive = os.path.splitdrive(self._path)
+ init_transit_db.add_database(
+ _eb.Emmebank(_join(main_directory, "emme_project", "Database_transit", "emmebank")))
+
+ if not skipDataLoadRequest:
+ start_db_time = datetime.datetime.now() # record the time to search for request id in the load request table, YMA, 1/23/2019
+ # start_db_time = start_db_time + datetime.timedelta(minutes=0)
+ self.run_proc("DataLoadRequest.bat",
+ [drive + path_no_drive, end_iteration, scenarioYear, sample_rate[end_iteration - 1]],
+ "Data load request")
+
+ # delete trip table files in iteration sub folder if model finishes without errors
+ if not useLocalDrive and not skipDeleteIntermediateFiles:
+ for msa_iteration in range(startFromIteration, end_iteration + 1):
+ self.delete_files(
+ ["auto*Trips*.omx", "tran*Trips*.omx", "nmot*.omx", "othr*.omx", "trip*.omx"],
+ _join(output_dir, "iter%s" % (msa_iteration)))
+
+ # record run time
+ run_summary(path=self._path)
+
+ def set_global_logbook_level(self, props):
+ self._log_level = props.get("RunModel.LogbookLevel", "ENABLED")
+ log_all = _m.LogbookLevel.ATTRIBUTE | _m.LogbookLevel.VALUE | _m.LogbookLevel.COOKIE | _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG
+ log_states = {
+ "ENABLED": log_all,
+ "DISABLE_ON_ERROR": log_all,
+ "NO_EXTERNAL_REPORTS": log_all,
+ "NO_REPORTS": _m.LogbookLevel.ATTRIBUTE | _m.LogbookLevel.COOKIE | _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG,
+ "TITLES_ONLY": _m.LogbookLevel.TRACE | _m.LogbookLevel.LOG,
+ "DISABLED": _m.LogbookLevel.NONE,
+ }
+ _m.logbook_write("Setting logbook level to %s" % self._log_level)
+ try:
+ _m.logbook_level(log_states[self._log_level])
+ except KeyError:
+ raise Exception("properties.RunModel.LogLevel: value must be one of %s" % ",".join(log_states.keys()))
+
+ def run_traffic_assignments(self, base_scenario, period_ids, msa_iteration, relative_gap,
+ max_assign_iterations, num_processors, select_link=None,
+ makeFinalHighwayAssignmentStochastic=False, input_dir=None):
+ modeller = _m.Modeller()
+ traffic_assign = modeller.tool("sandag.assignment.traffic_assignment")
+ export_traffic_skims = modeller.tool("sandag.export.export_traffic_skims")
+ output_dir = _join(self._path, "output")
+ main_emmebank = base_scenario.emmebank
+
+ machine_name = _socket.gethostname().lower()
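+ # conf/server-config.csv is read as a plain header-plus-rows table; the
+ # columns referenced below (inferred from usage in this method) are
+ # ServerName, SNODE ("yes"/"no"), NODE1, NODE2, THREADN1 and THREADN2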
+ with open(_join(self._path, "conf", "server-config.csv")) as f:
+ columns = f.next().split(",")
+ for line in f:
+ values = dict(zip(columns, line.split(",")))
+ name = values["ServerName"].lower()
+ if name == machine_name:
+ server_config = values
+ break
+ else:
+ _m.logbook_write("Warning: current machine name not found in "
+ "conf\\server-config.csv ServerName column")
+ server_config = {"SNODE": "yes"}
+ distributed = server_config["SNODE"] == "no"
+ if distributed and not makeFinalHighwayAssignmentStochastic:
+ scen_map = dict((p, main_emmebank.scenario(n)) for n, p in period_ids)
+ input_args = {
+ "msa_iteration": msa_iteration,
+ "relative_gap": relative_gap,
+ "max_assign_iterations": max_assign_iterations,
+ "select_link": select_link
+ }
+
+ periods_node1 = ["PM", "MD"]
+ input_args["num_processors"] = server_config["THREADN1"]
+ database_path1, skim_names1 = self.setup_remote_database(
+ [scen_map[p] for p in periods_node1], periods_node1, 1, msa_iteration)
+ self.start_assignments(
+ server_config["NODE1"], database_path1, periods_node1, scen_map, input_args)
+
+ periods_node2 = ["AM"]
+ input_args["num_processors"] = server_config["THREADN2"]
+ database_path2, skim_names2 = self.setup_remote_database(
+ [scen_map[p] for p in periods_node2], periods_node2, 2, msa_iteration)
+ self.start_assignments(
+ server_config["NODE2"], database_path2, periods_node2, scen_map, input_args)
+
+ try:
+ # run assignments locally
+ periods_local = ["EA", "EV"]
+ for period in periods_local:
+ local_scenario = scen_map[period]
+ traffic_assign(period, msa_iteration, relative_gap, max_assign_iterations,
+ num_processors, local_scenario, select_link)
+ omx_file = _join(output_dir, "traffic_skims_%s.omx" % period)
+ if msa_iteration <= 4:
+ export_traffic_skims(period, omx_file, base_scenario)
+ scenarios = {
+ database_path1: [scen_map[p] for p in periods_node1],
+ database_path2: [scen_map[p] for p in periods_node2]
+ }
+ skim_names = {
+ database_path1: skim_names1, database_path2: skim_names2
+ }
+ self.wait_and_copy([database_path1, database_path2], scenarios, skim_names)
+ except:
+ # Note: this will kill ALL python processes - not suitable if servers are being
+ # used for other tasks
+ _subprocess.call("taskkill /F /T /S \\\\%s /IM python.exe" % server_config["NODE1"])
+ _subprocess.call("taskkill /F /T /S \\\\%s /IM python.exe" % server_config["NODE2"])
+ raise
+ else:
+ for number, period in period_ids:
+ period_scenario = main_emmebank.scenario(number)
+ traffic_assign(period, msa_iteration, relative_gap, max_assign_iterations,
+ num_processors, period_scenario, select_link, stochastic=makeFinalHighwayAssignmentStochastic, input_directory=input_dir)
+ omx_file = _join(output_dir, "traffic_skims_%s.omx" % period)
+ if msa_iteration <= 4:
+ export_traffic_skims(period, omx_file, base_scenario)
+
+ def run_proc(self, name, arguments, log_message, capture_output=False):
+ path = _join(self._path, "bin", name)
+ if not os.path.exists(path):
+ raise Exception("No command / batch file '%s'" % path)
+ command = path + " " + " ".join([str(x) for x in arguments])
+ attrs = {"command": command, "name": name, "arguments": arguments}
+ with _m.logbook_trace(log_message, attributes=attrs):
+ if capture_output and self._log_level != "NO_EXTERNAL_REPORTS":
+ report = _m.PageBuilder(title="Process run %s" % name)
+ report.add_html('Command:<br><br><tt>%s</tt><br>' % command)
+ # capture stderr to a temporary file so it can be added to the report
+ err_file_ref, err_file_path = _tempfile.mkstemp(suffix='.log')
+ err_file = os.fdopen(err_file_ref, "w")
+ try:
+ output = _subprocess.check_output(command, stderr=err_file, cwd=self._path, shell=True)
+ report.add_html('Output:<br><br><pre>%s</pre>' % output)
+ except _subprocess.CalledProcessError as error:
+ report.add_html('Output:<br><br><pre>%s</pre>' % error.output)
+ raise
+ finally:
+ err_file.close()
+ with open(err_file_path, 'r') as f:
+ error_msg = f.read()
+ os.remove(err_file_path)
+ if error_msg:
+ report.add_html('Error message(s):<br><br><pre>%s</pre>' % error_msg)
+ try:
+ # No raise on writing report error
+ # due to observed issue with runs generating reports which cause
+ # errors when logged
+ _m.logbook_write("Process run %s report" % name, report.render())
+ except Exception as error:
+ print _time.strftime("%Y-%m-%d %H:%M:%S")
+ print "Error writing report '%s' to logbook" % name
+ print error
+ print _traceback.format_exc(error)
+ if self._log_level == "DISABLE_ON_ERROR":
+ _m.logbook_level(_m.LogbookLevel.NONE)
+ else:
+ _subprocess.check_call(command, cwd=self._path, shell=True)
+
+ @_m.logbook_trace("Check free space on C")
+ def check_free_space(self, min_space):
+ path = "c:\\"
+ temp, total, free = _ctypes.c_ulonglong(), _ctypes.c_ulonglong(), _ctypes.c_ulonglong()
+ if sys.version_info >= (3,) or isinstance(path, unicode):
+ fun = _ctypes.windll.kernel32.GetDiskFreeSpaceExW
+ else:
+ fun = _ctypes.windll.kernel32.GetDiskFreeSpaceExA
+ ret = fun(path, _ctypes.byref(temp), _ctypes.byref(total), _ctypes.byref(free))
+ if ret == 0:
+ raise _ctypes.WinError()
+ total = total.value / (1024.0 ** 3)
+ free = free.value / (1024.0 ** 3)
+ if free < min_space:
+ raise Exception("Free space on C drive (%.1f GB) is less than the required minimum (%s GB)" % (free, min_space))
+
+ def remove_prev_iter_files(self, file_names, output_dir, iteration):
+ if iteration == 0:
+ self.delete_files(file_names, output_dir)
+ else:
+ self.move_files(file_names, output_dir, _join(output_dir, "iter%s" % (iteration)))
+
+ def copy_files(self, file_names, from_dir, to_dir):
+ with _m.logbook_trace("Copy files %s" % ", ".join(file_names)):
+ for file_name in file_names:
+ from_file = _join(from_dir, file_name)
+ _shutil.copy(from_file, to_dir)
+
+ def complete_work(self, scenarioYear, input_dir, output_dir, input_file, output_file):
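+ # Append a default record (value 30 in each column) for any MGRA present in
+ # the socioeconomic input file but missing from the walk equivalent-minutes
+ # output, so downstream steps see a complete zone set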
+
+ fullList = np.array(pd.read_csv(_join(input_dir, input_file))['mgra'])
+ workList = np.array(pd.read_csv(_join(output_dir, output_file))['i'])
+
+ work_set = set(workList)
+ notMatch = [x for x in fullList if x not in work_set]
+
+ if notMatch:
+ out_file = _join(output_dir, output_file)
+ with open(out_file, 'ab') as csvfile:
+ spamwriter = csv.writer(csvfile)
+ # spamwriter.writerow([])
+ for item in notMatch:
+ # pdb.set_trace()
+ spamwriter.writerow([item, item, '30', '30', '30'])
+
+ def move_files(self, file_names, from_dir, to_dir):
+ with _m.logbook_trace("Move files %s" % ", ".join(file_names)):
+ if not os.path.exists(to_dir):
+ os.mkdir(to_dir)
+ for file_name in file_names:
+ all_files = _glob.glob(_join(from_dir, file_name))
+ for path in all_files:
+ try:
+ dst_file = _join(to_dir, os.path.basename(path))
+ if os.path.exists(dst_file):
+ os.remove(dst_file)
+ _shutil.move(path, to_dir)
+ except Exception as error:
+ _m.logbook_write(
+ "Error moving file %s" % path, {"error": _traceback.format_exc(error)})
+
+ def delete_files(self, file_names, directory):
+ with _m.logbook_trace("Delete files %s" % ", ".join(file_names)):
+ for file_name in file_names:
+ all_files = _glob.glob(_join(directory, file_name))
+ for path in all_files:
+ os.remove(path)
+
+ def check_for_fatal(self, file_name, error_msg):
+ # 'a+' creates the log file if it is missing; rewind before scanning
+ with open(file_name, 'a+') as f:
+ f.seek(0)
+ for line in f:
+ if "FATAL" in line:
+ raise Exception(error_msg)
+
+ def set_active(self, emmebank):
+ modeller = _m.Modeller()
+ desktop = modeller.desktop
+ data_explorer = desktop.data_explorer()
+ for db in data_explorer.databases():
+ if _norm(db.path) == _norm(unicode(emmebank)):
+ db.open()
+ return db
+ return None
+
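+ # Expected layout of vehicle_class_availability.csv (illustrative values only;
+ # the CSV reader upper-cases the field names):
+ #
+ # FACILITY_NAME,VEHICLE_CLASS,EA_AVAIL,AM_AVAIL,MD_AVAIL,PM_AVAIL,EV_AVAIL
+ # I-15 Express,DA,1,0,0,0,1
+ #
+ # which yields availabilities["AM"]["I-15 Express"]["DA"] == 0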
+ def parse_availability_file(self, file_path, periods):
+ if os.path.exists(file_path):
+ availabilities = _defaultdict(lambda: _defaultdict(lambda: dict()))
+ # NOTE: CSV Reader sets the field names to UPPERCASE for consistency
+ with gen_utils.CSVReader(file_path) as r:
+ for row in r:
+ name = row.pop("FACILITY_NAME")
+ class_name = row.pop("VEHICLE_CLASS")
+ for period in periods:
+ is_avail = int(row[period + "_AVAIL"])
+ if is_avail not in [1, 0]:
+ msg = "Error processing file '%s': value for period %s class %s facility %s is not 1 or 0"
+ raise Exception(msg % (file_path, period, class_name, name))
+ availabilities[period][name][class_name] = is_avail
+ else:
+ availabilities = None
+ return availabilities
+
+ def apply_availabilities(self, period, scenario, availabilities):
+ if availabilities is None:
+ return
+
+ network = scenario.get_network()
+ hov2 = network.mode("h")
+ hov2_trnpdr = network.mode("H")
+ hov3 = network.mode("i")
+ hov3_trnpdr = network.mode("I")
+ sov = network.mode("s")
+ sov_trnpdr = network.mode("S")
+ heavy_trk = network.mode("v")
+ heavy_trk_trnpdr = network.mode("V")
+ medium_trk = network.mode("m")
+ medium_trk_trnpdr = network.mode("M")
+ light_trk = network.mode("t")
+ light_trk_trnpdr = network.mode("T")
+
+ class_mode_map = {
+ "DA": set([sov_trnpdr, sov]),
+ "S2": set([hov2_trnpdr, hov2]),
+ "S3": set([hov3_trnpdr, hov3]),
+ "TRK_L": set([light_trk_trnpdr, light_trk]),
+ "TRK_M": set([medium_trk_trnpdr, medium_trk]),
+ "TRK_H": set([heavy_trk_trnpdr, heavy_trk]),
+ }
+ report = ["<div>Link mode changes</div>"]
+ for name, class_availabilities in availabilities[period].iteritems():
+ report.append("<h3>%s</h3>" % name)
+ changes = _defaultdict(lambda: 0)
+ for link in network.links():
+ if name in link["#name"]:
+ for class_name, is_avail in class_availabilities.iteritems():
+ modes = class_mode_map[class_name]
+ if is_avail == 1 and not modes.issubset(link.modes):
+ link.modes |= modes
+ changes["added %s to" % class_name] += 1
+ elif is_avail == 0 and modes.issubset(link.modes):
+ link.modes -= modes
+ changes["removed %s from" % class_name] += 1
+ report.append("<ul>")
+ for x in changes.iteritems():
+ report.append("<li>%s %s links</li>" % x)
+ report.append("</ul>")
+ scenario.publish_network(network)
+
+ title = "Apply global class availabilities by faclity name for period %s" % period
+ log_report = _m.PageBuilder(title=title)
+ for item in report:
+ log_report.add_html(item)
+ _m.logbook_write(title, log_report.render())
+
+ def setup_remote_database(self, src_scenarios, periods, remote_num, msa_iteration):
+ with _m.logbook_trace("Set up remote database #%s for %s" % (remote_num, ", ".join(periods))):
+ init_matrices = _m.Modeller().tool("sandag.initialize.initialize_matrices")
+ create_function = _m.Modeller().tool("inro.emme.data.function.create_function")
+ src_emmebank = src_scenarios[0].emmebank
+ remote_db_dir = _join(self._path, "emme_project", "Database_remote" + str(remote_num))
+ if msa_iteration == 1:
+ # Create and initialize database at first iteration, overwrite existing
+ if os.path.exists(remote_db_dir):
+ _shutil.rmtree(remote_db_dir)
+ _time.sleep(1)
+ os.mkdir(remote_db_dir)
+ dimensions = src_emmebank.dimensions
+ dimensions["scenarios"] = len(src_scenarios)
+ remote_emmebank = _eb.create(_join(remote_db_dir, "emmebank"), dimensions)
+ try:
+ remote_emmebank.title = src_emmebank.title
+ remote_emmebank.coord_unit_length = src_emmebank.coord_unit_length
+ remote_emmebank.unit_of_length = src_emmebank.unit_of_length
+ remote_emmebank.unit_of_cost = src_emmebank.unit_of_cost
+ remote_emmebank.unit_of_energy = src_emmebank.unit_of_energy
+ remote_emmebank.use_engineering_notation = src_emmebank.use_engineering_notation
+ remote_emmebank.node_number_digits = src_emmebank.node_number_digits
+
+ for src_scen in src_scenarios:
+ remote_scen = remote_emmebank.create_scenario(src_scen.id)
+ remote_scen.title = src_scen.title
+ for attr in sorted(src_scen.extra_attributes(), key=lambda x: x._id):
+ dst_attr = remote_scen.create_extra_attribute(
+ attr.type, attr.name, attr.default_value)
+ dst_attr.description = attr.description
+ for field in src_scen.network_fields():
+ remote_scen.create_network_field(
+ field.type, field.name, field.atype, field.description)
+ remote_scen.has_traffic_results = src_scen.has_traffic_results
+ remote_scen.has_transit_results = src_scen.has_transit_results
+ remote_scen.publish_network(src_scen.get_network())
+ for function in src_emmebank.functions():
+ create_function(function.id, function.expression, remote_emmebank)
+ init_matrices(["traffic_skims", "traffic_demand"], periods, remote_scen)
+ finally:
+ remote_emmebank.dispose()
+
+ src_scen = src_scenarios[0]
+ with _m.logbook_trace("Copy demand matrices to remote database"):
+ with _eb.Emmebank(_join(remote_db_dir, "emmebank")) as remote_emmebank:
+ demand_matrices = init_matrices.get_matrix_names("traffic_demand", periods, src_scen)
+ for matrix_name in demand_matrices:
+ matrix = remote_emmebank.matrix(matrix_name)
+ src_matrix = src_emmebank.matrix(matrix_name)
+ if matrix.type == "SCALAR":
+ matrix.data = src_matrix.data
+ else:
+ matrix.set_data(src_matrix.get_data(src_scen.id), src_scen.id)
+ skim_matrices = init_matrices.get_matrix_names("traffic_skims", periods, src_scen)
+ return remote_db_dir, skim_matrices
+
+ def start_assignments(self, machine, database_path, periods, scenarios, assign_args):
+ with _m.logbook_trace("Start remote process for traffic assignments %s" % (", ".join(periods))):
+ assign_args["database_path"] = database_path
+ end_path = _join(database_path, "finish")
+ if os.path.exists(end_path):
+ os.remove(end_path)
+ for period in periods:
+ assign_args["period_scenario"] = scenarios[period].id
+ assign_args["period"] = period
+ with open(_join(database_path, "start_%s.args" % period), 'w') as f:
+ _json.dump(assign_args, f, indent=4)
+ script_dir = _join(self._path, "python")
+ bin_dir = _join(self._path, "bin")
+ args = [
+ 'start %s\\PsExec.exe' % bin_dir,
+ '-c',
+ '-f',
+ '\\\\%s' % machine,
+ '-u \%s' % self.username,
+ '-p %s' % self.password,
+ "-d",
+ '%s\\emme_python.bat' % bin_dir,
+ "T:",
+ self._path,
+ '%s\\remote_run_traffic.py' % script_dir,
+ database_path,
+ ]
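+ # joined below, this launches a command of the illustrative form:
+ # start <bin>\PsExec.exe -c -f \\<machine> -u \<user> -p <password> -d
+ # <bin>\emme_python.bat T: <path> <script_dir>\remote_run_traffic.py <database_path>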
+ command = " ".join(args)
+ p = _subprocess.Popen(command, shell=True)
+
+ @_m.logbook_trace("Wait for remote assignments to complete and copy results")
+ def wait_and_copy(self, database_dirs, scenarios, matrices):
+ database_dirs = database_dirs[:]
+ wait = True
+ while wait:
+ _time.sleep(5)
+ for path in database_dirs:
+ end_path = _join(path, "finish")
+ if os.path.exists(end_path):
+ database_dirs.remove(path)
+ _time.sleep(2)
+ self.check_for_fatal(
+ end_path, "error during remote run of traffic assignment. "
+ "Check logFiles/traffic_assign_database_remote*.log")
+ self.copy_results(path, scenarios[path], matrices[path])
+ if not database_dirs:
+ wait = False
+
+ @_m.logbook_trace("Copy skim results from remote database", save_arguments=True)
+ def copy_results(self, database_path, scenarios, matrices):
+ with _eb.Emmebank(_join(database_path, "emmebank")) as remote_emmebank:
+ for dst_scen in scenarios:
+ remote_scen = remote_emmebank.scenario(dst_scen.id)
+ # Create extra attributes and network fields which do not exist
+ for attr in sorted(remote_scen.extra_attributes(), key=lambda x: x._id):
+ if not dst_scen.extra_attribute(attr.name):
+ dst_attr = dst_scen.create_extra_attribute(
+ attr.type, attr.name, attr.default_value)
+ dst_attr.description = attr.description
+ for field in remote_scen.network_fields():
+ if not dst_scen.network_field(field.type, field.name):
+ dst_scen.create_network_field(
+ field.type, field.name, field.atype, field.description)
+ dst_scen.has_traffic_results = remote_scen.has_traffic_results
+ dst_scen.has_transit_results = remote_scen.has_transit_results
+
+ dst_scen.publish_network(remote_scen.get_network())
+
+ dst_emmebank = dst_scen.emmebank
+ scen_id = dst_scen.id
+ for matrix_id in matrices:
+ src_matrix = remote_emmebank.matrix(matrix_id)
+ dst_matrix = dst_emmebank.matrix(matrix_id)
+ dst_matrix.set_data(src_matrix.get_data(scen_id), scen_id)
+
+ @_m.method(return_type=unicode)
+ def get_link_attributes(self):
+ export_utils = _m.Modeller().module("inro.emme.utility.export_utilities")
+ return export_utils.get_link_attributes(_m.Modeller().scenario)
+
+ def update_centroid_connectors(self, source, base_scenario, emmebank, external_zone, taz_cwk_file, cluster_zone_file):
+ adjust_network = _m.Modeller().module("inro.import.adjust_network_links")
+ return adjust_network.adjust_network_links(source, base_scenario, emmebank, external_zone, taz_cwk_file, cluster_zone_file)
+
+ def sql_select_scenario(self, year, iteration, sample, path, dbtime): # YMA, 1/24/2019
+ """Return scenario_id from [dimension].[scenario] given path"""
+
+ import datetime
+
+ sql_con = pyodbc.connect(driver='{SQL Server}',
+ server='sql2014a8',
+ database='abm_2',
+ trusted_connection='yes')
+
+ # dbtime = dbtime + datetime.timedelta(days=0)
+
+ df = pd.read_sql_query(
+ sql=("SELECT [scenario_id] "
+ "FROM [dimension].[scenario]"
+ "WHERE [year] = ? AND [iteration] = ? AND [sample_rate]= ? AND [path] Like ('%' + ? + '%') AND [date_loaded] > ? "),
+ con=sql_con,
+ params=[year, iteration, sample, path, dbtime]
+ )
+
+ if len(df) > 0:
+ return (df.iloc[len(df) - 1]['scenario_id'])
+ else:
+ return 0
+
+ def sql_check_load_request(self, year, path, user, ldtime): # YMA, 1/24/2019
+ """Return information from [data_load].[load_request] given path,username,and requested time"""
+
+ import datetime
+
+ t0 = ldtime + datetime.timedelta(minutes=-1)
+ t1 = t0 + datetime.timedelta(minutes=30)
+
+ sql_con = pyodbc.connect(driver='{SQL Server}',
+ server='sql2014a8',
+ database='abm_2',
+ trusted_connection='yes')
+
+ df = pd.read_sql_query(
+ sql=(
+ "SELECT [load_request_id],[year],[name],[path],[iteration],[sample_rate],[abm_version],[user_name],[date_requested],[loading],[loading_failed],[scenario_id] "
+ "FROM [data_load].[load_request] "
+ "WHERE [year] = ? AND [path] LIKE ('%' + ? + '%') AND [user_name] LIKE ('%' + ? + '%') AND [date_requested] >= ? AND [date_requested] <= ? "),
+ con=sql_con,
+ params=[year, path, user, t0, t1]
+ )
+
+ if len(df) > 0:
+ return "You have successfully made the loading request, but the loading to the database failed. \r\nThe information is below. \r\n\r\n" + df.to_string()
+ else:
+ return "The data load request was not successfully made, please double check the [data_load].[load_request] table to confirm."
+
+
+'''
+ def send_notification(self,str_message,user): # YMA, 1/24/2019, not working on server
+ """automate to send email notification if load request or loading failed"""
+
+ import win32com.client as win32
+
+ outlook = win32.Dispatch('outlook.application')
+ Msg = outlook.CreateItem(0)
+ Msg.To = user + '@sandag.org'
+ Msg.CC = 'yma@sandag.org'
+ Msg.Subject = 'Loading Scenario to Database Failed'
+ Msg.body = str_message + '\r\n' + '\r\n' + 'This email alert is auto generated.\r\n' + 'Please do not respond.\r\n'
+ Msg.send'''
diff --git a/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/distribution.py b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/distribution.py
new file mode 100644
index 0000000..c271e56
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/distribution.py
@@ -0,0 +1,197 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/commercial_vehicle/distribution.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Distributes the total daily trips from production and attraction vectors.
+# Friction factors are calculated based on a blended travel time skim of
+# 1/3 AM_SOV_NT_M_TIME and 2/3 MD_SOV_NT_M_TIME, and a table of friction factor
+# lookup values from commVehFF.csv.
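+# That is, blended_time = (1/3) * AM_SOV_NT_M_TIME + (2/3) * MD_SOV_NT_M_TIME, and
+# each O-D pair's friction factor is looked up from commVehFF.csv at that time.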
+#
+# Inputs:
+# input_directory: source directory for input files
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# input/commVehFF.csv
+#
+# Matrix inputs:
+# moCOMVEH_PROD, mdCOMVEH_ATTR
+# mfAM_SOV_NT_M_TIME, mfMD_SOV_NT_M_TIME
+#
+# Matrix intermediates (only used internally):
+# mfCOMVEH_BLENDED_SKIM, mfCOMVEH_FRICTION
+#
+# Matrix results:
+# mfCOMVEH_TOTAL_DEMAND
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ project_dir = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ input_dir = os.path.join(project_dir, "input")
+ base_scenario = modeller.scenario
+ distribution = modeller.tool("sandag.model.commercial_vehicle.distribution")
+ distribution(True, input_dir, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 53
+
+
+import inro.modeller as _m
+import traceback as _traceback
+
+import pandas as pd
+import os
+import numpy as np
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class CommercialVehicleDistribution(_m.Tool(), gen_utils.Snapshot):
+
+ input_directory = _m.Attribute(str)
+ run_generation = _m.Attribute(bool)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.attributes = ["input_directory"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Commercial Vehicle Distribution"
+ pb.description = """
+
+ Distributes the total daily commercial vehicle trips.
+ The very small truck generation model is based on the Phoenix
+ four-tire truck model documented in the TMIP Quick Response Freight Manual.
+
+ A simple gravity model is used to distribute the truck trips.
+ A blended travel time of
+ 1/3 AM_SOV_NT_M_TIME and 2/3 MD_SOV_NT_M_TIME is used, along with
+ friction factor lookup table stored in commVehFF.csv.
+
+ Input:
+
+ (1) Level-of-service matrices for the AM peak period (6 am to 10 am) 'mfAM_SOVGPM_TIME'
+ and midday period (10 am to 3 pm) 'mfMD_SOVGPM_TIME'
+ which contain congested travel time (in minutes).
+
+ (2) Trip generation results 'moCOMVEH_PROD' and 'mdCOMVEH_ATTR'
+
+ (3) A table of friction factor lookup values in commVehFF.csv.
+
+ Calculate commercial vehicle productions and attractions
+ based on mgra13_based_inputYYYY.csv.
+ The very small truck generation model is based on the Phoenix
+ four-tire truck model documented in the TMIP Quick Response Freight Manual.
+
+ Linear regression models generate trip ends, balancing attractions to productions.
+
+ Input: MGRA file in CSV format with the following fields:
+
+
+ (a) TOTEMP, total employment (same regardless of classification system);
+
+ (b) RETEMPN, retail trade employment per the NAICS classification system;
+
+ (c) FPSEMPN, financial and professional services employment per the NAICS classification system;
+
+ (d) HEREMPN, health, educational, and recreational employment per the NAICS classification system;
+
+ (e) OTHEMPN, other employment per the NAICS classification system;
+
+ (f) AGREMPN, agricultural employment per the NAICS classification system;
+
+ (g) MWTEMPN, manufacturing, warehousing, and transportation employment per the NAICS classification system; and,
+
+ (h) TOTHH, total households.
+
+
+ Output: Trip productions and attractions in matrices 'moCOMVEH_PROD' and 'mdCOMVEH_ATTR' respectively.
+
+ Run the 4 steps of the commercial vehicle model: generation, distribution,
+ time of day, toll diversion.
+
+ The very small truck generation model is based on the Phoenix
+ four-tire truck model documented in the TMIP Quick Response Freight Manual.
+
+"""
+ pb.branding_text = "- SANDAG - Model - Commercial vehicle"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+ pb.add_checkbox("run_generation", title=" ", label="Run generation (first iteration)")
+
+ pb.add_select_file('input_directory', 'directory',
+ title='Select input directory')
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.run_generation, self.input_directory, scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Commercial vehicle model', save_arguments=True)
+ def __call__(self, run_generation, input_directory, scenario):
+ attributes = {"run_generation": run_generation, "input_directory": input_directory}
+ gen_utils.log_snapshot("Commercial vehicle model", str(self), attributes)
+ generation = _m.Modeller().tool(
+ 'sandag.model.commercial_vehicle.generation')
+ distribution = _m.Modeller().tool(
+ 'sandag.model.commercial_vehicle.distribution')
+ time_of_day = _m.Modeller().tool(
+ 'sandag.model.commercial_vehicle.time_of_day')
+ diversion = _m.Modeller().tool(
+ 'sandag.model.commercial_vehicle.toll_diversion')
+ if run_generation:
+ generation(input_directory, scenario)
+ distribution(input_directory, scenario)
+ time_of_day(scenario)
+ diversion(scenario)
diff --git a/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/time_of_day.py b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/time_of_day.py
new file mode 100644
index 0000000..163bba7
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/time_of_day.py
@@ -0,0 +1,99 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/commercial_vehicle/time_of_day.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Applies time-of-day factoring to the Commercial vehicle total daily demand.
+# The diurnal factors are taken from the BAYCAST-90 model with adjustments
+# made during calibration to the very small truck values to better match counts.
+#
+# Inputs:
+# scenario: traffic scenario to use for reference zone system
+#
+# Matrix inputs:
+# mfCOMVEH_TOTAL_DEMAND
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# mfpp_COMVEH
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ base_scenario = modeller.scenario
+ time_of_day = modeller.tool("sandag.model.commercial_vehicle.time_of_day")
+ time_of_day(base_scenario)
+"""
+
+TOOLBOX_ORDER = 54
+
+
+import inro.modeller as _m
+import traceback as _traceback
+
+
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class TimeOfDay(_m.Tool()):
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Commercial Vehicle Time of Day split"
+ pb.description = """
+
+ Commercial vehicle time-of-day factoring.
+ The very small truck generation model is based on the Phoenix
+ four-tire truck model documented in the TMIP Quick Response Freight Manual.
+
+ The diurnal factors are taken from the BAYCAST-90 model with adjustments
+ made during calibration to the very small truck values to better match counts.
+
+ Input: A production/attraction format trip table matrix of daily very small truck trips.
+
+ Output: Five time-of-day-specific trip table matrices for very small trucks,
+ of the form 'mfpp_COMVEH'.
+"""
+ pb.branding_text = "- SANDAG - Model - Commercial vehicle"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Commercial vehicle Time of Day split')
+ def __call__(self, scenario):
+ matrix_calc = dem_utils.MatrixCalculator(scenario, 0)
+ periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ period_factors = [0.0235, 0.1, 0.5080, 0.1980, 0.1705]
+ for p, f in zip(periods, period_factors):
+ matrix_calc.add(
+ "mf%s_COMVEH" % p,
+ "%s * 0.5 * (mfCOMVEH_TOTAL_DEMAND + mfCOMVEH_TOTAL_DEMAND')" % f)
+ matrix_calc.run()
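+# The expression above converts the production/attraction table to
+# origin-destination format and factors it by period; an equivalent numpy
+# sketch (hypothetical standalone illustration, `daily` being the P/A matrix):
+#
+#     import numpy as np
+#     od = 0.5 * (daily + daily.T)  # average with transpose: P/A -> O-D
+#     demand = {p: f * od for p, f in zip(periods, period_factors)}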
diff --git a/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/toll_diversion.py b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/toll_diversion.py
new file mode 100644
index 0000000..07bc4fe
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/commercial_vehicle/toll_diversion.py
@@ -0,0 +1,119 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/commercial_vehicle/toll_diversion.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+# Applies toll and non-toll split to Commercial vehicle time period demand.
+# Uses the travel TIME for GP and TOLL modes as well as the TOLLCOST
+# by time period.
+#
+# Inputs:
+# scenario: traffic scenario to use for reference zone system
+#
+# Matrix inputs:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# mfpp_COMVEH
+# mfpp_SOVGPM_TIME, mfpp_SOVTOLLM_TIME, mfpp_SOVTOLLM_TOLLCOST
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# mfpp_COMVEHGP, mfpp_COMVEHTOLL
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ base_scenario = modeller.scenario
+ toll_diversion = modeller.tool("sandag.model.commercial_vehicle.toll_diversion")
+ toll_diversion(base_scenario)
+"""
+
+TOOLBOX_ORDER = 55
+
+
+import inro.modeller as _m
+import traceback as _traceback
+
+
+gen_utils = _m.Modeller().module('sandag.utilities.general')
+dem_utils = _m.Modeller().module('sandag.utilities.demand')
+
+
+class TollDiversion(_m.Tool()):
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Commercial vehicle toll diversion"
+ pb.description = """
+
+ Commercial vehicle toll and non-toll (GP) split.
+ The very small truck generation model is based on the Phoenix
+ four-tire truck model documented in the TMIP Quick Response Freight Manual.
+
+
+ Input: Time-of-day-specific trip table matrices 'mfpp_COMVEH',
+ and travel time for GP and TOLL modes 'mfpp_SOVGPM_TIME', 'mfpp_SOVTOLLM_TIME',
+ and toll cost 'mfpp_SOVTOLLM_TOLLCOST' (medium VOT bin).
+
+ Output: Corresponding time-of-day 'mfpp_COMVEHGP' and 'mfpp_COMVEHTOLL'
+ trip demand matrices.
+
+"""
+ pb.branding_text = "- SANDAG - Model - Commercial vehicle"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Commercial vehicle toll diversion')
+ def __call__(self, scenario):
+ emmebank = scenario.emmebank
+ matrix_calc = dem_utils.MatrixCalculator(scenario, "MAX-1")
+ init_matrix = _m.Modeller().tool(
+ "inro.emme.data.matrix.init_matrix")
+
+ periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+
+ nest = 10
+ vot = 0.02
+ toll_factor = 1
+ for p in periods:
+ with matrix_calc.trace_run("Diversion for %s" % p):
+ init_matrix("mf%s_COMVEHTOLL" % p, scenario=scenario)
+ params = {'p': p, 'v': vot, 'tf': toll_factor, 'n': nest}
+ utility = ('(mf%(p)s_SOVGPM_TIME - mf%(p)s_SOVTOLLM_TIME'
+ '- %(v)s * mf%(p)s_SOVTOLLM_TOLLCOST * %(tf)s) / %(n)s') % params
+ matrix_calc.add(
+ "mf%s_COMVEHTOLL" % p,
+ "mf%s_COMVEH / (1 + exp(- %s))" % (p, utility),
+ ["mf%s_SOVTOLLM_TOLLCOST" % p, 0, 0, "EXCLUDE"])
+ matrix_calc.add(
+ "mf%s_COMVEHGP" % p, "mf%(p)s_COMVEH - mf%(p)s_COMVEHTOLL" % {'p': p})
diff --git a/sandag_abm/src/main/emme/toolbox/model/external_external.py b/sandag_abm/src/main/emme/toolbox/model/external_external.py
new file mode 100644
index 0000000..c928d36
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/external_external.py
@@ -0,0 +1,190 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/external_external.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Runs the external-external, cross-regional demand model. Imports the total
+# daily demand from file and splits by time-of-day and SOVGP, HOV2HOV, and
+# HOV3HOV classes using fixed factors.
+#
+#
+# Inputs:
+# input_directory: source directory for input file
+# external_zones: the set of external zones specified as a range, default is "1-12"
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# input/externalExternalTripsByYear.csv
+# input/externalExternalTrips.csv
+# (if externalExternalTripsByYear.csv is unavailable)
+#
+# Matrix results:
+# pp_SOV_EETRIPS, pp_HOV2_EETRIPS, pp_HOV3_EETRIPS
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ input_dir = os.path.join(main_directory, "input")
+ external_zones = "1-12"
+ base_scenario = modeller.scenario
+ external_external = modeller.tool("sandag.model.external_external")
+ external_external(input_dir, external_zones, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 62
+
+
+import inro.modeller as _m
+
+import multiprocessing as _multiprocessing
+import traceback as _traceback
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class ExternalExternal(_m.Tool(), gen_utils.Snapshot):
+ input_directory = _m.Attribute(unicode)
+ external_zones = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.external_zones = "1-12"
+        self.attributes = ["input_directory", "external_zones"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "External external model"
+ pb.description = """
+ Total trips are read from externalExternalTripsByYear.csv for
+ the year in sandag_abm.properties. If this file does not exist
+ externalExternalTrips.csv will be used instead.
+ The total trips are split by time-of-day and traffic class
+ SOVGP, HOV2HOV, and HOV3HOV using fixed factors.
+ """
+ pb.branding_text = "- SANDAG - Model"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('input_directory', 'directory',
+ title='Select input directory')
+ pb.add_text_box("external_zones", title="External zones:")
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.input_directory, self.external_zones, scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('External-external model', save_arguments=True)
+ def __call__(self, input_directory, external_zones, scenario):
+ attributes = {
+ "external_zones": external_zones,
+ "input_directory": input_directory,
+ }
+ gen_utils.log_snapshot("External-external model", str(self), attributes)
+ emmebank = scenario.emmebank
+ matrix_calc = _m.Modeller().tool(
+ "inro.emme.matrix_calculation.matrix_calculator")
+
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(
+ os.path.join(os.path.dirname(input_directory), "conf", "sandag_abm.properties"))
+ year = int(props['scenarioYear'])
+
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ time_of_day_factors = [0.074, 0.137, 0.472, 0.183, 0.133]
+ modes = ["SOV", "HOV2", "HOV3"]
+ mode_factors = [0.43, 0.42, 0.15]
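+        # each period/mode slice is the total EE demand scaled by the product
+        # of the two factors, e.g. AM SOV = ALL_TOTAL_EETRIPS * 0.137 * 0.43
+        # (about 5.9% of daily EE trips)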
+
+ ee_matrix = emmebank.matrix("ALL_TOTAL_EETRIPS")
+ matrix_data = ee_matrix.get_data(scenario)
+ file_path = os.path.join(
+ input_directory, "externalExternalTripsByYear.csv")
+ if os.path.isfile(file_path):
+ with open(file_path, 'r') as f:
+ header = f.readline()
+ for line in f:
+ tyear, orig, dest, trips = line.split(",")
+ if int(tyear) == year:
+ matrix_data.set(int(orig), int(dest), float(trips))
+ else:
+ file_path = os.path.join(
+ input_directory, "externalExternalTrips.csv")
+ if not os.path.isfile(file_path):
+ raise Exception("External-external model: no file 'externalExternalTrips.csv' or 'externalExternalTripsByYear.csv'")
+ with open(file_path, 'r') as f:
+ header = f.readline()
+ for line in f:
+ orig, dest, trips = line.split(",")
+ matrix_data.set(int(orig), int(dest), float(trips))
+ _m.logbook_write("Control totals read from %s" % file_path)
+ ee_matrix.set_data(matrix_data, scenario)
+
+ # factor for final demand matrix by time and mode type
+ # all external-external trips are non-toll
+        # SOV_GP, SR2_HOV, SR3_HOV = "SOV", "HOV2", "HOV3"
+ for period, tod_fac in zip(periods, time_of_day_factors):
+ for mode, mode_fac in zip(modes, mode_factors):
+ spec = {
+ "expression": "ALL_TOTAL_EETRIPS * %s * %s" % (tod_fac, mode_fac),
+ "result": "mf%s_%s_EETRIPS" % (period, mode),
+ "constraint": {
+ "by_zone": {
+ "origins": external_zones,
+ "destinations": external_zones
+ }
+ },
+ "type": "MATRIX_CALCULATION"
+ }
+ matrix_calc(spec, scenario=scenario)
+
+ precision = float(props['RunModel.MatrixPrecision'])
+ self.matrix_rounding(scenario, precision)
+
+ @_m.logbook_trace('Controlled rounding of demand')
+ def matrix_rounding(self, scenario, precision):
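+        # cells smaller than `precision` are rounded to zero or up to the
+        # minimum value, with controlled rounding preserving the matrix totals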
+ round_matrix = _m.Modeller().tool(
+ "inro.emme.matrix_calculation.matrix_controlled_rounding")
+ emmebank = scenario.emmebank
+ periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ modes = ["SOV", "HOV2", "HOV3"]
+ for period in periods:
+ for mode in modes:
+ matrix = emmebank.matrix("mf%s_%s_EETRIPS" % (period, mode))
+ report = round_matrix(demand_to_round=matrix,
+ rounded_demand=matrix,
+ min_demand=precision,
+ values_to_round="SMALLER_THAN_MIN",
+ scenario=scenario)
+
diff --git a/sandag_abm/src/main/emme/toolbox/model/external_internal.py b/sandag_abm/src/main/emme/toolbox/model/external_internal.py
new file mode 100644
index 0000000..19203e6
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/external_internal.py
@@ -0,0 +1,331 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/external_internal.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Runs the external USA to internal demand model.
+# 1) Work and non-work trip gateway total trips are read from control totals
+# 2) Generates internal trip ends based on relative attractiveness from employment (by category) and households
+# 3) Applies time-of-day and occupancy factors
+# 4) Applies toll diversion model with toll and non-toll skims
+# Control totals are read from externalInternalControlTotalsByYear.csv for
+# the specified year in sandag_abm.properties. If this file does not exist
+# externalInternalControlTotals.csv will be used instead.
+#
+# Inputs:
+# input_directory: source directory for most input files, including demographics and trip rates
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# Note: YEAR is replaced by scenarioYear in the conf/sandag_abm.properties file
+# input/mgra13_based_inputYEAR.csv
+# input/externalInternalControlTotalsByYear.csv
+# input/externalInternalControlTotals.csv
+# (if externalInternalControlTotalsByYear.csv is unavailable)
+#
+# Matrix results:
+#    Note: pp is time period (EA, AM, MD, PM, EV); mode is one of SOVGP,
+#    HOV2HOV, HOV3HOV, SOVTOLL, HOV2TOLL, HOV3TOLL
+#    mfpp_mode_EIWORK, mfpp_mode_EINONWORK
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ input_dir = os.path.join(main_directory, "input")
+ base_scenario = modeller.scenario
+ external_internal = modeller.tool("sandag.model.external_internal")
+    external_internal(input_dir, base_scenario)
+"""
+
+TOOLBOX_ORDER = 61
+
+
+import inro.modeller as _m
+import numpy as np
+import pandas as pd
+import traceback as _traceback
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class ExternalInternal(_m.Tool(), gen_utils.Snapshot):
+ input_directory = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.attributes = ["input_directory"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "External internal model"
+ pb.description = """
+ Runs the external USA to internal demand model.
+ Control totals are read from externalInternalControlTotalsByYear.csv for
+ the specified year in sandag_abm.properties. If this file does not exist
+ externalInternalControlTotals.csv will be used instead."""
+ pb.branding_text = "- SANDAG - Model"
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('input_directory', 'directory',
+ title='Select input directory')
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.input_directory, scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('External-internal model', save_arguments=True)
+ def __call__(self, input_directory, scenario):
+ attributes = {"input_directory": input_directory}
+ gen_utils.log_snapshot("External-internal model", str(self), attributes)
+ np.seterr(divide='ignore', invalid='ignore')
+
+ emmebank = scenario.emmebank
+ zones = scenario.zone_numbers
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(
+ os.path.join(os.path.dirname(input_directory), "conf", "sandag_abm.properties"))
+
+ year = int(props['scenarioYear'])
+ mgra = pd.read_csv(
+ os.path.join(input_directory, 'mgra13_based_input%s.csv' % year))
+
+ # Load data
+ file_path = os.path.join(
+ input_directory, "externalInternalControlTotalsByYear.csv")
+ if os.path.isfile(file_path):
+ control_totals = pd.read_csv(file_path)
+ control_totals = control_totals[control_totals.year==year]
+ control_totals = control_totals.drop("year", axis=1)
+ else:
+ file_path = os.path.join(
+ input_directory, 'externalInternalControlTotals.csv')
+ if not os.path.isfile(file_path):
+ raise Exception(
+ "External-internal model: no file 'externalInternalControlTotals.csv' "
+ "or 'externalInternalControlTotalsByYear.csv'")
+ control_totals = pd.read_csv(file_path)
+ _m.logbook_write("Control totals read from %s" % file_path)
+
+ # Aggregate purposes
+ mgra['emp_blu'] = (mgra.emp_const_non_bldg_prod
+ + mgra.emp_const_non_bldg_office
+ + mgra.emp_utilities_prod
+ + mgra.emp_utilities_office
+ + mgra.emp_const_bldg_prod
+ + mgra.emp_const_bldg_office
+ + mgra.emp_mfg_prod
+ + mgra.emp_mfg_office
+ + mgra.emp_whsle_whs
+ + mgra.emp_trans)
+
+ mgra['emp_svc'] = (mgra.emp_prof_bus_svcs
+ + mgra.emp_prof_bus_svcs_bldg_maint
+ + mgra.emp_personal_svcs_office
+ + mgra.emp_personal_svcs_retail)
+
+ mgra['emp_edu'] = (mgra.emp_pvt_ed_k12
+ + mgra.emp_pvt_ed_post_k12_oth
+ + mgra.emp_public_ed)
+
+        mgra['emp_gov'] = (mgra.emp_state_local_gov_ent
+                           + mgra.emp_fed_non_mil
+                           + mgra.emp_state_local_gov_blue
+                           + mgra.emp_state_local_gov_white)
+
+ mgra['emp_ent'] = (mgra.emp_amusement
+ + mgra.emp_hotel
+ + mgra.emp_restaurant_bar)
+
+ mgra['emp_oth'] = (mgra.emp_religious
+ + mgra.emp_pvt_hh
+ + mgra.emp_fed_mil)
+
+ mgra['work_size'] = (mgra.emp_blu +
+ 1.364 * mgra.emp_retail +
+ 4.264 * mgra.emp_ent +
+ 0.781 * mgra.emp_svc +
+ 1.403 * mgra.emp_edu +
+ 1.779 * mgra.emp_health +
+ 0.819 * mgra.emp_gov +
+ 0.708 * mgra.emp_oth)
+
+ mgra['non_work_size'] = (mgra.hh +
+ 1.069 * mgra.emp_blu +
+ 4.001 * mgra.emp_retail +
+ 6.274 * mgra.emp_ent +
+ 0.901 * mgra.emp_svc +
+ 1.129 * mgra.emp_edu +
+ 2.754 * mgra.emp_health +
+ 1.407 * mgra.emp_gov +
+ 0.304 * mgra.emp_oth)
+
+ # aggregate to TAZ
+ taz = mgra[['taz', 'work_size', 'non_work_size']].groupby('taz').sum()
+ taz.reset_index(inplace=True)
+ taz = dem_utils.add_missing_zones(taz, scenario)
+ taz.sort_values('taz', ascending=True, inplace=True) # method sort was deprecated since pandas version 0.20.0, yma, 2/12/2019
+ taz.reset_index(inplace=True, drop=True)
+ control_totals = pd.merge(control_totals, taz[['taz']], how='outer')
+ control_totals.sort_values('taz', inplace=True) # method sort was deprecated since pandas version 0.20.0, yma, 2/12/2019
+
+ length_skim = emmebank.matrix('mf"MD_SOV_TR_M_DIST"').get_numpy_data(scenario)
+
+ # Compute probabilities for work purpose
+ wrk_dist_coef = -0.029
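+        # a simple gravity/logit destination-choice form (sketch):
+        #   P(j|i) = size_j * exp(b * dist_ij) / sum_k size_k * exp(b * dist_ik)
+        # with b = -0.029 per unit of the MD_SOV_TR_M_DIST skim; rows whose
+        # denominator is zero are cleared by nan_to_num below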
+ wrk_prob = taz.work_size.values * np.exp(wrk_dist_coef * length_skim)
+ wrk_sum = np.sum(wrk_prob, 1)
+ wrk_prob = wrk_prob / wrk_sum[:, np.newaxis]
+ wrk_prob = np.nan_to_num(wrk_prob)
+ # Apply probabilities to control totals
+ wrk_pa_mtx = wrk_prob * control_totals.work.values[:, np.newaxis]
+ wrk_pa_mtx = np.nan_to_num(wrk_pa_mtx)
+ wrk_pa_mtx = wrk_pa_mtx.astype("float32")
+
+ # compute probabilities for non work purpose
+ non_wrk_dist_coef = -0.006
+ nwrk_prob = taz.non_work_size.values * np.exp(non_wrk_dist_coef * length_skim)
+ non_wrk_sum = np.sum(nwrk_prob, 1)
+ nwrk_prob = nwrk_prob / non_wrk_sum[:, np.newaxis]
+ nwrk_prob = np.nan_to_num(nwrk_prob)
+ # Apply probabilities to control totals
+ nwrk_pa_mtx = nwrk_prob * control_totals.nonwork.values[:, np.newaxis]
+ nwrk_pa_mtx = np.nan_to_num(nwrk_pa_mtx)
+ nwrk_pa_mtx = nwrk_pa_mtx.astype("float32")
+
+        # Convert PA to OD format (diurnal factors are applied below)
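+        # halving the PA matrix and adding half of its transpose preserves the
+        # daily totals: OD = 0.5 * PA + 0.5 * transpose(PA); the period diurnal
+        # factors are then applied separately to the outbound (PA) and return
+        # (AP) directions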
+ wrk_ap_mtx = 0.5 * np.transpose(wrk_pa_mtx)
+ wrk_pa_mtx = 0.5 * wrk_pa_mtx
+ nwrk_ap_mtx = 0.5 * np.transpose(nwrk_pa_mtx)
+ nwrk_pa_mtx = 0.5 * nwrk_pa_mtx
+
+ # Apply occupancy and diurnal factors
+ work_time_PA_factors = [0.26, 0.26, 0.41, 0.06, 0.02]
+ work_time_AP_factors = [0.08, 0.07, 0.41, 0.42, 0.02]
+
+ nonwork_time_PA_factors = [0.25, 0.39, 0.30, 0.04, 0.02]
+ nonwork_time_AP_factors = [0.12, 0.11, 0.37, 0.38, 0.02]
+
+ work_occupancy_factors = [0.58, 0.31, 0.11]
+ nonwork_occupancy_factors = [0.55, 0.29, 0.15]
+
+ # value of time is in cents per minute (toll cost is in cents)
+ vot_work = 15.00 # $9.00/hr
+ vot_non_work = 22.86 # $13.70/hr
+ ivt_coef = -0.03
+
+ gp_modes = ["SOVGP", "HOV2HOV", "HOV3HOV"]
+ toll_modes = ["SOVTOLL", "HOV2TOLL", "HOV3TOLL"]
+ # TODO: the GP vs. TOLL distinction should be collapsed
+ # (all demand added to transponder demand in import_auto_demand)
+ skim_lookup = {
+ "SOVGP": "SOV_NT_M",
+ "HOV2HOV": "HOV2_M",
+ "HOV3HOV": "HOV3_M",
+ "SOVTOLL": "SOV_TR_M",
+ "HOV2TOLL": "HOV2_M",
+ "HOV3TOLL": "HOV3_M"
+ }
+ periods = ["EA", "AM", "MD", "PM", "EV"]
+ for p, w_d_pa, w_d_ap, nw_d_pa, nw_d_ap in zip(
+ periods, work_time_PA_factors, work_time_AP_factors,
+ nonwork_time_PA_factors, nonwork_time_AP_factors):
+ for gp_mode, toll_mode, w_o, nw_o in zip(
+ gp_modes, toll_modes, work_occupancy_factors, nonwork_occupancy_factors):
+ wrk_mtx = w_o * (w_d_pa * wrk_pa_mtx + w_d_ap * wrk_ap_mtx)
+ nwrk_mtx = nw_o * (nw_d_pa * nwrk_pa_mtx + nw_d_ap * nwrk_ap_mtx)
+
+ # Toll choice split
+ f_tm_imp = emmebank.matrix('mf%s_%s_TIME' % (p, skim_lookup[gp_mode])).get_numpy_data(scenario)
+ t_tm_imp = emmebank.matrix('mf%s_%s_TIME' % (p, skim_lookup[toll_mode])).get_numpy_data(scenario)
+ t_cst_imp = emmebank.matrix('mf%s_%s_TOLLCOST' % (p, skim_lookup[toll_mode])).get_numpy_data(scenario)
+
+ # Toll diversion for work purpose
+ # TODO: .mod no longer needed, to confirm
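+                # binary logit toll choice (sketch of the expression below):
+                #   U = ivt_coef * (toll_time - gp_time + toll_cost / vot) - 3.39
+                #   P(toll) = exp(U) / (1 + exp(U))
+                # dividing cost (cents) by vot (cents/minute) expresses the
+                # toll in equivalent minutes; -3.39 is the toll-path constant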
+ wrk_toll_prb = np.exp(
+ ivt_coef * (t_tm_imp - f_tm_imp + np.mod(t_cst_imp, 10000) / vot_work) - 3.39
+ )
+ wrk_toll_prb[t_cst_imp <= 0] = 0
+ wrk_toll_prb = wrk_toll_prb / (1 + wrk_toll_prb)
+ work_matrix_toll = wrk_mtx * wrk_toll_prb
+ work_matrix_non_toll = wrk_mtx * (1 - wrk_toll_prb)
+
+ toll_eiwork = emmebank.matrix('%s_%s_EIWORK' % (p, toll_mode))
+ gp_ei_work = emmebank.matrix('%s_%s_EIWORK' % (p, gp_mode))
+ toll_eiwork.set_numpy_data(work_matrix_toll, scenario)
+ gp_ei_work.set_numpy_data(work_matrix_non_toll, scenario)
+
+ # Toll diversion for non work purpose
+ nwrk_toll_prb = np.exp(
+ ivt_coef * (t_tm_imp - f_tm_imp + np.mod(t_cst_imp, 10000) / vot_non_work) - 3.39
+ )
+
+ nwrk_toll_prb[t_cst_imp <= 0] = 0
+ nwrk_toll_prb = nwrk_toll_prb / (1 + nwrk_toll_prb)
+
+ non_work_toll_matrix = nwrk_mtx * nwrk_toll_prb
+ non_work_gp_matrix = nwrk_mtx * (1 - nwrk_toll_prb)
+
+ toll_einonwork = emmebank.matrix('%s_%s_EINONWORK' % (p, toll_mode))
+ gp_einonwork = emmebank.matrix('%s_%s_EINONWORK' % (p, gp_mode))
+ toll_einonwork.set_numpy_data(non_work_toll_matrix, scenario)
+ gp_einonwork.set_numpy_data(non_work_gp_matrix, scenario)
+
+ precision = float(props['RunModel.MatrixPrecision'])
+ self.matrix_rounding(scenario, precision)
+
+ @_m.logbook_trace('Controlled rounding of demand')
+ def matrix_rounding(self, scenario, precision):
+ round_matrix = _m.Modeller().tool(
+ "inro.emme.matrix_calculation.matrix_controlled_rounding")
+ emmebank = scenario.emmebank
+ periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ modes = ["SOVGP", "HOV2HOV", "HOV3HOV", "SOVTOLL", "HOV2TOLL", "HOV3TOLL"]
+ purpose_types = ["EIWORK", "EINONWORK"]
+ for period in periods:
+ for mode in modes:
+ for purpose in purpose_types:
+ matrix = emmebank.matrix("mf%s_%s_%s" % (period, mode, purpose))
+ try:
+ report = round_matrix(demand_to_round=matrix,
+ rounded_demand=matrix,
+ min_demand=precision,
+ values_to_round="SMALLER_THAN_MIN",
+ scenario=scenario)
+ except:
+ max_val = matrix.get_numpy_data(scenario.id).max()
+ if max_val == 0:
+ # if max_val is 0 the error is that the matrix is 0, log a warning
+ _m.logbook_write('Warning: matrix %s is all 0s' % matrix.named_id)
+ else:
+ raise
diff --git a/sandag_abm/src/main/emme/toolbox/model/truck/distribution.py b/sandag_abm/src/main/emme/toolbox/model/truck/distribution.py
new file mode 100644
index 0000000..7a9207a
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/truck/distribution.py
@@ -0,0 +1,288 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/truck/distribution.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Runs the truck distribution step. Distributes truck trips with congested
+# skims and splits by time of day.
+# The distribution is based on the mid-day travel time for the "generic"
+# truck skim "mfMD_TRK_TIME". Applies truck toll diversion model with
+# toll and non-toll skims.
+#
+# Inputs:
+# input_directory: source directory for input files
+# num_processors: Number of processors to use, either as a number or "MAX-#"
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# Note: YEAR is replaced by truck.FFyear in the conf/sandag_abm.properties file
+# input/TruckTripRates.csv
+# input/mgra13_based_inputYEAR.csv
+# input/specialGenerators.csv
+#
+# Matrix inputs:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# moTRKL_PROD, moTRKM_PROD, moTRKH_PROD, moTRKEI_PROD, moTRKIE_PROD
+# mdTRKL_ATTR, mdTRKM_ATTR, mdTRKH_ATTR, mdTRKEI_ATTR, mdTRKIE_ATTR
+# mfTRKEE_DEMAND
+# mfMD_TRK_TIME
+# mfpp_TRKLGP_TIME, mfpp_TRKLTOLL_TIME, mfpp_TRKLTOLL_TOLLCOST
+# mfpp_TRKMGP_TIME, mfpp_TRKMTOLL_TIME, mfpp_TRKMTOLL_TOLLCOST
+# mfpp_TRKHGP_TIME, mfpp_TRKHTOLL_TIME, mfpp_TRKHTOLL_TOLLCOST
+#
+# Matrix intermediates (only used internally):
+# mfTRKEI_FRICTION, mfTRKIE_FRICTION, mfTRKL_FRICTION, mfTRKM_FRICTION, mfTRKH_FRICTION
+#
+# Matrix results:
+# Note: pp is time period, one of EA, AM, MD, PM, EV
+# mfpp_TRKLGP_VEH, mfpp_TRKMGP_VEH, mfpp_TRKHGP_VEH
+# mfpp_TRKLTOLL_VEH, mfpp_TRKMTOLL_VEH, mfpp_TRKHTOLL_VEH
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ main_directory = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ input_dir = os.path.join(main_directory, "input")
+ num_processors = "MAX-1"
+ base_scenario = modeller.scenario
+ distribution = modeller.tool("sandag.model.truck.distribution")
+ distribution(input_dir, num_processors, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 43
+
+import traceback as _traceback
+import pandas as pd
+import numpy as np
+import os
+
+import inro.modeller as _m
+
+
+gen_utils = _m.Modeller().module('sandag.utilities.general')
+dem_utils = _m.Modeller().module('sandag.utilities.demand')
+
+
+class TruckModel(_m.Tool(), gen_utils.Snapshot):
+
+ input_directory = _m.Attribute(str)
+ num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.num_processors = "MAX-1"
+ self.attributes = ["input_directory", "num_processors"]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Truck distribution"
+ pb.description = """
+
+ Distributes truck trips with congested skims and splits by time of day.
+ The distribution is based on the mid-day travel time for the "generic" truck
+ skim "mfMD_TRK_TIME".
+
+ Applies truck toll diversion model with toll and non-toll skims,
+ and generates truck vehicle trips.
+
+ Note that the truck vehicle trips must be converted to PCE values by the Import auto
+ demand tool and stored in matrices without the _VEH ending for the auto assignment.
+
+ """
+ pb.branding_text = "- SANDAG - Model - Truck"
+
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+
+ pb.add_select_file('input_directory', 'directory',
+ title='Select input directory')
+ dem_utils.add_select_processors("num_processors", pb, self)
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.input_directory, self.num_processors, scenario)
+ run_msg = "Truck trip distribution complete."
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Truck distribution')
+ def __call__(self, input_directory, num_processors, scenario):
+ attributes = {
+ "input_directory": input_directory,
+ "num_processors": num_processors
+ }
+ gen_utils.log_snapshot("Truck distribution", str(self), attributes)
+ self.scenario = scenario
+ self.num_processors = num_processors
+
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(
+ os.path.join(os.path.dirname(input_directory), "conf", "sandag_abm.properties"))
+
+ with _m.logbook_trace('Daily demand matrices'):
+            coefficients = [0.045, 0.03, 0.03, 0.03, 0.03]
+            truck_list = ['L', 'M', 'H', 'IE', 'EI']
+            # distribution based on the "generic" truck MD time only
+            time_skim = scenario.emmebank.matrix('mf"MD_TRK_TIME"')
+            for truck_type, coeff in zip(truck_list, coefficients):
+ with _m.logbook_trace('Create %s daily demand matrix' % truck_type):
+ self.calc_friction_factors(truck_type, time_skim, coeff)
+ self.matrix_balancing(truck_type)
+
+ self.split_external_demand()
+ self.split_into_time_of_day()
+ # NOTE: TOLL diversion skipped with new class definitions
+ #self.toll_diversion()
+
+ with _m.logbook_trace('Reduce matrix precision'):
+ precision = props['RunModel.MatrixPrecision']
+ matrices = []
+ for t, pce in [('L', 1.3), ('M', 1.5), ('H', 2.5)]:
+ for p in ['EA', 'AM', 'MD', 'PM', 'EV']:
+ matrices.append('mf%s_TRK_%s_VEH' % (p, t))
+ dem_utils.reduce_matrix_precision(matrices, precision, num_processors, scenario)
+
+ @_m.logbook_trace('Create friction factors matrix')
+ def calc_friction_factors(self, truck_type, impedance, coeff):
+ matrix_calc = dem_utils.MatrixCalculator(self.scenario, self.num_processors)
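+        # exponential deterrence for the gravity distribution:
+        #   friction_ij = exp(-coeff * time_ij)
+        # e.g. with coeff = 0.03 the friction halves roughly every 23 minutes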
+ matrix_calc.run_single('mfTRK%s_FRICTION' % truck_type,
+ 'exp(-%s*%s)' % (coeff, impedance.named_id))
+ return
+
+ def matrix_balancing(self, truck_type):
+ matrix_calc = dem_utils.MatrixCalculator(self.scenario, self.num_processors)
+ emmebank = self.scenario.emmebank
+ with _m.logbook_trace('Matrix balancing for %s' % truck_type):
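+            # IE demand is singly constrained to destination attractions and
+            # EI demand to origin productions; internal L/M/H demand is doubly
+            # constrained via two-dimensional balancing (below)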
+ if truck_type == 'IE':
+ with gen_utils.temp_matrices(emmebank, "DESTINATION") as (temp_md,):
+ temp_md.name = 'TRKIE_ROWTOTAL'
+ matrix_calc.add('md"TRKIE_ROWTOTAL"', 'mf"TRKIE_FRICTION"', aggregation={"origins": "+", "destinations": None})
+ matrix_calc.add('mf"TRKIE_DEMAND"', 'mf"TRKIE_FRICTION" * md"TRKIE_ATTR" / md"TRKIE_ROWTOTAL"',
+ constraint=['md"TRKIE_ROWTOTAL"', 0, 0, "EXCLUDE"])
+ matrix_calc.run()
+
+ elif truck_type == 'EI':
+ with gen_utils.temp_matrices(emmebank, "ORIGIN") as (temp_mo,):
+ temp_mo.name = 'TRKEI_COLTOTAL'
+ matrix_calc.add('mo"TRKEI_COLTOTAL"', 'mf"TRKEI_FRICTION"', aggregation={"origins": None, "destinations": "+"})
+ matrix_calc.add('mf"TRKEI_DEMAND"', 'mf"TRKEI_FRICTION" * mo"TRKEI_PROD" / mo"TRKEI_COLTOTAL"',
+ constraint=['mo"TRKEI_COLTOTAL"', 0, 0, "EXCLUDE"])
+ matrix_calc.run()
+ else:
+ matrix_balancing = _m.Modeller().tool(
+ 'inro.emme.matrix_calculation.matrix_balancing')
+ spec = {
+ "type": "MATRIX_BALANCING",
+ "od_values_to_balance": 'mf"TRK%s_FRICTION"' % truck_type,
+ "origin_totals": 'mo"TRK%s_PROD"' % truck_type,
+ "destination_totals": 'md"TRK%s_ATTR"' % truck_type,
+ "results": {
+ "od_balanced_values": 'mf"TRK%s_DEMAND"' % truck_type,
+ },
+ "max_iterations": 100,
+ "max_relative_error": 0.01
+ }
+ matrix_balancing(spec, self.scenario)
+
+ @_m.logbook_trace('Split cross-regional demand by truck type')
+ def split_external_demand(self):
+ matrix_calc = dem_utils.MatrixCalculator(self.scenario, self.num_processors)
+
+ truck_types = ['L', 'M', 'H']
+ truck_share = [0.307, 0.155, 0.538]
+ for t_type, share in zip(truck_types, truck_share):
+ matrix_calc.add('mf"TRK%s_DEMAND"' % (t_type),
+ '%s * (mf"TRKEI_DEMAND" + mf"TRKIE_DEMAND" + mf"TRKEE_DEMAND")' % (share))
+ # Set intrazonal truck trips to 0
+ matrix_calc.add('mf"TRK%s_DEMAND"' % (t_type), 'mf"TRK%s_DEMAND" * (p.ne.q)' % (t_type))
+ matrix_calc.run()
+
+ @_m.logbook_trace('Distribute daily demand into time of day')
+ def split_into_time_of_day(self):
+ matrix_calc = dem_utils.MatrixCalculator(self.scenario, self.num_processors)
+ periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ time_share = [0.1018, 0.1698, 0.4284, 0.1543, 0.1457]
+ border_time_share = [0.0188, 0.1812, 0.4629, 0.2310, 0.1061]
+ border_correction = [bs/s for bs, s in zip(border_time_share, time_share)]
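+        # border zones (TAZs 1-5) follow a different diurnal profile, so their
+        # period shares are rescaled, e.g. AM: 0.1812 / 0.1698, a correction of
+        # about 1.07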
+
+ truck_types = ['L', 'M', 'H']
+ truck_names = {"L": "light trucks", "M": "medium trucks", "H": "heavy trucks"}
+
+ for period, share, border_corr in zip(periods, time_share, border_correction):
+ for t in truck_types:
+ with matrix_calc.trace_run('Calculate %s demand matrix for %s' % (period, truck_names[t])):
+ tod_demand = 'mf"%s_TRK_%s_VEH"' % (period, t)
+ matrix_calc.add(tod_demand, 'mf"TRK%s_DEMAND"' % (t))
+ matrix_calc.add(tod_demand, 'mf%s_TRK_%s_VEH * %s' % (period, t, share))
+ matrix_calc.add(tod_demand, 'mf%s_TRK_%s_VEH * %s' % (period, t, border_corr),
+ {"origins": "1-5", "destinations": "1-9999"})
+ matrix_calc.add(tod_demand, 'mf%s_TRK_%s_VEH * %s' % (period, t, border_corr),
+ {"origins": "1-9999", "destinations": "1-5"})
+
+ @_m.logbook_trace('Toll diversion')
+ def toll_diversion(self):
+ # NOTE: toll diversion skipped
+ pass
+ # matrix_calc = dem_utils.MatrixCalculator(self.scenario, self.num_processors)
+ # nest_factor = 10
+ # vot = 0.02 # cent/min
+ # periods = ['EA', 'AM', 'MD', 'PM', 'EV']
+ # truck_types = ['L', 'M', 'H']
+ # truck_toll_factors = [1, 1.03, 2.33]
+
+ # for period in periods:
+ # for truck, toll_factor in zip(truck_types, truck_toll_factors):
+ # with matrix_calc.trace_run('Toll diversion for period %s, truck type %s' % (period, truck) ):
+ # # Define the utility expression
+ # utility = """
+ # (
+ # (mf"%(p)s_TRK%(t)sGP_TIME" - mf"%(p)s_TRK%(t)sTOLL_TIME")
+ # - %(vot)s * mf"%(p)s_TRK%(t)sTOLL_TOLLCOST" * %(t_fact)s
+ # )
+ # / %(n_fact)s
+ # """ % {
+ # 'p': period,
+ # 't': truck,
+ # 'vot': vot,
+ # 't_fact': toll_factor,
+ # 'n_fact': nest_factor
+ # }
+ # # If there is no toll probability of using toll is 0
+ # matrix_calc.add('mf"%s_TRK%sTOLL_VEH"' % (period, truck), '0')
+ # # If there is a non-zero toll value compute the share of
+ # # toll-available passengers using the utility expression defined earlier
+ # matrix_calc.add('mf"%s_TRK%sTOLL_VEH"' % (period, truck),
+ # 'mf"%(p)s_TRK%(t)s" * (1/(1 + exp(- %(u)s)))' % {'p': period, 't': truck, 'u': utility},
+ # ['mf"%s_TRK%sTOLL_TOLLCOST"' % (period, truck), 0, 0 , "EXCLUDE"])
+ # # if non-toll path is not available (GP time=0), set all demand to toll
+ # matrix_calc.add('mf"%s_TRK%sTOLL_VEH"' % (period, truck),
+ # 'mf"%(p)s_TRK%(t)s"' % {'p': period, 't': truck},
+ # ['mf"%(p)s_TRK%(t)sGP_TIME"' % {'p': period, 't': truck}, 0, 0 , "INCLUDE"])
+ # # Compute the truck demand for non toll
+ # matrix_calc.add('mf"%s_TRK%sGP_VEH"' % (period, truck),
+ # '(mf"%(p)s_TRK%(t)s" - mf"%(p)s_TRK%(t)sTOLL_VEH").max.0' % {'p': period, 't': truck})
diff --git a/sandag_abm/src/main/emme/toolbox/model/truck/generation.py b/sandag_abm/src/main/emme/toolbox/model/truck/generation.py
new file mode 100644
index 0000000..c0c10a8
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/model/truck/generation.py
@@ -0,0 +1,443 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// model/truck/generation.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+#
+# Runs the truck generation step. Generates standard truck trips and special
+# (military) truck trips, generates regional truck trips (IE, EI and EE), and
+# balances truck trip productions and attractions.
+#
+# Inputs:
+# input_directory: source directory for most input files, including demographics and trip rates
+# input_truck_directory: source for special truck files
+# scenario: traffic scenario to use for reference zone system
+#
+# Files referenced:
+# Note: YEAR is replaced by truck.FFyear in the conf/sandag_abm.properties file
+# input/TruckTripRates.csv
+# file referenced by key mgra.socec.file, e.g. input/mgra13_based_inputYEAR.csv
+# input/specialGenerators.csv
+# input_truck/regionalIEtripsYEAR.csv
+# input_truck/regionalEItripsYEAR.csv
+# input_truck/regionalEEtripsYEAR.csv
+#
+# Matrix results:
+# moTRKL_PROD, moTRKM_PROD, moTRKH_PROD, moTRKEI_PROD, moTRKIE_PROD
+# mdTRKL_ATTR, mdTRKM_ATTR, mdTRKH_ATTR, mdTRKEI_ATTR, mdTRKIE_ATTR
+# mfTRKEE_DEMAND
+#
+# Script example:
+"""
+ import os
+ modeller = inro.modeller.Modeller()
+ project_dir = os.path.dirname(os.path.dirname(modeller.desktop.project.path))
+ input_dir = os.path.join(project_dir, "input")
+ input_truck_dir = os.path.join(project_dir, "input_truck")
+ base_scenario = modeller.scenario
+ generation = modeller.tool("sandag.model.truck.generation")
+ generation(input_dir, input_truck_dir, base_scenario)
+"""
+
+
+TOOLBOX_ORDER = 42
+
+
+import inro.modeller as _m
+import traceback as _traceback
+import numpy as np
+import pandas as pd
+import os
+
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+dem_utils = _m.Modeller().module("sandag.utilities.demand")
+
+
+class TruckGeneration(_m.Tool(), gen_utils.Snapshot):
+
+    input_directory = _m.Attribute(str)
+    input_truck_directory = _m.Attribute(str)
+    run_generation = _m.Attribute(_m.BooleanType)
+    num_processors = _m.Attribute(str)
+
+ tool_run_msg = ""
+
+ @_m.method(return_type=_m.UnicodeType)
+ def tool_run_msg_status(self):
+ return self.tool_run_msg
+
+ def __init__(self):
+ project_dir = os.path.dirname(_m.Modeller().desktop.project.path)
+ self.input_directory = os.path.join(os.path.dirname(project_dir), "input")
+ self.input_truck_directory = os.path.join(os.path.dirname(project_dir), "input_truck")
+        self.run_generation = True
+        self.num_processors = "MAX-1"
+        self.attributes = ["input_directory", "input_truck_directory",
+                           "run_generation", "num_processors"]
+ self._properties = None
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "Truck generation"
+ pb.description = """
+
+        Generates standard truck trips and special (military) truck trips, as
+        well as regional truck trips (IE, EI and EE), and balances truck trip
+        productions / attractions.
+
+        1) Generates standard truck trips and special (military) truck trips
+        2) Gets regional truck trips (IE, EI and EE) and balances truck trips
+ 3) Distributes truck trips with congested skims and splits by time of day
+ 4) Applies truck toll diversion model with free-flow toll and non-toll skims
+
+"""
+ pb.branding_text = "- SANDAG - Model - Truck"
+
+ if self.tool_run_msg != "":
+ pb.tool_run_status(self.tool_run_msg_status)
+ pb.add_checkbox("run_generation", title=" ", label="Run generation (first iteration)")
+
+ pb.add_select_file('input_directory', 'directory',
+ title='Select input directory')
+ pb.add_select_file('input_truck_directory', 'directory',
+ title='Select truck input directory')
+ dem_utils.add_select_processors("num_processors", pb, self)
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ scenario = _m.Modeller().scenario
+ self(self.run_generation, self.input_directory, self.input_truck_directory, self.num_processors, scenario)
+ run_msg = "Tool complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ @_m.logbook_trace('Truck model', save_arguments=True)
+ def __call__(self, run_generation, input_directory, input_truck_directory, num_processors, scenario):
+ attributes = {
+ "input_directory": input_directory, "input_truck_directory": input_truck_directory,
+ "run_generation": run_generation, "num_processors": num_processors
+ }
+ gen_utils.log_snapshot("Truck model", str(self), attributes)
+
+ generation = _m.Modeller().tool('sandag.model.truck.generation')
+ distribution = _m.Modeller().tool('sandag.model.truck.distribution')
+
+ if run_generation:
+ generation(input_directory, input_truck_directory, scenario)
+ distribution(input_directory, num_processors, scenario)
diff --git a/sandag_abm/src/main/emme/toolbox/utilities/demand.py b/sandag_abm/src/main/emme/toolbox/utilities/demand.py
new file mode 100644
index 0000000..bcd0f5f
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/utilities/demand.py
@@ -0,0 +1,297 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// utilities/demand.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+
+TOOLBOX_ORDER = 101
+
+
+import inro.emme.datatable as _dt
+import inro.modeller as _m
+from collections import OrderedDict
+from contextlib import contextmanager as _context
+from copy import deepcopy as _copy
+import multiprocessing as _multiprocessing
+import re as _re
+import pandas as _pandas
+import numpy as _numpy
+import os
+
+
+class Utils(_m.Tool()):
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self, runnable=False)
+ pb.title = 'Demand utility'
+ pb.description = """Utility tool / module for common code. Not runnable."""
+ pb.branding_text = ' - SANDAG - Utilities'
+ return pb.render()
+
+
+# Read a CSV file, store it as a DataTable and return a representative DataFrame
+def csv_to_data_table(path, overwrite=False):
+ layer_name = os.path.splitext(os.path.basename(path))[0]
+ data_source = _dt.DataSource(path)
+ data = data_source.layer(layer_name).get_data()
+ desktop = _m.Modeller().desktop
+ dt_db = desktop.project.data_tables()
+ table = dt_db.create_table(layer_name, data, overwrite=overwrite)
+ return table_to_dataframe(table)
+
+
+# Convert a DataTable into a DataFrame
+def table_to_dataframe(table):
+ if type(table) == str:
+ desktop = _m.Modeller().desktop
+ dt_db = desktop.project.data_tables()
+ table_name = table
+ table = dt_db.table(table)
+ if not table:
+ raise Exception('%s is not a valid table name.' %table_name)
+
+ df = _pandas.DataFrame()
+ for attribute in table.get_data().attributes():
+ try:
+ df[attribute.name] = attribute.values.astype(float)
+        except Exception:
+ df[attribute.name] = attribute.values
+
+ return df
+
+
+# Convert a dataframe to a datatable
+def dataframe_to_table(df, name):
+ desktop = _m.Modeller().desktop
+ dt_db = desktop.project.data_tables()
+ data = _dt.Data()
+ for key in df.columns:
+ found_dtype = False
+ dtypes = [
+ (bool, True, 'BOOLEAN'),
+ (int, 0, 'INTEGER32'),
+ (int, 0, 'INTEGER'),
+ (float, 0, 'REAL')
+ ]
+        for caster, default, dtype_name in dtypes:
+            try:
+                df[[key]] = df[[key]].fillna(default)
+                values = df[key].astype(caster)
+                attribute = _dt.Attribute(key, values, dtype_name)
+ found_dtype = True
+ break
+ except ValueError:
+ pass
+
+ if not found_dtype:
+ df[[key]] = df[[key]].fillna(0)
+ values = df[key].astype(str)
+ attribute = _dt.Attribute(key, values, 'STRING')
+
+ data.add_attribute(attribute)
+
+ table = dt_db.create_table(name, data, overwrite=True)
+ return table
+
+# Add missing (usually external zones 1 to 12) zones to the DataFrame
+# and populate with zeros
+def add_missing_zones(df, scenario):
+ all_zones = scenario.zone_numbers
+ existing_zones = df['taz'].values
+ missing_zones = set(all_zones) - set(existing_zones)
+ num_missing = len(missing_zones)
+ if num_missing == 0:
+ return df
+
+ ext_df = _pandas.DataFrame()
+ for c in df.columns:
+ ext_df[c] = _numpy.zeros(num_missing)
+ ext_df['taz'] = _numpy.array(list(missing_zones))
+ df = _pandas.concat([df, ext_df])
+ df = df.sort_values('taz', ascending=True) # sort method was deprecated in version 0.20.0,yma,2/12/2019
+ return df
+
+
+def add_select_processors(tool_attr_name, pb, tool):
+ max_processors = _multiprocessing.cpu_count()
+ tool._max_processors = max_processors
+ options = [("MAX-1", "Maximum available - 1"), ("MAX", "Maximum available")]
+ options.extend([(n, "%s processors" % n) for n in range(1, max_processors + 1) ])
+ pb.add_select(tool_attr_name, options, title="Number of processors:")
+
+
+def parse_num_processors(value):
+ max_processors = _multiprocessing.cpu_count()
+ if isinstance(value, int):
+ return value
+ if isinstance(value, basestring):
+ if value == "MAX":
+ return max_processors
+ if _re.match("^[0-9]+$", value):
+ return int(value)
+ result = _re.split("^MAX[\s]*-[\s]*", value)
+ if len(result) == 2:
+ return max(max_processors - int(result[1]), 1)
+ if value:
+ return int(value)
+ return value
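+
+# examples: parse_num_processors("MAX") -> cpu_count();
+# parse_num_processors("MAX-2") -> cpu_count() - 2 (floored at 1);
+# parse_num_processors("4") or parse_num_processors(4) -> 4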
+
+class MatrixCalculator(object):
+ def __init__(self, scenario, num_processors=0):
+ self._scenario = scenario
+ self._matrix_calc = _m.Modeller().tool(
+ "inro.emme.matrix_calculation.matrix_calculator")
+ self._specs = []
+ self._last_report = None
+ self.num_processors = num_processors
+
+ @property
+ def num_processors(self):
+ return self._num_processors
+
+ @num_processors.setter
+ def num_processors(self, value):
+ self._num_processors = parse_num_processors(value)
+
+ @property
+ def last_report(self):
+ return _copy(self._last_report)
+
+ @_context
+ def trace_run(self, name):
+ with _m.logbook_trace(name):
+ yield
+ self.run()
+
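+    # add(...) queues calculation specs and run() submits them as a single
+    # batched matrix calculation; trace_run(name) wraps queue-and-run in one
+    # logbook entry. Usage sketch (illustrative matrix names):
+    #   with calc.trace_run("Example"):
+    #       calc.add('mf"RESULT"', 'mf"A" + mf"B"')
+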
+ def add(self, result, expression, constraint=None, aggregation=None):
+ spec = self._format_spec(result, expression, constraint, aggregation)
+ self._specs.append(spec)
+
+ def _format_spec(self, result, expression, constraint, aggregation):
+ spec = {
+ "result": result,
+ "expression": expression,
+ "type": "MATRIX_CALCULATION"
+ }
+ if constraint is not None:
+ if isinstance(constraint, (list, tuple)):
+ # specified as list of by_value inputs
+ constraint = {
+ "by_value": {
+ "od_values": constraint[0],
+ "interval_min": constraint[1],
+ "interval_max": constraint[2],
+ "condition": constraint[3]
+ }
+ }
+ elif "od_values" in constraint:
+ # specified as the by_value sub-dictionary only
+ constraint = {"by_value": constraint}
+ # By zone constraints
+ elif ("destinations" in constraint or "origins" in constraint):
+ # specified as the by_zone sub-dictionary only
+ constraint = {"by_zone": constraint}
+ # otherwise, specified as a regular full constraint dictionary
+ if "by_value" in constraint:
+ # cast the inputs to the correct values
+ constraint["by_value"]["od_values"] = \
+ str(constraint["by_value"]["od_values"])
+ constraint["by_value"]["condition"] = \
+ constraint["by_value"]["condition"].upper()
+ spec["constraint"] = constraint
+
+        # Add None for missing constraint sub-keys if needed
+ if "by_value" not in constraint:
+ constraint["by_value"] = None
+ if "by_zone" not in constraint:
+ constraint["by_zone"] = None
+
+ else:
+ spec["constraint"] = None
+
+ if aggregation is not None:
+ if isinstance(aggregation, basestring):
+ aggregation = {"origins": aggregation}
+ spec["aggregation"] = aggregation
+ else:
+ spec["aggregation"] = None
+ return spec
+
+ def add_spec(self, spec):
+ self._specs.append(spec)
+
+ def run(self):
+ specs, self._specs = self._specs, []
+ report = self._matrix_calc(specs, scenario=self._scenario,
+ num_processors=self._num_processors)
+ self._last_report = report
+ return report
+
+ def run_single(self, result, expression, constraint=None, aggregation=None):
+ spec = self._format_spec(result, expression, constraint, aggregation)
+ return self._matrix_calc(spec, scenario=self._scenario,
+ num_processors=self._num_processors)
+
+
+def reduce_matrix_precision(matrices, precision, num_processors, scenario):
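+    # zero out cells below `precision`, then scale the surviving cells by
+    # original_sum / rounded_sum so each matrix total is preserved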
+ emmebank = scenario.emmebank
+ calc = MatrixCalculator(scenario, num_processors)
+ gen_utils = _m.Modeller().module('sandag.utilities.general')
+ with gen_utils.temp_matrices(emmebank, "SCALAR", 2) as (sum1, sum2):
+ sum1.name = "ORIGINAL_SUM"
+ sum2.name = "ROUNDED_SUM"
+ for mat in matrices:
+ mat = emmebank.matrix(mat).named_id
+ with calc.trace_run('Reduce precision for matrix %s' % mat):
+ calc.add(sum1.named_id, mat, aggregation={"destinations": "+", "origins": "+"})
+ calc.add(mat, "{mat} * ({mat} >= {precision})".format(
+ mat=mat, precision=precision))
+ calc.add(sum2.named_id, mat, aggregation={"destinations": "+", "origins": "+"})
+ calc.add(sum2.named_id, "({sum2} + ({sum2} == 0))".format(sum2=sum2.named_id))
+ calc.add(mat, "{mat} * ({sum1} / {sum2})".format(
+ mat=mat, sum2=sum2.named_id, sum1=sum1.named_id))
+
+
+def create_full_matrix(name, desc, scenario):
+ create_matrix = _m.Modeller().tool(
+ "inro.emme.data.matrix.create_matrix")
+ emmebank = scenario.emmebank
+ matrix = emmebank.matrix(name)
+ if matrix:
+ ident = matrix.id
+ else:
+ used_ids = set([])
+ for m in emmebank.matrices():
+ if m.prefix == "mf":
+ used_ids.add(int(m.id[2:]))
+ for i in range(900, emmebank.dimensions["full_matrices"]):
+ if i not in used_ids:
+ ident = "mf" + str(i)
+ break
+ else:
+ raise Exception("Not enough available matrix IDs for selected demand. Change database dimensions to increase full matrices.")
+ return create_matrix(ident, name, desc, scenario=scenario, overwrite=True)
+
+
+def demand_report(matrices, label, scenario, report=None):
+    text = ['<div class="preformat">']
+    text.append("%-28s %13s" % ("name", "sum"))
+    for name, data in matrices:
+        stats = (name, data.sum())
+        text.append("%-28s %13.7g" % stats)
+    text.append("</div>")
+    title = "Demand summary"
+    if report is None:
+        report = _m.PageBuilder(title)
+        report.wrap_html('Matrix details', "<br>".join(text))
+        _m.logbook_write(label, report.render())
+    else:
+        report.wrap_html(label, "<br>".join(text))
diff --git a/sandag_abm/src/main/emme/toolbox/utilities/file_manager.py b/sandag_abm/src/main/emme/toolbox/utilities/file_manager.py
new file mode 100644
index 0000000..6e56df5
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/utilities/file_manager.py
@@ -0,0 +1,370 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2018. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// utilities/file_manager.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+#
+TOOLBOX_ORDER = 104
+
+
+import inro.modeller as _m
+import inro.emme.database.emmebank as _eb
+import inro.director.logging as _log
+
+import traceback as _traceback
+import shutil as _shutil
+import time as _time
+import os
+from fnmatch import fnmatch as _fnmatch
+from math import log10
+
+_join = os.path.join
+_dir = os.path.dirname
+_norm = os.path.normpath
+
+gen_utils = _m.Modeller().module("sandag.utilities.general")
+
+
+class FileManagerTool(_m.Tool(), gen_utils.Snapshot):
+
+ operation = _m.Attribute(unicode)
+ remote_dir = _m.Attribute(unicode)
+ local_dir = _m.Attribute(unicode)
+ user_folder = _m.Attribute(unicode)
+ scenario_id = _m.Attribute(unicode)
+ initialize = _m.Attribute(_m.BooleanType)
+ delete_local_files = _m.Attribute(_m.BooleanType)
+
+ tool_run_msg = ""
+ LOCAL_ROOT = "C:\\abm_runs"
+
+ def __init__(self):
+ self.operation = "UPLOAD"
+ project_dir = _dir(_m.Modeller().desktop.project.path)
+ self.remote_dir = _dir(project_dir)
+ folder_name = os.path.basename(self.remote_dir)
+ self.user_folder = os.environ.get("USERNAME")
+ self.scenario_id = 100
+ self.initialize = True
+ self.delete_local_files = True
+ self.attributes = [
+ "operation", "remote_dir", "local_dir", "user_folder",
+ "scenario_id", "initialize", "delete_local_files"
+ ]
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self)
+ pb.title = "File run manager utility"
+ pb.description = """
+
+        Utility tool to manually manage the use of the local drive for a
+        subsequent model run. The remote data can be downloaded (copied) to
+        the local drive, or the local data can be uploaded to the remote drive.
+        In normal operation this tool does not need to be run manually, but in
+        case of an error it may be necessary to upload the project data in
+        order to run on a different machine, or to operate directly on the
+        server.
+
+        Note that file masks from conf/sandag_abm.properties are used to
+        identify which files to copy. See RunModel.FileMask.Upload and
+        RunModel.FileMask.Download for upload and download respectively.
+        """
+ pb.branding_text = "- SANDAG"
+ if self.tool_run_msg:
+ pb.add_html(self.tool_run_msg)
+
+ pb.add_radio_group('operation', title="File copy operation",
+ keyvalues=[("UPLOAD", "Upload from local directory to remote directory"),
+ ("DOWNLOAD", "Download from remote directory to local directory")], )
+ pb.add_select_file('remote_dir','directory',
+ title='Select remote ABM directory (e.g. on T drive)', note='')
+ pb.add_text_box('user_folder', title="User folder (for local drive):")
+ pb.add_text_box('scenario_id', title="Base scenario ID:")
+ pb.add_checkbox_group(
+ [{"attribute": "delete_local_files", "label": "Delete all local files on completion (upload only)"},
+ {"attribute": "initialize", "label": "Initialize all local files; if false only download files which are different (download only)"}])
+
+ return pb.render()
+
+ def run(self):
+ self.tool_run_msg = ""
+ try:
+ self(self.operation, self.remote_dir, self.user_folder, self.scenario_id,
+ self.initialize, self.delete_local_files)
+ run_msg = "File copying complete"
+ self.tool_run_msg = _m.PageBuilder.format_info(run_msg, escape=False)
+ except Exception as error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error))
+ raise
+
+ def __call__(self, operation, remote_dir, user_folder, scenario_id, initialize=True, delete_local_files=True):
+ load_properties = _m.Modeller().tool('sandag.utilities.properties')
+ props = load_properties(_join(remote_dir, "conf", "sandag_abm.properties"))
+ if operation == "DOWNLOAD":
+ file_masks = props.get("RunModel.FileMask.Download")
+ return self.download(remote_dir, user_folder, scenario_id, initialize, file_masks)
+ elif operation == "UPLOAD":
+ file_masks = props.get("RunModel.FileMask.Upload")
+ self.upload(remote_dir, user_folder, scenario_id, delete_local_files, file_masks)
+ else:
+ raise Exception("operation must be one of UPLOAD or DOWNLOAD")
+
+ @_m.logbook_trace("Copy project data to local drive", save_arguments=True)
+ def download(self, remote_dir, user_folder, scenario_id, initialize, file_masks):
+ folder_name = os.path.basename(remote_dir)
+ user_folder = user_folder or os.environ.get("USERNAME")
+ if not user_folder:
+ raise Exception("Username must be specified for local drive operation "
+ "(or define USERNAME environment variable)")
+ if not os.path.exists(self.LOCAL_ROOT):
+ os.mkdir(self.LOCAL_ROOT)
+ user_directory = _join(self.LOCAL_ROOT, user_folder)
+ if not os.path.exists(user_directory):
+ os.mkdir(user_directory)
+ local_dir = _join(user_directory, folder_name)
+ if not os.path.exists(local_dir):
+ os.mkdir(local_dir)
+
+ self._report = ["Copy"]
+ self._stats = {"size": 0, "count": 0}
+ if not file_masks:
+ # suggested default: "output", "report", "sql", "logFiles"
+ file_masks = []
+ file_masks = [_join(remote_dir, p) for p in file_masks]
+ file_masks.append(_join(remote_dir, "emme_project"))
+ if initialize:
+ # make sure that all of the root directories are created
+ root_dirs = [
+ "application", "bin", "conf", "emme_project", "input", "input_truck",
+ "logFiles", "output", "python", "report", "sql", "uec"
+ ]
+ for name in root_dirs:
+ if not os.path.exists(_join(local_dir, name)):
+ os.mkdir(_join(local_dir, name))
+ # create new Emmebanks with scenario and matrix data
+ title_fcn = lambda t: "(local) " + t[:50]
+ emmebank_paths = self._copy_emme_data(
+ src=remote_dir, dst=local_dir, initialize=True,
+ title_fcn=title_fcn, scenario_id=scenario_id)
+ # add new emmebanks to the open project
+ # db_paths = set([db.core_emmebank.path for db in data_explorer.databases()])
+ # for path in emmebank_paths:
+ # if path not in db_paths:
+ # _m.Modeller().desktop.data_explorer().add_database(path)
+
+ # copy all files (except Emme project, and other file_masks)
+ self._copy_dir(src=remote_dir, dst=local_dir,
+ file_masks=file_masks, check_metadata=not initialize)
+ self.log_report()
+ return local_dir
+
+ @_m.logbook_trace("Copy project data to remote drive", save_arguments=True)
+ def upload(self, remote_dir, user_folder, scenario_id, delete_local_files, file_masks):
+ folder_name = os.path.basename(remote_dir)
+ user_folder = user_folder or os.environ.get("USERNAME")
+ user_directory = _join(self.LOCAL_ROOT, user_folder)
+ local_dir = _join(user_directory, folder_name)
+
+ self._report = []
+ self._stats = {"size": 0, "count": 0}
+ if not file_masks:
+ # suggested defaults: "application", "bin", "input", "input_truck", "uec",
+ # "output\\iter*", "output\\*_1.csv", "output\\*_2.csv"
+ file_masks = []
+ # prepend the src dir to the project masks
+ file_masks = [_join(local_dir, p) for p in file_masks]
+ # add to mask the emme_project folder
+ file_masks.append(_join(local_dir, "emme_project"))
+
+ title_fcn = lambda t: t[8:] if t.startswith("(local)") else t
+ emmebank_paths = self._copy_emme_data(
+ src=local_dir, dst=remote_dir, title_fcn=title_fcn, scenario_id=scenario_id)
+ # copy all files (except Emme project, and other file_masks)
+ self._copy_dir(src=local_dir, dst=remote_dir, file_masks=file_masks)
+ self.log_report()
+
+ # data_explorer = _m.Modeller().desktop.data_explorer()
+ # for path in emmebank_paths:
+ # for db in data_explorer.databases():
+ # if db.core_emmebank.path == path:
+ # db.close()
+ # data_explorer.remove_database(db)
+ # data_explorer.databases()[0].open()
+
+ if delete_local_files:
+ # small pause for file handles to close
+ _time.sleep(2)
+ for name in os.listdir(local_dir):
+ path = os.path.join(local_dir, name)
+ if os.path.isfile(path):
+ try: # no raise, local files can be left behind
+ os.remove(path)
+ except:
+ pass
+ elif os.path.isdir(path):
+ try:
+ _shutil.rmtree(path, ignore_errors=False)
+ except:
+ pass
+
+ _shutil.rmtree(local_dir, ignore_errors=False)
+
+ def _copy_emme_data(self, src, dst, title_fcn, scenario_id, initialize=False):
+ # copy data from Database and Database_transit using API and import tool
+ # create new emmebanks and copy emmebank data to local drive
+ import_from_db = _m.Modeller().tool("inro.emme.data.database.import_from_database")
+ emmebank_paths = []
+ for db_dir in ["Database", "Database_transit"]:
+ src_db_path = _join(src, "emme_project", db_dir, "emmebank")
+ if not os.path.exists(src_db_path):
+ # skip if the database does not exist (will be created later)
+ continue
+ src_db = _eb.Emmebank(src_db_path)
+ dst_db_dir = _join(dst, "emme_project", db_dir)
+ dst_db_path = _join(dst_db_dir, "emmebank")
+ emmebank_paths.append(dst_db_path)
+ self._report.append("Copying Emme data from %s to %s" % (src_db_path, dst_db_path))
+ self._report.append("Start: %s" % _time.strftime("%c"))
+ if initialize:
+ # remove any existing database (overwrite)
+ if os.path.exists(dst_db_path):
+                    self._report.append("Warning: overwriting existing Emme database %s" % dst_db_path)
+ dst_db = _eb.Emmebank(dst_db_path)
+ dst_db.dispose()
+ if os.path.exists(dst_db_dir):
+ gen_utils.retry(lambda: _shutil.rmtree(dst_db_dir))
+ gen_utils.retry(lambda: os.mkdir(dst_db_dir))
+ dst_db = _eb.create(dst_db_path, src_db.dimensions)
+ else:
+ if not os.path.exists(dst_db_dir):
+ os.mkdir(dst_db_dir)
+ if os.path.exists(dst_db_path):
+ dst_db = _eb.Emmebank(dst_db_path)
+ else:
+ dst_db = _eb.create(dst_db_path, src_db.dimensions)
+
+ dst_db.title = title_fcn(src_db.title)
+ for prop in ["coord_unit_length", "unit_of_length", "unit_of_cost",
+ "unit_of_energy", "use_engineering_notation", "node_number_digits"]:
+ setattr(dst_db, prop, getattr(src_db, prop))
+
+ if initialize:
+ src_db.dispose()
+ continue
+
+ exfpars = [p for p in dir(src_db.extra_function_parameters) if p.startswith("e")]
+ for exfpar in exfpars:
+ value = getattr(src_db.extra_function_parameters, exfpar)
+ setattr(dst_db.extra_function_parameters, exfpar, value)
+
+ for s in src_db.scenarios():
+ if dst_db.scenario(s.id):
+ dst_db.delete_scenario(s)
+ for f in src_db.functions():
+ if dst_db.function(f.id):
+ dst_db.delete_function(f)
+ for m in src_db.matrices():
+ if dst_db.matrix(m.id):
+ dst_db.delete_matrix(m)
+ for p in dst_db.partitions():
+ p.description = ""
+ p.initialize(0)
+ ref_scen = dst_db.scenario(999)
+ if not ref_scen:
+ ref_scen = dst_db.create_scenario(999)
+ import_from_db(
+ src_database=src_db,
+ src_scenario_ids=[s.id for s in src_db.scenarios()],
+ src_function_ids=[f.id for f in src_db.functions()],
+ copy_path_strat_files=True,
+ dst_database=dst_db,
+ dst_zone_system_scenario=ref_scen)
+ dst_db.delete_scenario(999)
+ src_matrices = [m.id for m in src_db.matrices()]
+ src_partitions = [p.id for p in src_db.partitions()
+ if not(p.description == '' and not (sum(p.raw_data)))]
+ if src_matrices or src_partitions:
+ import_from_db(
+ src_database=src_db,
+ src_zone_system_scenario=src_db.scenario(scenario_id),
+ src_matrix_ids=src_matrices,
+ src_partition_ids=src_partitions,
+ dst_database=dst_db,
+ dst_zone_system_scenario=dst_db.scenario(scenario_id))
+ src_db.dispose()
+ self._report.append("End: %s" % _time.strftime("%c"))
+ return emmebank_paths
+
+ def _copy_dir(self, src, dst, file_masks, check_metadata=False):
+ for name in os.listdir(src):
+ src_path = _join(src, name)
+ skip_file = bool([1 for mask in file_masks if _fnmatch(src_path, mask)])
+ if skip_file:
+ continue
+ dst_path = _join(dst, name)
+ if os.path.isfile(src_path):
+ size = os.path.getsize(src_path)
+ if check_metadata and os.path.exists(dst_path):
+ same_size = os.path.getsize(dst_path) == size
+ same_time = os.path.getmtime(dst_path) == os.path.getmtime(src_path)
+ if same_size and same_time:
+ continue
+ self._report.append(_time.strftime("%c"))
+ self._report.append(dst_path + file_size(size))
+ self._stats["size"] += size
+ self._stats["count"] += 1
+                # shutil.copy2 performs 5-10 times faster on download, and ~20% faster on upload
+                # than os.system copy calls
+                src_time = os.path.getmtime(src_path)
+                if name == 'persons.csv' or "mgra13_based" in name:
+                    # only overwrite these inputs if the source is at least as new
+                    if os.path.exists(dst_path):
+                        if os.path.getmtime(dst_path) <= src_time:
+                            _shutil.copy2(src_path, dst_path)
+                    else:
+                        _shutil.copy2(src_path, dst_path)
+                else:
+                    _shutil.copy2(src_path, dst_path)
+ self._report.append(_time.strftime("%c"))
+ elif os.path.isdir(src_path):
+ if not os.path.exists(dst_path):
+ os.mkdir(dst_path)
+ self._report.append(dst_path)
+ self._copy_dir(src_path, dst_path, file_masks, check_metadata)
+
+ def log_report(self):
+ size, count = file_size(self._stats["size"]), self._stats["count"]
+ name = "File copy report: copied {count} files {size}".format(count=count, size=size)
+ report = _m.PageBuilder(title=name)
+ report.add_html(" ".join(self._report))
+ _m.logbook_write(name, report.render())
+
+
+# units are decimal (powers of 1000), matching the divisor below
+_suffixes = ['bytes', 'KB', 'MB', 'GB', 'TB']
+
+def file_size(size):
+ order = int(log10(size) / 3) if size else 0
+ return ' {} {}'.format(round(float(size) / (10**(order*3)), 1), _suffixes[order])
diff --git a/sandag_abm/src/main/emme/toolbox/utilities/general.py b/sandag_abm/src/main/emme/toolbox/utilities/general.py
new file mode 100644
index 0000000..6879412
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/utilities/general.py
@@ -0,0 +1,386 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// utilities/general.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+
+TOOLBOX_ORDER = 102
+
+
+import inro.modeller as _m
+import inro.emme.datatable as _dt
+import inro.emme.core.exception as _except
+from osgeo import ogr as _ogr
+from contextlib import contextmanager as _context
+from itertools import izip as _izip
+import traceback as _traceback
+import re as _re
+import json as _json
+import time as _time
+import os
+import numpy as _numpy
+
+_omx = _m.Modeller().module("sandag.utilities.omxwrapper")
+
+
+class UtilityTool(_m.Tool()):
+
+ tool_run_msg = ""
+
+ def page(self):
+ pb = _m.ToolPageBuilder(self, runnable=False)
+ pb.title = "General utility"
+ pb.description = """Utility tool / module for common code. Not runnable."""
+ pb.branding_text = "- SANDAG"
+ if self.tool_run_msg:
+ pb.add_html(self.tool_run_msg)
+
+ return pb.render()
+
+ def run(self):
+ pass
+
+
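+# Convenience wrapper for the Emme network calculator tool: builds the
+# NETWORK_CALCULATION spec from the result, expression and (optional)
+# selections, defaulting to all links.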
+class NetworkCalculator(object):
+ def __init__(self, scenario):
+ self._scenario = scenario
+ self._network_calc = _m.Modeller().tool(
+ "inro.emme.network_calculation.network_calculator")
+
+ def __call__(self, result, expression, selections=None, aggregation=None):
+ spec = {
+ "result": result,
+ "expression": expression,
+ "aggregation": aggregation,
+ "type": "NETWORK_CALCULATION"
+ }
+ if selections is not None:
+ if isinstance(selections, basestring):
+ selections = {"link": selections}
+ spec["selections"] = selections
+ else:
+ spec["selections"] = {"link": "all"}
+ return self._network_calc(spec, self._scenario)
+
+
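+# Context manager which creates the requested number of temporary
+# matrices and guarantees their deletion on exit.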
+@_context
+def temp_matrices(emmebank, mat_type, total=1, default_value=0.0):
+ matrices = []
+ try:
+ while len(matrices) != int(total):
+ try:
+ ident = emmebank.available_matrix_identifier(mat_type)
+ except _except.CapacityError:
+ raise _except.CapacityError(
+ "Insufficient room for %s required temp matrices." % total)
+ matrices.append(emmebank.create_matrix(ident, default_value))
+ yield matrices[:]
+ finally:
+ for matrix in matrices:
+ # In case of transient file conflicts or lag in Windows file handles
+ # over the network, attempt to delete the file up to 10 times with
+ # increasing delays: 0.05, 0.2, 0.45, 0.8 ... 5 seconds
+ remove_matrix = lambda: emmebank.delete_matrix(matrix)
+ retry(remove_matrix)
+
+
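+# Call fcn up to `attempts` times, sleeping init_wait * attempt**2 seconds
+# after each failure; the error from the final attempt is re-raised.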
+def retry(fcn, attempts=10, init_wait=0.05, error_types=(RuntimeError, WindowsError)):
+ for attempt in range(1, attempts + 1):
+ try:
+ fcn()
+ return
+ except error_types:
+ if attempt == attempts:
+ raise
+ _time.sleep(init_wait * (attempt**2))
+
+
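+# Context manager which creates temporary extra attributes for the given
+# identifiers and deletes them on exit.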
+@_context
+def temp_attrs(scenario, attr_type, idents, default_value=0.0):
+ attrs = []
+ try:
+ for ident in idents:
+ attrs.append(scenario.create_extra_attribute(attr_type, ident, default_value))
+ yield attrs[:]
+ finally:
+ for attr in attrs:
+ scenario.delete_extra_attribute(attr)
+
+
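+# Context manager which saves the listed attribute values from the
+# scenario and restores them (even on error) when the block exits.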
+@_context
+def backup_and_restore(scenario, backup_attributes):
+ backup = {}
+ for elem_type, attributes in backup_attributes.iteritems():
+ backup[elem_type] = scenario.get_attribute_values(elem_type, attributes)
+ try:
+ yield
+ finally:
+ for elem_type, attributes in backup_attributes.iteritems():
+ scenario.set_attribute_values(elem_type, attributes, backup[elem_type])
+
+
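+# Load a data table from an Emme project data table, a file-based data
+# source, or in-memory data; values are exposed as numpy arrays (with
+# optional text-to-numeric conversion) and WKT geometry is expanded to
+# coordinate lists.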
+class DataTableProc(object):
+
+ def __init__(self, table_name, path=None, data=None, convert_numeric=False):
+ modeller = _m.Modeller()
+ desktop = modeller.desktop
+ project = desktop.project
+ self._dt_db = dt_db = project.data_tables()
+ self._convert_numeric = convert_numeric
+ if path:
+ source = _dt.DataSource(path)
+ layer = source.layer(table_name)
+ self._data = layer.get_data()
+ elif data:
+ table = dt_db.create_table(table_name, data, overwrite=True)
+ self._data = data
+ else:
+ table = dt_db.table(table_name)
+ self._data = table.get_data()
+ self._load_data()
+
+ def _load_data(self):
+ data = self._data
+ if self._convert_numeric:
+ values = []
+ for a in data.attributes():
+ attr_values = _numpy.copy(a.values)
+ attr_values[attr_values == ''] = 0
+ try:
+ values.append(attr_values.astype("int"))
+ except ValueError:
+ try:
+ values.append(attr_values.astype("float"))
+ except ValueError:
+ values.append(a.values)
+ self._values = values
+ else:
+ self._values = [a.values for a in data.attributes()]
+ self._attr_names = [a.name for a in data.attributes()]
+ self._index = dict((k, i) for i,k in enumerate(self._attr_names))
+ if "geometry" in self._attr_names:
+ geo_coords = []
+ attr = data.attribute("geometry")
+ for record in attr.values:
+ geo_obj = _ogr.CreateGeometryFromWkt(record.text)
+ geo_coords.append(geo_obj.GetPoints())
+ self._values.append(geo_coords)
+ self._attr_names.append("geo_coordinates")
+
+ def __iter__(self):
+ values, attr_names = self._values, self._attr_names
+ return (dict(_izip(attr_names, record))
+ for record in _izip(*values))
+
+ def save(self, name, overwrite=False):
+ self._dt_db.create_table(name, self._data, overwrite=overwrite)
+
+ def values(self, name):
+ index = self._index[name]
+ return self._values[index]
+
+
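+# Mixin providing dict-style access to tool attributes along with JSON
+# serialization of those attributes for logbook snapshots.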
+class Snapshot(object):
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def to_snapshot(self):
+ try:
+ attributes = getattr(self, "attributes", [])
+ snapshot = {}
+ for name in attributes:
+ snapshot[name] = unicode(self[name])
+ return _json.dumps(snapshot)
+ except Exception:
+ return "{}"
+
+ def from_snapshot(self, snapshot):
+ try:
+ snapshot = _json.loads(snapshot)
+ attributes = getattr(self, "attributes", [])
+ for name in attributes:
+ self[name] = snapshot[name]
+ except Exception, error:
+ self.tool_run_msg = _m.PageBuilder.format_exception(
+ error, _traceback.format_exc(error), False)
+ return self
+
+ def get_state(self):
+ attributes = getattr(self, "attributes", [])
+ state = {}
+ for name in attributes:
+ try:
+ state[name] = self[name]
+ except _m.AttributeError, error:
+ state[name] = unicode(error)
+ return state
+
+
+def log_snapshot(name, namespace, snapshot):
+ try:
+ _m.logbook_snapshot(name=name, comment="", namespace=namespace,
+ value=_json.dumps(snapshot))
+ except Exception as error:
+ print error
+
+
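+# Context manager which exports Emme matrices to a single OMX file within
+# a logbook trace; omx_key controls how the OMX matrix names are
+# generated ("NAME", "ID" or "ID_NAME").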
+class ExportOMX(object):
+ def __init__(self, file_path, scenario, omx_key="NAME"):
+ self.file_path = file_path
+ self.scenario = scenario
+ self.emmebank = scenario.emmebank
+ self.omx_key = omx_key
+
+ @property
+ def omx_key(self):
+ return self._omx_key
+
+ @omx_key.setter
+ def omx_key(self, omx_key):
+ self._omx_key = omx_key
+ text_encoding = self.emmebank.text_encoding
+ if omx_key == "ID_NAME":
+ self.generate_key = lambda m: "%s_%s" % (
+ m.id.encode(text_encoding), m.name.encode(text_encoding))
+ elif omx_key == "NAME":
+ self.generate_key = lambda m: m.name.encode(text_encoding)
+ elif omx_key == "ID":
+ self.generate_key = lambda m: m.id.encode(text_encoding)
+
+ def __enter__(self):
+ self.trace = _m.logbook_trace(name="Export matrices to OMX",
+ attributes={
+ "file_path": self.file_path, "omx_key": self.omx_key,
+ "scenario": self.scenario, "emmebank": self.emmebank.path})
+ self.trace.__enter__()
+ self.omx_file = _omx.open_file(self.file_path, 'w')
+ try:
+ self.omx_file.create_mapping('zone_number', self.scenario.zone_numbers)
+ except LookupError:
+ pass
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.omx_file.close()
+ self.trace.__exit__(exc_type, exc_val, exc_tb)
+
+ def write_matrices(self, matrices):
+ if isinstance(matrices, dict):
+ for key, matrix in matrices.iteritems():
+ self.write_matrix(matrix, key)
+ else:
+ for matrix in matrices:
+ self.write_matrix(matrix)
+
+ def write_matrix(self, matrix, key=None):
+ text_encoding = self.emmebank.text_encoding
+ matrix = self.emmebank.matrix(matrix)
+ if key is None:
+ key = self.generate_key(matrix)
+ numpy_array = matrix.get_numpy_data(self.scenario.id)
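+ # origin / destination vectors are written as single-column or
+ # single-row 2-D arrays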
+ if matrix.type == "DESTINATION":
+ n_zones = len(numpy_array)
+ numpy_array = _numpy.resize(numpy_array, (1, n_zones))
+ elif matrix.type == "ORIGIN":
+ n_zones = len(numpy_array)
+ numpy_array = _numpy.resize(numpy_array, (n_zones, 1))
+ attrs = {"description": matrix.description.encode(text_encoding)}
+ self.write_array(numpy_array, key, attrs)
+
+ def write_clipped_array(self, numpy_array, key, a_min, a_max=None, attrs=None):
+ if a_max is not None:
+ numpy_array = numpy_array.clip(a_min, a_max)
+ else:
+ numpy_array = numpy_array.clip(a_min)
+ self.write_array(numpy_array, key, attrs)
+
+ def write_array(self, numpy_array, key, attrs=None):
+ # default to a fresh dict to avoid mutating a shared default argument
+ if attrs is None:
+ attrs = {}
+ shape = numpy_array.shape
+ if len(shape) == 2:
+ # store one full row per chunk
+ chunkshape = (1, shape[1])
+ else:
+ chunkshape = None
+ attrs["source"] = "Emme"
+ numpy_array = numpy_array.astype(dtype="float64", copy=False)
+ self.omx_file.create_matrix(
+ key, obj=numpy_array, chunkshape=chunkshape, attrs=attrs)
+
+
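+# Lazily opens OMX files by formatted name, caching the open file handles
+# and closing them all when the with-block exits.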
+class OMXManager(object):
+ def __init__(self, directory, name_tmplt):
+ self._directory = directory
+ self._name_tmplt = name_tmplt
+ self._omx_files = {}
+
+ def lookup(self, name_args, key):
+ file_name = self._name_tmplt % name_args
+ omx_file = self._omx_files.get(file_name)
+ if omx_file is None:
+ file_path = os.path.join(self._directory, file_name)
+ omx_file = _omx.open_file(file_path, 'r')
+ self._omx_files[file_name] = omx_file
+ return omx_file[key].read()
+
+ def file_exists(self, name_args):
+ file_name = self._name_tmplt % name_args
+ file_path = os.path.join(self._directory, file_name)
+ return os.path.isfile(file_path)
+
+ def zone_list(self, file_name):
+ omx_file = self._omx_files[file_name]
+ mapping_name = omx_file.list_mappings()[0]
+ zone_mapping = omx_file.mapping(mapping_name).items()
+ zone_mapping.sort(key=lambda x: x[1])
+ omx_zones = [x[0] for x in zone_mapping]
+ return omx_zones
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ for omx_file in self._omx_files.values():
+ omx_file.close()
+ self._omx_files = {}
+
+
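+# Minimal CSV reader which yields one dict per row, keyed by the
+# upper-cased header fields.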
+class CSVReader(object):
+ def __init__(self, path):
+ self._path = path
+ self._f = None
+ self._fields = None
+
+ def __enter__(self):
+ self._f = open(self._path)
+ header = self._f.next()
+ self._fields = [h.strip().upper() for h in header.split(",")]
+ return self
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ self._f.close()
+ self._f = None
+ self._fields = None
+
+ def __iter__(self):
+ return self
+
+ @property
+ def fields(self):
+ return list(self._fields)
+
+ def next(self):
+ line = self._f.next()
+ tokens = [t.strip() for t in line.split(",")]
+ return dict(zip(self._fields, tokens))
diff --git a/sandag_abm/src/main/emme/toolbox/utilities/omxwrapper.py b/sandag_abm/src/main/emme/toolbox/utilities/omxwrapper.py
new file mode 100644
index 0000000..67564f8
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/utilities/omxwrapper.py
@@ -0,0 +1,91 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2019. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// utilities/omxwrapper.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#///////////////////////////////////////////////////////////////////////////////
+import inro.modeller as _m
+
+
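+# Prefer the openmatrix package where available; otherwise fall back to
+# the older omx package, which uses camelCase method names.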
+try:
+ import openmatrix as _omx
+
+
+ def open_file(file_path, mode):
+ return OmxMatrix(_omx.open_file(file_path, mode))
+except ImportError:
+ import omx as _omx
+
+
+ def open_file(file_path, mode):
+ return OmxMatrix(_omx.openFile(file_path, mode))
+
+class OmxMatrix(object):
+
+ def __init__(self, matrix):
+ self.matrix = matrix
+
+ def mapping(self, name):
+ return self.matrix.mapping(name)
+
+ def list_mappings(self):
+ return self.matrix.listMappings()
+
+ def __getitem__(self, key):
+ return self.matrix[key]
+
+ def __setitem__(self, key, value):
+ self.matrix[key] = value
+
+ def create_mapping(self, name, ids):
+ try:
+ self.matrix.create_mapping(name, ids) # openmatrix (Emme 4.4 and above)
+ except Exception:
+ self.matrix.createMapping(name, ids) # omx (Emme 4.3.7)
+
+ def create_matrix(self, key, obj, chunkshape, attrs):
+ try:
+ # openmatrix (Emme 4.4 and above)
+ self.matrix.create_matrix(
+ key,
+ obj=obj,
+ chunkshape=chunkshape,
+ attrs=attrs
+ )
+ except Exception:
+ # omx (Emme 4.3.7)
+ self.matrix.createMatrix(
+ key,
+ obj=obj,
+ chunkshape=chunkshape,
+ attrs=attrs
+ )
+
+ def close(self):
+ self.matrix.close()
+
+
+
+class OmxWrapper(_m.Tool()):
+ def page(self):
+ pb = _m.ToolPageBuilder(
+ self,
+ runnable=False,
+ title="OMX wrapper",
+ description="OMX utility for handling of OMX related libraries"
+ )
+ return pb.render()
\ No newline at end of file
diff --git a/sandag_abm/src/main/emme/toolbox/utilities/properties.py b/sandag_abm/src/main/emme/toolbox/utilities/properties.py
new file mode 100644
index 0000000..228bc07
--- /dev/null
+++ b/sandag_abm/src/main/emme/toolbox/utilities/properties.py
@@ -0,0 +1,599 @@
+#//////////////////////////////////////////////////////////////////////////////
+#//// ///
+#//// Copyright INRO, 2016-2017. ///
+#//// Rights to use and modify are granted to the ///
+#//// San Diego Association of Governments and partner agencies. ///
+#//// This copyright notice must be preserved. ///
+#//// ///
+#//// utilities/properties.py ///
+#//// ///
+#//// ///
+#//// ///
+#//// ///
+#//////////////////////////////////////////////////////////////////////////////
+
+TOOLBOX_ORDER = 103
+
+
+import inro.modeller as _m
+import traceback as _traceback
+from collections import OrderedDict
+import csv
+import os
+import time
+
+
+class PropertiesSetter(object):
+
+ startFromIteration = _m.Attribute(int)
+ sample_rates = _m.Attribute(str)
+
+ useLocalDrive = _m.Attribute(bool)
+ skip4Ds = _m.Attribute(bool)
+ skipBuildNetwork = _m.Attribute(bool)
+ skipInputChecker = _m.Attribute(bool)
+ skipInitialization = _m.Attribute(bool)
+ deleteAllMatrices = _m.Attribute(bool)
+ skipCopyWarmupTripTables = _m.Attribute(bool)
+ skipWalkLogsums = _m.Attribute(bool)
+ skipCopyWalkImpedance = _m.Attribute(bool)
+ skipBikeLogsums = _m.Attribute(bool)
+ skipCopyBikeLogsum = _m.Attribute(bool)
+
+ skipHighwayAssignment_1 = _m.Attribute(bool)
+ skipHighwayAssignment_2 = _m.Attribute(bool)
+ skipHighwayAssignment_3 = _m.Attribute(bool)
+ skipTransitSkimming_1 = _m.Attribute(bool)
+ skipTransitSkimming_2 = _m.Attribute(bool)
+ skipTransitSkimming_3 = _m.Attribute(bool)
+ skipTransponderExport_1 = _m.Attribute(bool)
+ skipTransponderExport_2 = _m.Attribute(bool)
+ skipTransponderExport_3 = _m.Attribute(bool)
+ skipCoreABM_1 = _m.Attribute(bool)
+ skipCoreABM_2 = _m.Attribute(bool)
+ skipCoreABM_3 = _m.Attribute(bool)
+ skipOtherSimulateModel_1 = _m.Attribute(bool)
+ skipOtherSimulateModel_2 = _m.Attribute(bool)
+ skipOtherSimulateModel_3 = _m.Attribute(bool)
+ skipMAASModel_1 = _m.Attribute(bool)
+ skipMAASModel_2 = _m.Attribute(bool)
+ skipMAASModel_3 = _m.Attribute(bool)
+ skipCTM_1 = _m.Attribute(bool)
+ skipCTM_2 = _m.Attribute(bool)
+ skipCTM_3 = _m.Attribute(bool)
+ skipEI_1 = _m.Attribute(bool)
+ skipEI_2 = _m.Attribute(bool)
+ skipEI_3 = _m.Attribute(bool)
+ skipExternalExternal_1 = _m.Attribute(bool)
+ skipExternalExternal_2 = _m.Attribute(bool)
+ skipExternalExternal_3 = _m.Attribute(bool)
+ skipTruck_1 = _m.Attribute(bool)
+ skipTruck_2 = _m.Attribute(bool)
+ skipTruck_3 = _m.Attribute(bool)
+ skipTripTableCreation_1 = _m.Attribute(bool)
+ skipTripTableCreation_2 = _m.Attribute(bool)
+ skipTripTableCreation_3 = _m.Attribute(bool)
+
+ skipFinalHighwayAssignment = _m.Attribute(bool)
+ skipFinalHighwayAssignmentStochastic = _m.Attribute(bool)
+ skipFinalTransitAssignment = _m.Attribute(bool)
+ skipVisualizer = _m.Attribute(bool)
+ skipDataExport = _m.Attribute(bool)
+ skipDataLoadRequest = _m.Attribute(bool)
+ skipDeleteIntermediateFiles = _m.Attribute(bool)
+
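+ # The per-iteration skip flags are stored as separate _1/_2/_3
+ # attributes but read and written as three-element lists through the
+ # properties defined below.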
+ def _get_list_prop(self, name):
+ return [getattr(self, name + suffix) for suffix in ["_1", "_2", "_3"]]
+
+ def _set_list_prop(self, name, value):
+ try:
+ for v_sub, suffix in zip(value, ["_1", "_2", "_3"]):
+ setattr(self, name + suffix, v_sub)
+ except Exception:
+ for suffix in ["_1", "_2", "_3"]:
+ setattr(self, name + suffix, False)
+
+ skipHighwayAssignment = property(
+ fget=lambda self: self._get_list_prop("skipHighwayAssignment"),
+ fset=lambda self, value: self._set_list_prop("skipHighwayAssignment", value))
+ skipTransitSkimming = property(
+ fget=lambda self: self._get_list_prop("skipTransitSkimming"),
+ fset=lambda self, value: self._set_list_prop("skipTransitSkimming", value))
+ skipTransponderExport = property(
+ fget=lambda self: self._get_list_prop("skipTransponderExport"),
+ fset=lambda self, value: self._set_list_prop("skipTransponderExport", value))
+ skipCoreABM = property(
+ fget=lambda self: self._get_list_prop("skipCoreABM"),
+ fset=lambda self, value: self._set_list_prop("skipCoreABM", value))
+ skipOtherSimulateModel = property(
+ fget=lambda self: self._get_list_prop("skipOtherSimulateModel"),
+ fset=lambda self, value: self._set_list_prop("skipOtherSimulateModel", value))
+ skipMAASModel = property(
+ fget=lambda self: self._get_list_prop("skipMAASModel"),
+ fset=lambda self, value: self._set_list_prop("skipMAASModel", value))
+ skipCTM = property(
+ fget=lambda self: self._get_list_prop("skipCTM"),
+ fset=lambda self, value: self._set_list_prop("skipCTM", value))
+ skipEI = property(
+ fget=lambda self: self._get_list_prop("skipEI"),
+ fset=lambda self, value: self._set_list_prop("skipEI", value))
+ skipExternalExternal = property(
+ fget=lambda self: self._get_list_prop("skipExternalExternal"),
+ fset=lambda self, value: self._set_list_prop("skipExternalExternal", value))
+ skipTruck = property(
+ fget=lambda self: self._get_list_prop("skipTruck"),
+ fset=lambda self, value: self._set_list_prop("skipTruck", value))
+ skipTripTableCreation = property(
+ fget=lambda self: self._get_list_prop("skipTripTableCreation"),
+ fset=lambda self, value: self._set_list_prop("skipTripTableCreation", value))
+
+ def __init__(self):
+ self._run_model_names = (
+ "useLocalDrive", "skip4Ds", "skipInputChecker",
+ "startFromIteration", "skipInitialization", "deleteAllMatrices",
+ "skipCopyWarmupTripTables", "skipCopyBikeLogsum", "skipCopyWalkImpedance",
+ "skipWalkLogsums", "skipBikeLogsums", "skipBuildNetwork",
+ "skipHighwayAssignment", "skipTransitSkimming", "skipTransponderExport",
+ "skipCoreABM", "skipOtherSimulateModel", "skipMAASModel", "skipCTM",
+ "skipEI", "skipExternalExternal", "skipTruck", "skipTripTableCreation",
+ "skipFinalHighwayAssignment", "skipFinalHighwayAssignmentStochastic",
+ "skipFinalTransitAssignment", "skipVisualizer", "skipDataExport",
+ "skipDataLoadRequest", "skipDeleteIntermediateFiles")
+ self._properties = None
+
+ def add_properties_interface(self, pb, disclosure=False):
+ tool_proxy_tag = pb.tool_proxy_tag
+ title = "Run model - skip steps"
+
+ pb.add_text_box('sample_rates', title="Sample rate by iteration:", size=20)
+
+ contents = ["""
+
+
+
+
+
+
+
+
+
+
+
Iteration 1
+
Iteration 2
+
Iteration 3
+
""" % {"tool_proxy_tag": tool_proxy_tag}]
+
+ skip_startup_items = [
+ ("useLocalDrive", "Use the local drive during the model run"),
+ ("skip4Ds", "Skip running 4Ds"),
+ ("skipBuildNetwork", "Skip build of highway and transit network"),
+ ("skipInputChecker", "Skip running input checker"),
+ ("skipInitialization", "Skip matrix and transit database initialization"),
+ ("deleteAllMatrices", " Delete all matrices"),
+ ("skipCopyWarmupTripTables","Skip import of warmup trip tables"),
+ ("skipWalkLogsums", "Skip walk logsums"),
+ ("skipCopyWalkImpedance", "Skip copy of walk impedance"),
+ ("skipBikeLogsums", "Skip bike logsums"),
+ ("skipCopyBikeLogsum", "Skip copy of bike logsum"),
+ ]
+ skip_per_iteration_items = [
+ ("skipHighwayAssignment", "Skip highway assignments and skims"),
+ ("skipTransitSkimming", "Skip transit skims"),
+ ("skipTransponderExport", "Skip transponder accessibilities"),
+ ("skipCoreABM", "Skip core ABM"),
+ ("skipOtherSimulateModel", "Skip other simulation model"),
+ ("skipMAASModel", "Skip MAAS model"),
+ ("skipCTM", "Skip commercial vehicle sub-model"),
+ ("skipTruck", "Skip truck sub-model"),
+ ("skipEI", "Skip external-internal sub-model"),
+ ("skipExternalExternal", "Skip external-external sub-model"),
+ ("skipTripTableCreation", "Skip trip table creation"),
+ ]
+ skip_final_items = [
+ ("skipFinalHighwayAssignment", "Skip final highway assignments"),
+ ("skipFinalHighwayAssignmentStochastic", " Skip stochastic assignment"),
+ ("skipFinalTransitAssignment", "Skip final transit assignments"),
+ ("skipVisualizer", "Skip running visualizer"),
+ ("skipDataExport", "Skip data export"),
+ ("skipDataLoadRequest", "Skip data load request"),
+ ("skipDeleteIntermediateFiles", "Skip delete intermediate files"),
+ ]
+
+ if disclosure:
+ contents.insert(0, """
+
+
%s
""" % title)
+ title = ""
+
+ checkbox = '<td><input type="checkbox" data-ref="%(tag)s.%(name)s"></td>'
+ checkbox_no_data = '<td><input type="checkbox" name="%(name)s"></td>'
+
+ for name, label in skip_startup_items:
+ contents.append('<tr><td>%s</td>' % label)
+ contents.append(checkbox % {"name": name, "tag": tool_proxy_tag})
+ # header row of "select all" checkboxes, one per iteration column
+ contents.append('<tr><td>Select all</td>')
+ for i in range(1, 4):
+ contents.append(checkbox_no_data % {"name": "all" + "_" + str(i)})
+ for name, label in skip_per_iteration_items:
+ contents.append('<tr><td>%s</td>' % label)
+ for i in range(1, 4):
+ contents.append(checkbox % {"name": name + "_" + str(i), "tag": tool_proxy_tag})
+ for name, label in skip_final_items:
+ contents.append("