Commit

Added ref data, use OD matrix to create plans
rakow committed Aug 11, 2024
1 parent 41a7c35 commit 974db6e
Showing 11 changed files with 707 additions and 65 deletions.
1 change: 1 addition & 0 deletions Makefile
@@ -106,6 +106,7 @@ input/$V/$N-activities-$V-10pct.plans.xml.gz: input/$V/$N-static-$V-10pct.plans.
$(sc) prepare create-daily-plans --input $< --output $@\
--persons src/main/python/table-persons.csv\
--activities src/main/python/table-activities.csv\
--commuter src/main/python/work-commuter.csv\
--shp $(kyoto)/data/postalcodes.gpkg\
--facilities $(word 2,$^)\
--network $(word 3,$^)\
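The new --commuter argument points create-daily-plans at the work-commuter table produced by src/main/python/create_od_matrix.py (added below). Based on that script and the reader added to CreateDailyPlans.java, the file holds one row per home-work zone pair with a weighted commuter count, roughly like this (zone codes invented for illustration):

home,work,n
604,601,1250.0
604,604,8310.5
612,604,430.0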
4 changes: 3 additions & 1 deletion input/activity_mapping.json
@@ -26,6 +26,7 @@
"college": ["edu_higher", "work"],
"sports_hall": ["leisure", "work"],
"stadium": ["leisure", "work"],
"garage": ["parking"],
"apartments": ["resident"],
"yes": ["resident"],
"house": ["resident"],
@@ -78,7 +79,8 @@
"post_depot": ["p_business", "work"],
"post_office": ["p_business", "work"],
"prison": ["p_business", "work"],
"townhall": ["p_business", "work"]
"townhall": ["p_business", "work"],
"parking": ["parking"]
},
"landuse": {
"commercial": ["shop", "work", "delivery"],
4 changes: 2 additions & 2 deletions pom.xml
@@ -9,7 +9,7 @@
<!-- <version>15.0</version> -->

<!-- PR-labelled release -->
<version>2025.0-PR3390</version>
<version>2025.0-PR3402</version>

<!-- snapshot == not recommended: rather use PR-labelled release!-->
<!-- <version>2025.0-SNAPSHOT</version>-->
@@ -68,7 +68,7 @@
<groupId>com.github.matsim-scenarios</groupId>
<artifactId>matsim-berlin</artifactId>
<!-- <version>6.3-SNAPSHOT</version>-->
<version>0b10118f80</version>
<version>93c414be68</version>
</dependency>


80 changes: 65 additions & 15 deletions src/main/java/org/matsim/prepare/population/CreateDailyPlans.java
@@ -1,6 +1,9 @@
package org.matsim.prepare.population;

import it.unimi.dsi.fastutil.Pair;
import it.unimi.dsi.fastutil.doubles.DoubleList;
import it.unimi.dsi.fastutil.objects.Object2DoubleMap;
import it.unimi.dsi.fastutil.objects.Object2DoubleOpenHashMap;
import me.tongfei.progressbar.ProgressBar;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
@@ -37,6 +40,7 @@
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
@@ -53,6 +57,7 @@ public class CreateDailyPlans implements MATSimAppCommand, PersonAlgorithm {
private static final Logger log = LogManager.getLogger(CreateDailyPlans.class);
private final Map<String, CSVRecord> persons = new HashMap<>();
private final Map<Key, List<String>> groups = new HashMap<>();
private final AtomicInteger counter = new AtomicInteger();
@CommandLine.Option(names = "--input", description = "Path to input population.")
private Path input;
@CommandLine.Option(names = "--output", description = "Path to output population", required = true)
@@ -65,9 +70,10 @@ public class CreateDailyPlans implements MATSimAppCommand, PersonAlgorithm {
private Path facilityPath;
@CommandLine.Option(names = "--network", description = "Path to network file", required = true)
private Path networkPath;
@CommandLine.Option(names = "--commuter", description = "Path to commuter csv file", required = true)
private Path commuterPath;
@CommandLine.Option(names = "--seed", description = "Seed used to sample locations", defaultValue = "1")
private long seed;

@CommandLine.Mixin
private ShpOptions shp;

@@ -76,15 +82,22 @@ public class CreateDailyPlans implements MATSimAppCommand, PersonAlgorithm {
private Network network;
private Map<String, List<CSVRecord>> activities;
private Map<String, Geometry> zones;
private Object2DoubleMap<Pair<String, String>> commuter;
private PlanBuilder planBuilder;
private AtomicInteger counter = new AtomicInteger();

private ProgressBar pb;

public static void main(String[] args) {
new CreateDailyPlans().execute(args);
}

private static String getZone(String location, String zone) {
if (zone.isBlank() || zone.equals(location))
return location;
else
// The last two digits of the postal code are not known
return location + "_" + zone.substring(0, zone.length() - 2);
}

/**
* Initializes random number generator with person specific seed.
*/
@@ -104,7 +117,7 @@ public Integer call() throws Exception {
zones = readZones(shp);
activities = RunActivitySampling.readActivities(activityPath);

facilities = new FacilityIndex(facilityPath.toString(), OpenKyotoScenario.CRS);
facilities = new FacilityIndex(facilityPath.toString(), createFacilityFilter(), OpenKyotoScenario.CRS);
planBuilder = new PlanBuilder(createZoneSelector());

// Remove activities with missing leg duration
@@ -119,6 +132,14 @@ public Integer call() throws Exception {
readPersons(csv);
}

commuter = new Object2DoubleOpenHashMap<>();
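// Expected columns: home, work, n (weighted commuter counts written by src/main/python/create_od_matrix.py)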
try (CSVParser csv = CSVParser.parse(commuterPath, StandardCharsets.UTF_8,
CSVFormat.DEFAULT.builder().setHeader().setSkipHeaderRecord(true).build())) {
for (CSVRecord r : csv) {
commuter.put(Pair.of(r.get("home"), r.get("work")), Double.parseDouble(r.get("n")));
}
}

network = NetworkUtils.readNetwork(networkPath.toString());
population = PopulationUtils.readPopulation(input.toString());

@@ -133,6 +154,33 @@ public Integer call() throws Exception {
return 0;
}

/**
* Assigns zones to facilities.
*/
private Predicate<ActivityFacility> createFacilityFilter() {

STRtree index = new STRtree();
for (Map.Entry<String, Geometry> e : zones.entrySet()) {
if (e.getKey().contains("_"))
continue;

index.insert(e.getValue().getEnvelopeInternal(), e);
}
index.build();

return facility -> {
Point point = MGC.coord2Point(facility.getCoord());
List<Map.Entry<String, Geometry>> matches = index.query(point.getEnvelopeInternal());
for (Map.Entry<String, Geometry> match : matches) {
if (match.getValue().contains(point)) {
facility.getAttributes().putAttribute("location", match.getKey());
}
}
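// Keep every facility: the predicate is used only for its side effect of tagging each facility with the zone that contains it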

return true;
};
}

/**
* Build map of zones to facilities.
*/
@@ -152,6 +200,7 @@ private Function<CSVRecord, Set<ActivityFacility>> createZoneSelector() {
for (Map.Entry<String, Geometry> match : matches) {
if (match.getValue().contains(point)) {
zoneFacilities.computeIfAbsent(match.getKey(), k -> new HashSet<>()).add(facility);
break;
}
}
}
@@ -239,6 +288,7 @@ public void run(Person person) {
private void sampleLocationsByDist(Person person, Plan plan, SplittableRandom rnd) {

Coord homeCoord = Attributes.getHomeCoord(person);
String homeZone = Objects.toString(person.getAttributes().getAttribute("city"));

List<Activity> acts = TripStructureUtils.getActivities(plan, TripStructureUtils.StageActivityHandling.ExcludeStageActivities);

@@ -264,8 +314,6 @@ private void sampleLocationsByDist(Person person, Plan plan, SplittableRandom rn
location = fixedLocations.get(type);
}

// TODO: some commuting information should be integrated as well because some zones should be more likely to be selected

if (location == null) {
// Needed for lambda
final Coord refCoord = lastCoord;
@@ -279,7 +327,17 @@ private void sampleLocationsByDist(Person person, Plan plan, SplittableRandom rn
List<AttributedActivityFacility> res = query.stream().filter(f -> checkDistanceBound(dist, refCoord, f.getCoord(), b)).toList();

if (!res.isEmpty()) {
location = res.get(rnd.nextInt(res.size()));
if (type.equals("work")) {

// Sample a location using the commuting as weight
location = FacilityIndex.sampleWithGrouping(res,
f -> Objects.requireNonNullElse(f.getLocation(), "na"),
// Use a minimum weight of 1 so that all locations have a chance to be chosen
e -> Math.max(1, commuter.getDouble(Pair.of(homeZone, e.getKey()))),
rnd);
} else
location = res.get(rnd.nextInt(res.size()));

break;
}
}
@@ -423,14 +481,6 @@ private String matchPerson(SplittableRandom rnd, Key key) {
return subgroup.get(rnd.nextInt(subgroup.size()));
}

private static String getZone(String location, String zone) {
if (zone.isBlank() || zone.equals(location))
return location;
else
// The last two digits of the postal code are not known
return location + "_" + zone.substring(0, zone.length() - 2);
}

private Stream<Key> createKey(String gender, int age, String location, String zone) {

String homeZone = getZone(location, zone);
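For reference, the change above replaces the uniform draw over distance-filtered work locations with a draw weighted by the commuter OD matrix. Below is a minimal Python sketch of such a grouping-weighted selection, assuming FacilityIndex.sampleWithGrouping groups candidates by their location key, weights each group, draws a group, and then picks uniformly within it; this is an assumption based on the call site, not the actual MATSim implementation.

import random
from collections import defaultdict

def sample_with_grouping(candidates, get_zone, commuter, home_zone, rnd=random):
    """Pick a facility: zones weighted by commuter counts (minimum 1), then uniform within the zone."""
    groups = defaultdict(list)
    for f in candidates:
        groups[get_zone(f) or "na"].append(f)

    zones = list(groups)
    # Minimum weight of 1 so every candidate zone keeps a non-zero chance of being chosen
    weights = [max(1.0, commuter.get((home_zone, z), 0.0)) for z in zones]

    zone = rnd.choices(zones, weights=weights, k=1)[0]
    return rnd.choice(groups[zone])

# Illustrative usage with invented zones and counts
commuter = {("604", "601"): 1200.0, ("604", "604"): 8000.0}
candidates = [("facility_a", "601"), ("facility_b", "604"), ("facility_c", None)]
print(sample_with_grouping(candidates, lambda f: f[1], commuter, home_zone="604"))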
69 changes: 22 additions & 47 deletions src/main/python/calibrate.py
File mode changed 100644 → 100755 (script made executable)
@@ -1,83 +1,58 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import pandas as pd
import geopandas as gpd

from matsim.calibration import create_calibration, ASCCalibrator, utils, analysis
from matsim.calibration import create_calibration, ASCCalibrator, utils

#%%
# %%

if os.path.exists("mid.csv"):
srv = pd.read_csv("mid.csv")
sim = pd.read_csv("sim.csv")

_, adj = analysis.calc_adjusted_mode_share(sim, srv)

print(srv.groupby("mode").sum())

print("Adjusted")
print(adj.groupby("mode").sum())

adj.to_csv("mid_adj.csv", index=False)

#%%

modes = ["walk", "car", "ride", "pt", "bike"]
modes = ["walk", "car", "pt", "bike", "ride"]
fixed_mode = "walk"
initial = {
"bike": -0.141210,
"pt": 0.0781477780346438,
"car": 0.871977390743304,
"ride": -2.22873502992
"bike": -1.4,
"pt": 0.6,
"car": -1,
"ride": -1.4
}

# FIXME: Adjust
# Modal split according to the MLIT survey
# The modal split is only calibrated for persons living in Kyoto
target = {
"walk": 0.1,
"bike": 0.1,
"pt": 0.1,
"car": 0.1,
"ride": 0.1
"walk": 0.239023,
"bike": 0.232813,
"pt": 0.236254,
"car": 0.217186,
"ride": 0.074724
}

region = gpd.read_file("../scenarios/dilutionArea.shp").set_crs("EPSG:25832")
homes = pd.read_csv("template-v1.0-homes.csv", dtype={"person": "str"})
region = gpd.read_file("../input/area.gpkg")


def filter_persons(persons):
persons = pd.merge(persons, homes, how="inner", left_on="person", right_on="person")
persons = gpd.GeoDataFrame(persons, geometry=gpd.points_from_xy(persons.home_x, persons.home_y))

df = gpd.sjoin(persons.set_crs("EPSG:25832"), city, how="inner", predicate="intersects")
df = gpd.sjoin(persons.set_crs("EPSG:32653"), region, how="inner", predicate="intersects")

print("Filtered %s persons" % len(df))

return df


def filter_modes(df):
df = df[df.main_mode != "freight"]
df.loc[df.main_mode.str.startswith("pt_"), "main_mode"] = "pt"

return df

return df[df.main_mode.isin(modes)]

# FIXME: Adjust paths and config

study, obj = create_calibration(
"calib",
ASCCalibrator(modes, initial, target, lr=utils.linear_scheduler(start=0.3, interval=15)),
"matsim-template-1.0.jar",
"../input/v1.0/[name]-v1.0.config.xml",
"matsim-kyoto-1.0-SNAPSHOT.jar",
"../input/v1.0/kyoto-v1.0-10pct.config.xml",
args="--10pct",
jvm_args="-Xmx55G -Xms55G -XX:+AlwaysPreTouch -XX:+UseParallelGC",
jvm_args="-Xmx48G -Xms48G -XX:+AlwaysPreTouch -XX:+UseParallelGC",
transform_persons=filter_persons,
transform_trips=filter_modes,
chain_runs=utils.default_chain_scheduler, debug=False
)

#%%
# %%

study.optimize(obj, 10)
study.optimize(obj, 6)
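The placeholder 0.1 shares are replaced with survey-based targets that should form a complete distribution over the five calibrated modes. A quick check, with the values copied from the script above:

target = {"walk": 0.239023, "bike": 0.232813, "pt": 0.236254, "car": 0.217186, "ride": 0.074724}
assert abs(sum(target.values()) - 1.0) < 1e-6  # the shares sum to exactly 1.0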
35 changes: 35 additions & 0 deletions src/main/python/create_od_matrix.py
@@ -0,0 +1,35 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import pandas as pd
import swifter


def home_work_relation(x):
""" Searches for home and work location of a person. """

home = pd.NA
work = pd.NA

for t in x.itertuples():
if t.type == "home":
home = t.location
elif t.type == "work":
work = t.location
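# a_weight is the person's survey weight; it becomes the OD count n and is summed per home-work pair below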

return pd.Series(data={"home": home, "work": work, "n": x.a_weight.iloc[0]})


if __name__ == "__main__":
df = pd.read_csv("table-activities.csv")
df = df[df.type.isin(["work", "home"])]

aggr = df.swifter.groupby("p_id").apply(home_work_relation)
aggr = aggr.dropna()

aggr.home = aggr.home.astype(int)
aggr.work = aggr.work.astype(int)

aggr = aggr.groupby(["home", "work"]).agg(n=("n", "sum"))

aggr.to_csv("work-commuter.csv", columns=["n"], index=True)
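A quick sanity check of the generated matrix before it is handed to the new --commuter option (a sketch; the column names follow from the to_csv call above):

import pandas as pd

od = pd.read_csv("work-commuter.csv")  # columns: home, work, n
assert {"home", "work", "n"} <= set(od.columns)
print(od.sort_values("n", ascending=False).head(10))  # largest home-work flows
print("total weighted commuters:", od["n"].sum())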