Hex2vec embedder
In [1]:
from pytorch_lightning import seed_everything
from srai.embedders import Hex2VecEmbedder
from srai.joiners import IntersectionJoiner
from srai.loaders import OSMOnlineLoader
from srai.neighbourhoods import H3Neighbourhood
from srai.plotting import plot_numeric_data, plot_regions
from srai.regionalizers import H3Regionalizer, geocode_to_region_gdf
In [2]:
SEED = 71
seed_everything(SEED)
Seed set to 71
Out[2]:
71
Load data from OSM
First, use geocoding to get the area of interest.
In [3]:
area_gdf = geocode_to_region_gdf("Wrocław, Poland")
plot_regions(area_gdf, tiles_style="CartoDB positron")
Out[3]:
[Interactive folium map: the geocoded boundary of Wrocław.]
Next, download the data for the selected region and the specified tags. We're using OSMOnlineLoader here, as it's faster for a small number of tags. In a real-life scenario with more tags, you would likely want to use the OSMPbfLoader instead.
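For reference, here is a minimal sketch of the PBF-based alternative. It assumes the same area_gdf as above and the tags dictionary defined in the next cell; OSMPbfLoader shares the load(area, tags) interface but reads features from a downloaded .osm.pbf extract instead of querying the Overpass API.

from srai.loaders import OSMPbfLoader

# Sketch only: same loader interface, but parses a local .osm.pbf extract,
# which scales better to large tag dictionaries.
pbf_loader = OSMPbfLoader()
features_gdf = pbf_loader.load(area_gdf, tags)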
In [4]:
tags = {
    "leisure": "park",
    "landuse": "forest",
    "amenity": ["bar", "restaurant", "cafe"],
    "water": "river",
    "sport": "soccer",
}
loader = OSMOnlineLoader()
features_gdf = loader.load(area_gdf, tags)
folium_map = plot_regions(area_gdf, colormap=["rgba(0,0,0,0)"], tiles_style="CartoDB positron")
features_gdf.explore(m=folium_map)
Out[4]:
[Interactive folium map: the downloaded OSM features plotted over the city boundary.]
Prepare the data for embedding
After downloading the data, we need to prepare it for embedding: regionalize the selected area and join the features with the resulting regions.
In [5]:
regionalizer = H3Regionalizer(resolution=9)
regions_gdf = regionalizer.transform(area_gdf)
plot_regions(regions_gdf, tiles_style="CartoDB positron")
Out[5]:
[Interactive folium map: the H3 regions at resolution 9 covering the city.]
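Before joining, it can be useful to check how many hexagons the regionalizer produced; the embedding table further below has 3168 rows, which should correspond to these regions. A minimal sketch:

# Number of H3 cells covering the area at resolution 9.
print(len(regions_gdf))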
In [6]:
joiner = IntersectionJoiner()
joint_gdf = joiner.transform(regions_gdf, features_gdf)
joint_gdf
Out[6]:
| region_id | feature_id |
|---|---|
| 891e2040897ffff | node/280727473 |
| 891e2040d4bffff | node/300461026 |
| | node/300461036 |
| 891e2040d5bffff | node/300461042 |
| 891e2040887ffff | node/300461045 |
| ... | ... |
| 891e2042637ffff | way/1360073315 |
| 891e20420cbffff | way/1360073315 |
| 891e20420dbffff | way/1360073315 |
| 891e20420c3ffff | way/1360073315 |
| 891e2040817ffff | way/1366196846 |

4065 rows × 0 columns
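The joint index pairs each region with every feature that intersects it, and the frame itself carries no columns. As an optional sanity check (assuming the index levels are named region_id and feature_id, as shown above), you can count how many features landed in each region:

# How many OSM features were matched to each H3 cell.
joint_gdf.index.get_level_values("region_id").value_counts()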
Embedding
After preparing the data, we can generate embeddings for the regions. Hex2Vec trains a small encoder with a skip-gram-like objective: a region's feature counts are pulled towards those of its H3 neighbours and pushed away from negatively sampled, non-adjacent regions, which is why an H3Neighbourhood is passed in alongside the data.
In [7]:
import warnings

neighbourhood = H3Neighbourhood(regions_gdf)
embedder = Hex2VecEmbedder([15, 10])

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    embeddings = embedder.fit_transform(
        regions_gdf,
        features_gdf,
        joint_gdf,
        neighbourhood,
        trainer_kwargs={"max_epochs": 5, "accelerator": "cpu"},
        batch_size=100,
    )
embeddings
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
HPU available: False, using: 0 HPUs
  | Name    | Type       | Params | Mode
-------------------------------------------------
0 | encoder | Sequential | 280    | train
-------------------------------------------------
280       Trainable params
0         Non-trainable params
280       Total params
0.001     Total estimated model params size (MB)
4         Modules in train mode
0         Modules in eval mode
`Trainer.fit` stopped: `max_epochs=5` reached.
Out[7]:
| region_id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
|---|---|---|---|---|---|---|---|---|---|---|
| 891e204050fffff | 0.095495 | 0.138996 | 0.566001 | 0.688587 | -0.025211 | -0.170529 | 0.644415 | -0.266062 | 0.530560 | -0.066391 |
| 891e20434d3ffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 |
| 891e2051973ffff | -0.310405 | 0.045352 | -0.296344 | -0.414064 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 |
| 891e2041497ffff | -0.310405 | 0.045352 | -0.296344 | -0.414065 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 |
| 891e204083bffff | 1.038319 | -0.183254 | 0.187057 | 0.140822 | 0.123709 | -0.830628 | 1.430029 | -0.910638 | 0.197307 | -0.844142 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 891e20404c3ffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 |
| 891e20474afffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 |
| 891e2042323ffff | -0.578869 | 0.324108 | -0.507577 | -0.754409 | -0.084290 | 0.240572 | -0.252477 | 0.415875 | -0.184438 | -0.246741 |
| 891e2051873ffff | -0.310405 | 0.045352 | -0.296344 | -0.414064 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 |
| 891e2042e2bffff | -0.785091 | 0.567634 | 0.190860 | -0.089924 | -0.219985 | 0.231418 | 0.420068 | 0.015617 | 0.521316 | -0.284058 |

3168 rows × 10 columns
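The fitted embedder can be reused without retraining. A minimal sketch, assuming the objects from the cell above are still in scope; the commented save/load calls are only available in srai versions that expose model persistence on Hex2VecEmbedder, so treat them as optional:

# Produce embeddings again from the already-trained encoder.
embeddings_again = embedder.transform(regions_gdf, features_gdf, joint_gdf)

# Only if your srai version provides persistence on Hex2VecEmbedder:
# embedder.save("hex2vec_wroclaw")
# embedder = Hex2VecEmbedder.load("hex2vec_wroclaw")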
Visualizing the embeddings' similarity
To check that similar regions end up close to each other in the embedding space, we cluster the embeddings with KMeans and inspect the cluster assignments.
In [8]:
from sklearn.cluster import KMeans
clusterizer = KMeans(n_clusters=5, random_state=SEED)
clusterizer.fit(embeddings)
embeddings["cluster"] = clusterizer.labels_
embeddings
Out[8]:
| region_id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | cluster |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 891e204050fffff | 0.095495 | 0.138996 | 0.566001 | 0.688587 | -0.025211 | -0.170529 | 0.644415 | -0.266062 | 0.530560 | -0.066391 | 3 |
| 891e20434d3ffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 | 0 |
| 891e2051973ffff | -0.310405 | 0.045352 | -0.296344 | -0.414064 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 | 1 |
| 891e2041497ffff | -0.310405 | 0.045352 | -0.296344 | -0.414065 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 | 1 |
| 891e204083bffff | 1.038319 | -0.183254 | 0.187057 | 0.140822 | 0.123709 | -0.830628 | 1.430029 | -0.910638 | 0.197307 | -0.844142 | 2 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 891e20404c3ffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 | 0 |
| 891e20474afffff | 0.338901 | -0.193323 | -0.014221 | 0.313743 | 0.227657 | -0.132642 | -0.284652 | -0.104796 | -0.200931 | 0.378753 | 0 |
| 891e2042323ffff | -0.578869 | 0.324108 | -0.507577 | -0.754409 | -0.084290 | 0.240572 | -0.252477 | 0.415875 | -0.184438 | -0.246741 | 4 |
| 891e2051873ffff | -0.310405 | 0.045352 | -0.296344 | -0.414064 | -0.021333 | 0.244782 | -0.422900 | 0.357396 | -0.103730 | 0.055133 | 1 |
| 891e2042e2bffff | -0.785091 | 0.567634 | 0.190860 | -0.089924 | -0.219985 | 0.231418 | 0.420068 | 0.015617 | 0.521316 | -0.284058 | 3 |

3168 rows × 11 columns
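To put the clusters back on the map, the plot_numeric_data helper imported at the top can colour each region by its cluster label. A minimal sketch, assuming the "cluster" column added above:

# Colour each H3 region by its KMeans cluster label.
plot_numeric_data(regions_gdf, "cluster", embeddings, tiles_style="CartoDB positron")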