GeoVex Embedder
In [1]:
import warnings
import pandas as pd
import torch
from pytorch_lightning import seed_everything
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from srai.embedders import GeoVexEmbedder, Hex2VecEmbedder
from srai.h3 import ring_buffer_h3_regions_gdf
from srai.joiners import IntersectionJoiner
from srai.loaders import OSMPbfLoader
from srai.loaders.osm_loaders.filters import GEOFABRIK_LAYERS
from srai.neighbourhoods import H3Neighbourhood
from srai.plotting import plot_numeric_data, plot_regions
from srai.regionalizers import H3Regionalizer, geocode_to_region_gdf
In [2]:
SEED = 71
seed_everything(SEED)
Seed set to 71
Out[2]:
71
Load data from OSM
First, use geocoding to get the area of interest.
In [3]:
area_gdf = geocode_to_region_gdf("Wrocław, Poland")
plot_regions(area_gdf, tiles_style="CartoDB positron")
Out[3]:
(interactive map: geocoded boundary of Wrocław)
Buffer the Area
The GeoVex embedder requires a buffer around the area of interest, because each hexagon needs its whole radius-k neighborhood present in the dataset. The buffer is defined in hexagon radius units, so a buffer of 1 means that every hexagon also has its 1-neighborhood in the dataset.
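The buffer size can be sanity-checked directly on the H3 grid. A minimal sketch, assuming the h3 package (v4 API) is available alongside srai; the sample coordinates are an arbitrary point in Wrocław:

import h3

# An arbitrary resolution-9 cell near the centre of Wrocław (hypothetical point).
cell = h3.latlng_to_cell(51.11, 17.03, 9)

# grid_disk(cell, k) returns the cell plus every cell within k rings of it,
# which is the neighborhood that ring_buffer_h3_regions_gdf adds around the area.
print(len(h3.grid_disk(cell, 1)))  # 7  = 1 centre + 6 first-ring neighbors
print(len(h3.grid_disk(cell, 4)))  # 61 = 1 + sum of 6*k for k = 1..4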
In [4]:
area_gdf.head()
Out[4]:
| region_id | geometry |
|---|---|
| Wrocław, Lower Silesian Voivodeship, Poland | POLYGON ((16.80734 51.13895, 16.80859 51.13887... |
In [5]:
resolution = 9
k_ring_buffer_radius = 4
regionalizer = H3Regionalizer(resolution=resolution)
base_h3_regions = regionalizer.transform(area_gdf)
buffered_h3_regions = ring_buffer_h3_regions_gdf(base_h3_regions, distance=k_ring_buffer_radius)
buffered_h3_geometry = buffered_h3_regions.unary_union
print("Base regions:", len(base_h3_regions))
print("Buffered regions:", len(buffered_h3_regions))
Base regions: 3168
Buffered regions: 4319
Download the Data
Next, download the data for the selected region and the specified tags.
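GEOFABRIK_LAYERS is a grouped OSM tag filter bundled with srai, modelled after the Geofabrik download layers. A minimal sketch of inspecting it, assuming it behaves as a plain mapping from layer name to tag filter:

# List the layer names covered by the filter.
print(len(GEOFABRIK_LAYERS))
print(list(GEOFABRIK_LAYERS)[:5])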
In [6]:
tags = GEOFABRIK_LAYERS
loader = OSMPbfLoader()
features_gdf = loader.load(buffered_h3_geometry, tags)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/srai/loaders/osm_loaders/osm_pbf_loader.py:128: FutureWarning: Use `convert_geometry_to_geodataframe` instead. Deprecated since 0.8.1 version.
  features_gdf = pbf_reader.get_features_gdf_from_geometry(
Downloading data from 'https://download.geofabrik.de/europe/poland/dolnoslaskie-latest.osm.pbf' to file '/home/runner/work/srai/srai/examples/embedders/files/Geofabrik_dolnoslaskie.osm.pbf'.
  0%|          | 0.00/158M [00:00<?, ?B/s]
100%|████████████████████████████████████████| 158M/158M [00:00<00:00, 238GB/s]
SHA256 hash of downloaded file: 77b4c8966c47e5556be5bbe4ef4fd6222d02b9eeb7c82033efe175566cdcd465 Use this value as the 'known_hash' argument of 'pooch.retrieve' to ensure that the file hasn't changed if it is downloaded again in the future.
Finished operation in 0:00:50
Prepare the data for embedding
After downloading the data, we need to prepare it for embedding. In the previous step we regionalized the selected area and buffered it; now we join the features with the prepared regions.
In [7]:
plot_regions(buffered_h3_regions, tiles_style="CartoDB positron")
Out[7]:
(interactive map: buffered H3 regions)
In [8]:
joiner = IntersectionJoiner()
joint_gdf = joiner.transform(buffered_h3_regions, features_gdf)
joint_gdf
Out[8]:
| region_id | feature_id |
|---|---|
| 891e2041ddbffff | way/100919096 |
|  | way/549210084 |
|  | way/549210083 |
|  | way/549210081 |
|  | way/1003666770 |
| ... | ... |
| 891e2041c0bffff | way/657958424 |
|  | way/265656095 |
|  | way/448924248 |
|  | way/204008122 |
|  | way/454917982 |

464854 rows × 0 columns
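All of the information in the joint frame lives in its (region_id, feature_id) MultiIndex, hence the 0 data columns. A minimal sketch of inspecting it, e.g. counting matched features per region, reusing the variables above:

# Count how many OSM features intersect each H3 region.
features_per_region = joint_gdf.groupby(level="region_id").size()
print(features_per_region.describe())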
GeoVex Embedding
After preparing the data, we can proceed with generating embeddings for the regions.
In [9]:
neighbourhood = H3Neighbourhood(buffered_h3_regions)
embedder = GeoVexEmbedder(
target_features=GEOFABRIK_LAYERS,
batch_size=10,
neighbourhood_radius=k_ring_buffer_radius,
convolutional_layers=2,
embedding_size=50,
)
In [10]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
embeddings = embedder.fit_transform(
regions_gdf=buffered_h3_regions,
features_gdf=features_gdf,
joint_gdf=joint_gdf,
neighbourhood=neighbourhood,
trainer_kwargs={
# "max_epochs": 20, # uncomment for a longer training
"max_epochs": 5,
"accelerator": (
"cpu" if torch.backends.mps.is_available() else "auto"
), # GeoVexEmbedder does not support MPS
},
learning_rate=0.001,
)
embeddings.head()
  0%|          | 0/4319 [00:00<?, ?it/s]
100%|██████████| 4319/4319 [00:00<00:00, 5616.82it/s]
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
  | Name    | Type       | Params
---------------------------------------
0 | encoder | Sequential | 2.4 M
1 | decoder | Sequential | 1.8 M
2 | _loss   | GeoVeXLoss | 0
---------------------------------------
4.2 M     Trainable params
0         Non-trainable params
4.2 M     Total params
16.673    Total estimated model params size (MB)
`Trainer.fit` stopped: `max_epochs=5` reached.
Warning: Some regions were not able to be encoded, as they don't have r=4 neighbors.
Out[10]:
| region_id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ... | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 891e2041ddbffff | -6.002269 | -5.619805 | 9.068292 | 10.417067 | 1.223674 | -22.373974 | 5.001889 | 20.060329 | -1.984833 | -11.269778 | ... | 0.604455 | -3.459945 | -12.597714 | 19.999340 | -2.599703 | 0.322198 | 23.335978 | -13.690354 | -4.191598 | -20.215748 |
| 891e2047193ffff | -12.644679 | 3.203645 | 10.038872 | 5.037261 | -5.022818 | -12.344154 | 8.511558 | 23.692659 | -2.947314 | -5.038610 | ... | 7.617133 | -7.136011 | -9.839093 | 21.288355 | -8.520233 | -11.073737 | 24.955727 | 0.927959 | -7.357563 | -12.709655 |
| 891e2051bc7ffff | -17.501898 | -1.766093 | 5.704447 | 6.940545 | -2.038602 | -23.920603 | 5.150405 | 14.794232 | -1.916240 | -9.617002 | ... | 6.904617 | -6.305346 | -9.714335 | 19.091284 | -10.854940 | -0.364384 | 17.013515 | -19.561752 | -12.114444 | -31.067543 |
| 891e2040e57ffff | -3.192482 | -0.360475 | 13.942698 | 9.672082 | -11.767628 | -8.302562 | 9.800679 | 28.640602 | -1.553025 | -4.629853 | ... | -2.564397 | -3.678543 | -15.788183 | 23.048933 | -3.321853 | -6.677700 | 32.172958 | -7.107971 | -3.290913 | -3.199780 |
| 891e2040ab3ffff | -17.040060 | 10.629964 | 21.984520 | 13.388414 | 3.253064 | 0.932851 | 5.512388 | 43.343056 | -4.430813 | -21.224218 | ... | -5.749437 | 0.201917 | -21.716101 | 25.661131 | 0.415634 | 0.756232 | 51.478462 | -9.420775 | 7.712603 | 13.750100 |
5 rows × 50 columns
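As the warning above notes, cells whose full r=4 neighborhood falls outside the buffered dataset are skipped, which is why the result has fewer rows than the buffered region set. A quick sanity check, reusing the variables above:

# Buffered regions vs. regions that actually received an embedding
# (4319 vs. 3251 in this run).
print(len(buffered_h3_regions), len(embeddings))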
Hex2Vec Embedding
In [11]:
neighbourhood = H3Neighbourhood(buffered_h3_regions)
hex2vec_embedder = Hex2VecEmbedder(
encoder_sizes=[300, 150, 50],
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hex2vec_embeddings = hex2vec_embedder.fit_transform(
regions_gdf=buffered_h3_regions,
features_gdf=features_gdf,
joint_gdf=joint_gdf,
neighbourhood=neighbourhood,
negative_sample_k_distance=2,
batch_size=64,
learning_rate=0.001,
trainer_kwargs={
# "max_epochs": 50, # uncomment for a longer training
"max_epochs": 5,
"accelerator": "auto",
},
)
hex2vec_embeddings.head()
  0%|          | 0/4319 [00:00<?, ?it/s]
100%|██████████| 4319/4319 [00:00<00:00, 32044.15it/s]
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
  | Name    | Type       | Params
---------------------------------------
0 | encoder | Sequential | 136 K
---------------------------------------
136 K     Trainable params
0         Non-trainable params
136 K     Total params
0.546     Total estimated model params size (MB)
`Trainer.fit` stopped: `max_epochs=5` reached.
Out[11]:
| region_id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ... | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 891e2041ddbffff | 0.143165 | 0.092640 | 0.286772 | -0.146599 | 0.071895 | 0.046969 | 0.146602 | -0.025241 | -0.019410 | 0.006612 | ... | 0.178420 | 0.012579 | 0.041357 | 0.016802 | -0.217406 | -0.238258 | 0.255515 | -0.146644 | 0.163145 | 0.207884 |
| 891e2047193ffff | 0.058281 | -0.226801 | 0.056207 | 0.179368 | 0.138109 | 0.090817 | -0.157819 | -0.145030 | 0.001884 | 0.379649 | ... | -0.801977 | 0.394654 | 0.366309 | -0.449554 | 0.178894 | -0.299765 | -0.288317 | 0.186249 | 0.205276 | -0.218550 |
| 891e2051bc7ffff | -0.463805 | 0.074810 | -0.446682 | 0.254085 | 0.053858 | -0.071688 | -0.380435 | 0.480679 | -0.280187 | -0.224738 | ... | 0.418684 | -0.049881 | -0.174413 | 0.200768 | 0.490585 | 0.235876 | 0.072852 | -0.219477 | 0.070844 | -0.439439 |
| 891e2043247ffff | -0.088140 | 0.637899 | -0.823850 | 0.862618 | -0.213688 | 0.432644 | -0.164871 | 0.261166 | -0.579981 | -0.143090 | ... | 0.270767 | 0.253406 | -0.030971 | -0.238891 | 0.010621 | 0.276984 | -0.039487 | -0.183317 | 0.581494 | 0.265451 |
| 891e2040e57ffff | 0.232330 | -0.116793 | -0.210537 | 0.053187 | 0.217647 | -0.333352 | 0.151298 | 0.095009 | -0.191969 | 0.311927 | ... | -0.258786 | 0.108361 | -0.099224 | -0.611075 | -0.142254 | -0.105129 | -0.151868 | -0.074011 | -0.068070 | 0.099067 |
5 rows × 50 columns
Comparing the Embeddings
GeoVex Embedding
PCA
In [12]:
# do pca with three components and then cast to RGB
pca = PCA(n_components=3)
pca_embeddings = pca.fit_transform(embeddings)
# make the embeddings into a dataframe
pca_embeddings = pd.DataFrame(pca_embeddings, index=embeddings.index)
# convert to RGB
pca_embeddings = (
(pca_embeddings - pca_embeddings.min()) / (pca_embeddings.max() - pca_embeddings.min()) * 255
).astype(int)
# make the rgb array into a string
pca_embeddings["rgb"] = pca_embeddings.apply(
lambda row: f"rgb({row[0]}, {row[1]}, {row[2]})", axis=1
)
color_dict = dict(enumerate(base_h3_regions.index.map(pca_embeddings["rgb"].to_dict()).to_list()))
base_h3_regions.reset_index().reset_index().explore(
column="index",
tooltip="region_id",
tiles="CartoDB positron",
legend=False,
cmap=lambda x: color_dict[x],
style_kwds=dict(color="#444", opacity=0.0, fillOpacity=0.5),
)
Out[12]:
(interactive map: GeoVex embeddings, PCA components rendered as RGB)
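Collapsing 50 dimensions to 3 colour channels discards information; how much of the embedding variance the three components retain can be read off the fitted PCA object:

# Share of variance captured by each of the three RGB components.
print(pca.explained_variance_ratio_)
print(pca.explained_variance_ratio_.sum())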
Clustering
In [13]:
clusterizer = KMeans(n_clusters=5, random_state=SEED)
clusterizer.fit(embeddings)
embeddings.index.name = "region_id"
embeddings["cluster"] = clusterizer.labels_
embeddings["cluster"]
Out[13]:
region_id
891e2041ddbffff    3
891e2047193ffff    4
891e2051bc7ffff    0
891e2040e57ffff    2
891e2040ab3ffff    2
                  ..
891e2041537ffff    0
891e2042a1bffff    4
891e2047247ffff    4
891e204725bffff    2
891e2041c0bffff    0
Name: cluster, Length: 3251, dtype: int32
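To see how evenly K-means split the city, the cluster sizes can be inspected directly; a one-line check:

# Number of regions assigned to each GeoVex cluster.
print(embeddings["cluster"].value_counts().sort_index())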
In [14]:
plot_numeric_data(base_h3_regions, "cluster", embeddings, tiles_style="CartoDB positron")
Out[14]:
(interactive map: K-means clusters of GeoVex embeddings)
Hex2Vec
PCA
In [15]:
# do pca with three components and then cast to RGB
pca = PCA(n_components=3)
pca_embeddings = pca.fit_transform(hex2vec_embeddings)
# make the embeddings into a dataframe
pca_embeddings = pd.DataFrame(pca_embeddings, index=hex2vec_embeddings.index)
# convert to RGB
pca_embeddings = (
(pca_embeddings - pca_embeddings.min()) / (pca_embeddings.max() - pca_embeddings.min()) * 255
).astype(int)
# make the rgb array into a string
pca_embeddings["rgb"] = pca_embeddings.apply(
lambda row: f"rgb({row[0]}, {row[1]}, {row[2]})", axis=1
)
color_dict = dict(enumerate(base_h3_regions.index.map(pca_embeddings["rgb"].to_dict()).to_list()))
base_h3_regions.reset_index().reset_index().explore(
column="index",
tooltip="region_id",
tiles="CartoDB positron",
legend=False,
cmap=lambda x: color_dict[x],
style_kwds=dict(color="#444", opacity=0.0, fillOpacity=0.5),
)
Out[15]:
(interactive map: Hex2Vec embeddings, PCA components rendered as RGB)
Clustering
In [16]:
clusterizer = KMeans(n_clusters=5, random_state=SEED)
clusterizer.fit(hex2vec_embeddings)
hex2vec_embeddings["cluster"] = clusterizer.labels_
hex2vec_embeddings["cluster"]
Out[16]:
region_id
891e2041ddbffff    0
891e2047193ffff    4
891e2051bc7ffff    3
891e2043247ffff    3
891e2040e57ffff    2
                  ..
891e2041537ffff    3
891e2042a1bffff    4
891e2047247ffff    0
891e204725bffff    2
891e2041c0bffff    1
Name: cluster, Length: 4319, dtype: int32
In [17]:
plot_numeric_data(base_h3_regions, "cluster", hex2vec_embeddings, tiles_style="CartoDB positron")
Out[17]:
(interactive map: K-means clusters of Hex2Vec embeddings)
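One way to quantify how similar the two partitions are is the adjusted Rand index over the regions present in both results. A minimal sketch using scikit-learn; GeoVex skipped the buffer-edge regions, so the indexes are aligned first:

from sklearn.metrics import adjusted_rand_score

# Align on regions embedded by both models, then compare cluster labels.
common = embeddings.index.intersection(hex2vec_embeddings.index)
ari = adjusted_rand_score(
    embeddings.loc[common, "cluster"],
    hex2vec_embeddings.loc[common, "cluster"],
)
print(f"Adjusted Rand index between GeoVex and Hex2Vec clusterings: {ari:.3f}")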