From 24f455b02656a753b19675cb96f0fd89eb63107d Mon Sep 17 00:00:00 2001
From: LinZhuoChen
Date: Mon, 20 Apr 2026 14:45:04 +0800
Subject: [PATCH] update readme

---
 README.md | 14 +++++++-------
 demo.py   |  1 -
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index d73b1c6..9eabe82 100644
--- a/README.md
+++ b/README.md
@@ -95,20 +95,20 @@ We provide four example scenes in `example/` that you can run out of the box:
 
 ```bash
 # Church scene
-python demo.py --model_path /path/to/checkpoint.pt \
+python demo.py --model_path /path/to/lingbot-map.pt \
     --image_folder example/church --mask_sky
 
-# Oxford scene with sky masking (outdoor)
-python demo.py --model_path /path/to/checkpoint.pt \
-    --image_folder example/oxford --mask_sky
-
 # University scene
-python demo.py --model_path /path/to/checkpoint.pt \
+python demo.py --model_path /path/to/lingbot-map.pt \
     --image_folder example/university --mask_sky
 
 # Loop scene (loop closure trajectory)
-python demo.py --model_path /path/to/checkpoint.pt \
+python demo.py --model_path /path/to/lingbot-map.pt \
     --image_folder example/loop
+
+# Oxford scene with sky masking (outdoor, large scale scene)
+python demo.py --model_path /path/to/lingbot-map-long.pt \
+    --image_folder example/oxford --mask_sky
 ```
 
 ### Streaming Inference from Images
diff --git a/demo.py b/demo.py
index a8c8636..1b34dde 100644
--- a/demo.py
+++ b/demo.py
@@ -269,7 +269,6 @@ def main():
     parser.add_argument(
         "--offload_to_cpu",
         action=argparse.BooleanOptionalAction,
-        default=True,
         help="Offload per-frame predictions to CPU during inference to cut GPU peak memory. "
         "Use --no-offload_to_cpu to keep outputs on GPU.",
     )