diff --git a/.clang-tidy b/.clang-tidy
index 1c7d13e2b0..e85ebb6758 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -41,3 +41,4 @@ Checks: >
   -readability-redundant-declaration,
   -readability-redundant-function-ptr-dereference,
   -readability-suspicious-call-argument,
+  -readability-non-const-parameter,
diff --git a/runtime/doc/api.txt b/runtime/doc/api.txt
index 9cdfd8a563..34e2aedabf 100644
--- a/runtime/doc/api.txt
+++ b/runtime/doc/api.txt
@@ -2546,7 +2546,7 @@ nvim_buf_get_extmark_by_id({buffer}, {ns_id}, {id}, {opts})
         0-indexed (row, col) tuple or empty list () if extmark id was absent
 
                                                   *nvim_buf_get_extmarks()*
-nvim_buf_get_extmarks({buffer}, {ns_id}, {start}, {end}, {opts})
+nvim_buf_get_extmarks({buffer}, {ns_id}, {start}, {end}, {*opts})
     Gets |extmarks| in "traversal order" from a |charwise| region defined by
     buffer positions (inclusive, 0-indexed |api-indexing|).
 
@@ -2560,6 +2560,10 @@ nvim_buf_get_extmarks({buffer}, {ns_id}, {start}, {end}, {opts})
     If `end` is less than `start`, traversal works backwards. (Useful with
     `limit`, to get the first marks prior to a given position.)
 
+    Note: when using extmark ranges (marks with an end_row/end_col position)
+    the `overlap` option might be useful. Otherwise only the start position of
+    an extmark will be considered.
+
     Example: >lua
         local api = vim.api
         local pos = api.nvim_win_get_cursor(0)
@@ -2589,6 +2593,8 @@ nvim_buf_get_extmarks({buffer}, {ns_id}, {start}, {end}, {opts})
                  • details: Whether to include the details dict
                  • hl_name: Whether to include highlight group name instead
                    of id, true if omitted
+                 • overlap: Also include marks which overlap the range, even
+                   if their start position is less than `start`
                  • type: Filter marks by type: "highlight", "sign",
                    "virt_text" and "virt_lines"
 
@@ -2608,6 +2614,11 @@ nvim_buf_set_extmark({buffer}, {ns_id}, {line}, {col}, {*opts})
     Using the optional arguments, it is possible to use this to highlight a
     range of text, and also to associate virtual text to the mark.
 
+    If present, the position defined by `end_col` and `end_row` should be
+    after the start position in order for the extmark to cover a range. An
+    earlier end position is not an error, but then it behaves like an empty
+    range (no highlighting).
+
     Parameters: ~
       • {buffer}  Buffer handle, or 0 for current buffer
       • {ns_id}   Namespace id from |nvim_create_namespace()|
diff --git a/runtime/doc/news.txt b/runtime/doc/news.txt
index cd977a8b5f..93012e78a9 100644
--- a/runtime/doc/news.txt
+++ b/runtime/doc/news.txt
@@ -221,6 +221,15 @@ The following changes to existing APIs or features add new behavior.
   "virtual_text" table, which gives users more control over how diagnostic
   virtual text is displayed.
 
+• Extmarks now fully support multi-line ranges, and a single extmark can be
+  used to highlight a range of arbitrary length. The |nvim_buf_set_extmark()|
+  API function already allowed you to define such ranges, but highlight regions
+  were not rendered consistently for a range that covers more than one line break.
+  This has now been fixed. Signs defined as part of a multi-line extmark also
+  apply to every line in the range, not just the first.
+  In addition, |nvim_buf_get_extmarks()| has gained an "overlap" option to
+  return such ranges even if they started before the specified position.
+
 ==============================================================================
 REMOVED FEATURES                                                *news-removed*
 
diff --git a/runtime/lua/vim/_meta/api.lua b/runtime/lua/vim/_meta/api.lua
index bfff16933a..7cd0d825a1 100644
--- a/runtime/lua/vim/_meta/api.lua
+++ b/runtime/lua/vim/_meta/api.lua
@@ -5,6 +5,13 @@ error('Cannot require a meta file')
 
 vim.api = {}
 
+--- @private
+--- @param buffer integer
+--- @param keys boolean
+--- @param dot boolean
+--- @return string
+function vim.api.nvim__buf_debug_extmarks(buffer, keys, dot) end
+
 --- @private
 --- @param buffer integer
 --- @param first integer
@@ -313,6 +320,9 @@ function vim.api.nvim_buf_get_extmark_by_id(buffer, ns_id, id, opts) end
 --- ```
 --- If `end` is less than `start`, traversal works backwards. (Useful with
 --- `limit`, to get the first marks prior to a given position.)
+--- Note: when using extmark ranges (marks with an end_row/end_col position)
+--- the `overlap` option might be useful. Otherwise only the start position of
+--- an extmark will be considered.
 --- Example:
 --- ```lua
 ---   local api = vim.api
@@ -337,11 +347,13 @@ function vim.api.nvim_buf_get_extmark_by_id(buffer, ns_id, id, opts) end
 --- @param end_ any End of range (inclusive): a 0-indexed (row, col) or valid
 ---               extmark id (whose position defines the bound).
 ---               `api-indexing`
---- @param opts table Optional parameters. Keys:
+--- @param opts vim.api.keyset.get_extmarks Optional parameters. Keys:
 ---               • limit: Maximum number of marks to return
 ---               • details: Whether to include the details dict
 ---               • hl_name: Whether to include highlight group name instead
 ---                 of id, true if omitted
+---               • overlap: Also include marks which overlap the range, even
+---                 if their start position is less than `start`
 ---               • type: Filter marks by type: "highlight", "sign",
 ---                 "virt_text" and "virt_lines"
 --- @return any[]
@@ -457,6 +469,10 @@ function vim.api.nvim_buf_line_count(buffer) end
 --- waiting for the return value.)
 --- Using the optional arguments, it is possible to use this to highlight a
 --- range of text, and also to associate virtual text to the mark.
+--- If present, the position defined by `end_col` and `end_row` should be
+--- after the start position in order for the extmark to cover a range. An
+--- earlier end position is not an error, but then it behaves like an empty
+--- range (no highlighting).
 ---
 --- @param buffer integer Buffer handle, or 0 for current buffer
 --- @param ns_id integer Namespace id from `nvim_create_namespace()`
diff --git a/runtime/lua/vim/_meta/api_keysets.lua b/runtime/lua/vim/_meta/api_keysets.lua
index 8f36edba77..4d08563ce2 100644
--- a/runtime/lua/vim/_meta/api_keysets.lua
+++ b/runtime/lua/vim/_meta/api_keysets.lua
@@ -122,6 +122,13 @@ error('Cannot require a meta file')
 --- @class vim.api.keyset.get_commands
 --- @field builtin? boolean
 
+--- @class vim.api.keyset.get_extmarks
+--- @field limit? integer
+--- @field details? boolean
+--- @field hl_name? boolean
+--- @field overlap? boolean
+--- @field type? string
+
 --- @class vim.api.keyset.get_highlight
 --- @field id? integer
 --- @field name? string
diff --git a/runtime/lua/vim/highlight.lua b/runtime/lua/vim/highlight.lua
index 14b0e71312..fd4fb54a5b 100644
--- a/runtime/lua/vim/highlight.lua
+++ b/runtime/lua/vim/highlight.lua
@@ -55,6 +55,9 @@ function M.range(bufnr, ns, higroup, start, finish, opts)
   local inclusive = opts.inclusive or false
   local priority = opts.priority or M.priorities.user
 
+  -- TODO: in case of 'v', 'V' (not block), this should calculate equivalent
+  -- bounds (row, col, end_row, end_col) as multiline regions are natively
+  -- supported now
   local region = vim.region(bufnr, start, finish, regtype, inclusive)
   for linenr, cols in pairs(region) do
     local end_row
diff --git a/src/klib/kvec.h b/src/klib/kvec.h
index f6674a0adf..fd9096e1ad 100644
--- a/src/klib/kvec.h
+++ b/src/klib/kvec.h
@@ -207,6 +207,16 @@ static inline void *_memcpy_free(void *const restrict dest, void *const restrict
   /* 2^x initial array size. */ \
   kvi_resize(v, (v).capacity << 1)
 
+/// fit at least "len" more items
+#define kvi_ensure_more_space(v, len) \
+  do { \
+    if ((v).capacity < (v).size + len) { \
+      (v).capacity = (v).size + len; \
+      kv_roundup32((v).capacity); \
+      kvi_resize((v), (v).capacity); \
+    } \
+  } while (0)
+
 /// Get location where to store new element to a vector with preallocated array
 ///
 /// @param[in,out] v  Vector to push to.
@@ -223,6 +233,19 @@ static inline void *_memcpy_free(void *const restrict dest, void *const restrict
 #define kvi_push(v, x) \
   (*kvi_pushp(v) = (x))
 
+/// Copy a vector to a preallocated vector
+///
+/// @param[out] v1 destination
+/// @param[in] v0 source (can be either vector or preallocated vector)
+#define kvi_copy(v1, v0) \
+  do { \
+    if ((v1).capacity < (v0).size) { \
+      kvi_resize(v1, (v0).size); \
+    } \
+    (v1).size = (v0).size; \
+    memcpy((v1).items, (v0).items, sizeof((v1).items[0]) * (v0).size); \
+  } while (0)
+
 /// Free array of elements of a vector with preallocated array if needed
 ///
 /// @param[out] v Vector to free.
diff --git a/src/nvim/api/extmark.c b/src/nvim/api/extmark.c
index 268fdded9a..b76a275c0d 100644
--- a/src/nvim/api/extmark.c
+++ b/src/nvim/api/extmark.c
@@ -308,6 +308,10 @@ ArrayOf(Integer) nvim_buf_get_extmark_by_id(Buffer buffer, Integer ns_id,
 /// If `end` is less than `start`, traversal works backwards. (Useful
 /// with `limit`, to get the first marks prior to a given position.)
 ///
+/// Note: when using extmark ranges (marks with an end_row/end_col position)
+/// the `overlap` option might be useful. Otherwise only the start position
+/// of an extmark will be considered.
+///
 /// Example:
 /// ```lua
 ///   local api = vim.api
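
For context, a minimal usage sketch of the behavior documented above (not part of the patch; the namespace and highlight group names are arbitrary, and the buffer is assumed to have at least three lines):

```lua
local api = vim.api
local ns = api.nvim_create_namespace('demo')  -- arbitrary namespace name

-- One extmark covering a multi-line range (rows 0-2).
api.nvim_buf_set_extmark(0, ns, 0, 0, {
  end_row = 2,
  end_col = 0,
  hl_group = 'Visual',
})

-- A query starting at row 1 misses this mark by default, because only the
-- start position (row 0) is considered ...
local plain = api.nvim_buf_get_extmarks(0, ns, { 1, 0 }, { -1, -1 }, {})

-- ... but with `overlap` the mark is returned, since its range crosses row 1.
local overlapping = api.nvim_buf_get_extmarks(0, ns, { 1, 0 }, { -1, -1 },
                                              { overlap = true, details = true })
```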
@@ -334,11 +338,13 @@ ArrayOf(Integer) nvim_buf_get_extmark_by_id(Buffer buffer, Integer ns_id,
 ///          - limit:  Maximum number of marks to return
 ///          - details: Whether to include the details dict
 ///          - hl_name: Whether to include highlight group name instead of id, true if omitted
+///          - overlap: Also include marks which overlap the range, even if
+///                     their start position is less than `start`
 ///          - type: Filter marks by type: "highlight", "sign", "virt_text" and "virt_lines"
 /// @param[out] err   Error details, if any
 /// @return List of [extmark_id, row, col] tuples in "traversal order".
-Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object end, Dictionary opts,
-                            Error *err)
+Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object end,
+                            Dict(get_extmarks) *opts, Error *err)
   FUNC_API_SINCE(7)
 {
   Array rv = ARRAY_DICT_INIT;
@@ -348,63 +354,32 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
     return rv;
   }
 
-  bool all_ns;
-  if (ns_id == -1) {
-    all_ns = true;
-  } else {
-    VALIDATE_INT(ns_initialized((uint32_t)ns_id), "ns_id", ns_id, {
-      return rv;
-    });
-    all_ns = false;
-  }
+  VALIDATE_INT(ns_id == -1 || ns_initialized((uint32_t)ns_id), "ns_id", ns_id, {
+    return rv;
+  });
+
+  bool details = opts->details;
+  bool hl_name = GET_BOOL_OR_TRUE(opts, get_extmarks, hl_name);
 
-  Integer limit = -1;
-  bool details = false;
-  bool hl_name = true;
   ExtmarkType type = kExtmarkNone;
-
-  for (size_t i = 0; i < opts.size; i++) {
-    String k = opts.items[i].key;
-    Object *v = &opts.items[i].value;
-    if (strequal("limit", k.data)) {
-      VALIDATE_T("limit", kObjectTypeInteger, v->type, {
-        return rv;
-      });
-      limit = v->data.integer;
-    } else if (strequal("details", k.data)) {
-      details = api_object_to_bool(*v, "details", false, err);
-      if (ERROR_SET(err)) {
-        return rv;
-      }
-    } else if (strequal("hl_name", k.data)) {
-      hl_name = api_object_to_bool(*v, "hl_name", false, err);
-      if (ERROR_SET(err)) {
-        return rv;
-      }
-    } else if (strequal("type", k.data)) {
-      VALIDATE_EXP(v->type == kObjectTypeString, "type", "String", api_typename(v->type), {
-        return rv;
-      });
-      if (strequal(v->data.string.data, "sign")) {
-        type = kExtmarkSign;
-      } else if (strequal(v->data.string.data, "virt_text")) {
-        type = kExtmarkVirtText;
-      } else if (strequal(v->data.string.data, "virt_lines")) {
-        type = kExtmarkVirtLines;
-      } else if (strequal(v->data.string.data, "highlight")) {
-        type = kExtmarkHighlight;
-      } else {
-        VALIDATE_EXP(false, "type", "sign, virt_text, virt_lines or highlight", v->data.string.data, {
-          return rv;
-        });
-      }
+  if (HAS_KEY(opts, get_extmarks, type)) {
+    if (strequal(opts->type.data, "sign")) {
+      type = kExtmarkSign;
+    } else if (strequal(opts->type.data, "virt_text")) {
+      type = kExtmarkVirtText;
+    } else if (strequal(opts->type.data, "virt_lines")) {
+      type = kExtmarkVirtLines;
+    } else if (strequal(opts->type.data, "highlight")) {
+      type = kExtmarkHighlight;
     } else {
-      VALIDATE_S(false, "'opts' key", k.data, {
+      VALIDATE_EXP(false, "type", "sign, virt_text, virt_lines or highlight", opts->type.data, {
         return rv;
       });
     }
   }
 
+  Integer limit = HAS_KEY(opts, get_extmarks, limit) ? opts->limit : -1;
+
   if (limit == 0) {
     return rv;
   } else if (limit < 0) {
@@ -429,11 +404,12 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
     reverse = true;
   }
 
+  // note: ns_id=-1 allowed, represented as UINT32_MAX
   ExtmarkInfoArray marks = extmark_get(buf, (uint32_t)ns_id, l_row, l_col, u_row,
-                                       u_col, (int64_t)limit, reverse, all_ns, type);
+                                       u_col, (int64_t)limit, reverse, type, opts->overlap);
 
   for (size_t i = 0; i < kv_size(marks); i++) {
-    ADD(rv, ARRAY_OBJ(extmark_to_array(&kv_A(marks, i), true, (bool)details, hl_name)));
+    ADD(rv, ARRAY_OBJ(extmark_to_array(&kv_A(marks, i), true, details, hl_name)));
   }
 
   kv_destroy(marks);
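
To illustrate the `limit`/reverse-traversal case mentioned in the docs above, a small sketch (illustrative only; names are arbitrary):

```lua
local api = vim.api
local ns = api.nvim_create_namespace('demo')  -- arbitrary namespace name
local pos = api.nvim_win_get_cursor(0)        -- (1,0)-indexed cursor position

-- `end` < `start` makes the traversal run backwards, so with limit = 1 this
-- returns the closest mark at or before the cursor.
local prev = api.nvim_buf_get_extmarks(0, ns, { pos[1] - 1, pos[2] }, { 0, 0 },
                                       { limit = 1 })
```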
@@ -451,6 +427,11 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
 /// Using the optional arguments, it is possible to use this to highlight
 /// a range of text, and also to associate virtual text to the mark.
 ///
+/// If present, the position defined by `end_col` and `end_row` should be after
+/// the start position in order for the extmark to cover a range.
+/// An earlier end position is not an error, but then it behaves like an empty
+/// range (no highlighting).
+///
 /// @param buffer  Buffer handle, or 0 for current buffer
 /// @param ns_id  Namespace id from |nvim_create_namespace()|
 /// @param line  Line where to place the mark, 0-based. |api-indexing|
@@ -1230,3 +1211,14 @@ free_exit:
   clear_virttext(&virt_text);
   return virt_text;
 }
+
+String nvim__buf_debug_extmarks(Buffer buffer, Boolean keys, Boolean dot, Error *err)
+  FUNC_API_SINCE(7)
+{
+  buf_T *buf = find_buffer_by_handle(buffer, err);
+  if (!buf) {
+    return NULL_STRING;
+  }
+
+  return mt_inspect(buf->b_marktree, keys, dot);
+}
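
`nvim__buf_debug_extmarks` is a private helper (note the double underscore), meant for tests and debugging rather than plugins; a quick way to inspect a buffer's marktree from Lua:

```lua
-- buffer = 0 (current), keys = true, dot = false; returns a string dump
-- of the buffer's marktree.
print(vim.api.nvim__buf_debug_extmarks(0, true, false))
```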
diff --git a/src/nvim/api/keysets.h b/src/nvim/api/keysets.h
index 0a07e8c16f..4e5e7af619 100644
--- a/src/nvim/api/keysets.h
+++ b/src/nvim/api/keysets.h
@@ -50,6 +50,15 @@ typedef struct {
   Boolean ui_watched;
 } Dict(set_extmark);
 
+typedef struct {
+  OptionalKeys is_set__get_extmarks_;
+  Integer limit;
+  Boolean details;
+  Boolean hl_name;
+  Boolean overlap;
+  String type;
+} Dict(get_extmarks);
+
 typedef struct {
   OptionalKeys is_set__keymap_;
   Boolean noremap;
diff --git a/src/nvim/buffer.c b/src/nvim/buffer.c
index 8eacec4d5e..d2a5eab0a5 100644
--- a/src/nvim/buffer.c
+++ b/src/nvim/buffer.c
@@ -747,6 +747,7 @@ void buf_clear_file(buf_T *buf)
 void buf_clear(void)
 {
   linenr_T line_count = curbuf->b_ml.ml_line_count;
+  extmark_free_all(curbuf);   // delete any extmarks
   while (!(curbuf->b_ml.ml_flags & ML_EMPTY)) {
     ml_delete((linenr_T)1, false);
   }
diff --git a/src/nvim/decoration.c b/src/nvim/decoration.c
index d9d1417d2a..265bc11661 100644
--- a/src/nvim/decoration.c
+++ b/src/nvim/decoration.c
@@ -158,7 +158,7 @@ Decoration *decor_find_virttext(buf_T *buf, int row, uint64_t ns_id)
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, row, 0,  itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0 || mark.pos.row > row) {
       break;
     } else if (marktree_decor_level(mark) < kDecorLevelVisible) {
@@ -189,7 +189,7 @@ bool decor_redraw_reset(win_T *wp, DecorState *state)
   return wp->w_buffer->b_marktree->n_keys;
 }
 
-Decoration get_decor(mtkey_t mark)
+Decoration get_decor(MTKey mark)
 {
   if (mark.decor_full) {
     return *mark.decor_full;
@@ -211,50 +211,20 @@ bool decor_redraw_start(win_T *wp, int top_row, DecorState *state)
 {
   buf_T *buf = wp->w_buffer;
   state->top_row = top_row;
-  marktree_itr_get(buf->b_marktree, top_row, 0, state->itr);
-  if (!state->itr->node) {
+  if (!marktree_itr_get_overlap(buf->b_marktree, top_row, 0, state->itr)) {
     return false;
   }
-  marktree_itr_rewind(buf->b_marktree, state->itr);
-  while (true) {
-    mtkey_t mark = marktree_itr_current(state->itr);
-    if (mark.pos.row < 0) {  // || mark.row > end_row
-      break;
-    }
-    if ((mark.pos.row < top_row && mt_end(mark))
-        || marktree_decor_level(mark) < kDecorLevelVisible) {
-      goto next_mark;
+  MTPair pair;
+
+  while (marktree_itr_step_overlap(buf->b_marktree, state->itr, &pair)) {
+    if (marktree_decor_level(pair.start) < kDecorLevelVisible) {
+      continue;
     }
 
-    Decoration decor = get_decor(mark);
+    Decoration decor = get_decor(pair.start);
 
-    mtpos_t altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
-
-    // Exclude start marks if the end mark position is above the top row
-    // Exclude end marks if we have already added the start mark
-    if ((mt_start(mark) && altpos.row < top_row && !decor_virt_pos(&decor))
-        || (mt_end(mark) && altpos.row >= top_row)) {
-      goto next_mark;
-    }
-
-    if (mt_end(mark)) {
-      decor_add(state, altpos.row, altpos.col, mark.pos.row, mark.pos.col,
-                &decor, false, mark.ns, mark.id);
-    } else {
-      if (altpos.row == -1) {
-        altpos.row = mark.pos.row;
-        altpos.col = mark.pos.col;
-      }
-      decor_add(state, mark.pos.row, mark.pos.col, altpos.row, altpos.col,
-                &decor, false, mark.ns, mark.id);
-    }
-
-next_mark:
-    if (marktree_itr_node_done(state->itr)) {
-      marktree_itr_next(buf->b_marktree, state->itr);
-      break;
-    }
-    marktree_itr_next(buf->b_marktree, state->itr);
+    decor_add(state, pair.start.pos.row, pair.start.pos.col, pair.end_pos.row, pair.end_pos.col,
+              &decor, false, pair.start.ns, pair.start.id);
   }
 
   return true;  // TODO(bfredl): check if available in the region
@@ -268,7 +238,13 @@ bool decor_redraw_line(win_T *wp, int row, DecorState *state)
   state->row = row;
   state->col_until = -1;
   state->eol_col = -1;
-  return true;  // TODO(bfredl): be more precise
+
+  if (kv_size(state->active)) {
+    return true;
+  }
+
+  MTKey k = marktree_itr_current(state->itr);
+  return (k.pos.row >= 0 && k.pos.row <= row);
 }
 
 static void decor_add(DecorState *state, int start_row, int start_col, int end_row, int end_col,
@@ -302,7 +278,7 @@ int decor_redraw_col(win_T *wp, int col, int win_col, bool hidden, DecorState *s
   while (true) {
     // TODO(bfredl): check duplicate entry in "intersection"
     // branch
-    mtkey_t mark = marktree_itr_current(state->itr);
+    MTKey mark = marktree_itr_current(state->itr);
     if (mark.pos.row < 0 || mark.pos.row > state->row) {
       break;
     } else if (mark.pos.row == state->row && mark.pos.col > col) {
@@ -317,8 +293,7 @@ int decor_redraw_col(win_T *wp, int col, int win_col, bool hidden, DecorState *s
 
     Decoration decor = get_decor(mark);
 
-    mtpos_t endpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
-
+    MTPos endpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
     if (endpos.row == -1) {
       endpos = mark.pos;
     }
@@ -412,8 +387,28 @@ void decor_redraw_signs(buf_T *buf, int row, int *num_signs, SignTextAttrs sattr
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, row, 0, itr);
 
+  // TODO(bfredl): integrate with main decor loop.
+  if (!marktree_itr_get_overlap(buf->b_marktree, row, 0, itr)) {
+    return;
+  }
+
+  MTPair pair;
+  while (marktree_itr_step_overlap(buf->b_marktree, itr, &pair)) {
+    if (marktree_decor_level(pair.start) < kDecorLevelVisible) {
+      continue;
+    }
+
+    Decoration *decor = pair.start.decor_full;
+
+    if (!decor || !decor_has_sign(decor)) {
+      continue;
+    }
+
+    decor_to_sign(decor, num_signs, sattrs, num_id, line_id, cul_id);
+  }
+
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0 || mark.pos.row > row) {
       break;
     }
@@ -428,46 +423,52 @@ void decor_redraw_signs(buf_T *buf, int row, int *num_signs, SignTextAttrs sattr
       goto next_mark;
     }
 
-    if (decor->sign_text) {
-      int j;
-      for (j = (*num_signs); j > 0; j--) {
-        if (sattrs[j - 1].priority >= decor->priority) {
-          break;
-        }
-        if (j < SIGN_SHOW_MAX) {
-          sattrs[j] = sattrs[j - 1];
-        }
-      }
-      if (j < SIGN_SHOW_MAX) {
-        sattrs[j] = (SignTextAttrs) {
-          .text = decor->sign_text,
-          .hl_id = decor->sign_hl_id,
-          .priority = decor->priority
-        };
-        (*num_signs)++;
-      }
-    }
-
-    struct { HlPriId *dest; int hl; } cattrs[] = {
-      { line_id, decor->line_hl_id        },
-      { num_id,  decor->number_hl_id      },
-      { cul_id,  decor->cursorline_hl_id  },
-      { NULL, -1 },
-    };
-    for (int i = 0; cattrs[i].dest; i++) {
-      if (cattrs[i].hl != 0 && decor->priority >= cattrs[i].dest->priority) {
-        *cattrs[i].dest = (HlPriId) {
-          .hl_id = cattrs[i].hl,
-          .priority = decor->priority
-        };
-      }
-    }
+    decor_to_sign(decor, num_signs, sattrs, num_id, line_id, cul_id);
 
 next_mark:
     marktree_itr_next(buf->b_marktree, itr);
   }
 }
 
+static void decor_to_sign(Decoration *decor, int *num_signs, SignTextAttrs sattrs[],
+                          HlPriId *num_id, HlPriId *line_id, HlPriId *cul_id)
+{
+  if (decor->sign_text) {
+    int j;
+    for (j = (*num_signs); j > 0; j--) {
+      if (sattrs[j - 1].priority >= decor->priority) {
+        break;
+      }
+      if (j < SIGN_SHOW_MAX) {
+        sattrs[j] = sattrs[j - 1];
+      }
+    }
+    if (j < SIGN_SHOW_MAX) {
+      sattrs[j] = (SignTextAttrs) {
+        .text = decor->sign_text,
+        .hl_id = decor->sign_hl_id,
+        .priority = decor->priority
+      };
+      (*num_signs)++;
+    }
+  }
+
+  struct { HlPriId *dest; int hl; } cattrs[] = {
+    { line_id, decor->line_hl_id        },
+    { num_id,  decor->number_hl_id      },
+    { cul_id,  decor->cursorline_hl_id  },
+    { NULL, -1 },
+  };
+  for (int i = 0; cattrs[i].dest; i++) {
+    if (cattrs[i].hl != 0 && decor->priority >= cattrs[i].dest->priority) {
+      *cattrs[i].dest = (HlPriId) {
+        .hl_id = cattrs[i].hl,
+        .priority = decor->priority
+      };
+    }
+  }
+}
+
 // Get the maximum required amount of sign columns needed between row and
 // end_row.
 int decor_signcols(buf_T *buf, DecorState *state, int row, int end_row, int max)
@@ -488,7 +489,7 @@ int decor_signcols(buf_T *buf, DecorState *state, int row, int end_row, int max)
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, 0, -1, itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0 || mark.pos.row > end_row) {
       break;
     }
@@ -525,7 +526,7 @@ int decor_signcols(buf_T *buf, DecorState *state, int row, int end_row, int max)
       goto next_mark;
     }
 
-    mtpos_t altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
+    MTPos altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
 
     if (mt_end(mark)) {
       if (mark.pos.row >= row && altpos.row <= end_row) {
@@ -610,7 +611,7 @@ int decor_virt_lines(win_T *wp, linenr_T lnum, VirtLines *lines, TriState has_fo
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, start_row, 0, itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0 || mark.pos.row >= end_row) {
       break;
     } else if (mt_end(mark)
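
The sign handling above pairs with the news entry: a sign attached to a ranged extmark is now shown on each line of the range. An illustrative sketch (arbitrary names; assumes the buffer has at least six lines):

```lua
local api = vim.api
local ns = api.nvim_create_namespace('demo')  -- arbitrary namespace name

-- A ranged extmark carrying sign text.
api.nvim_buf_set_extmark(0, ns, 2, 0, {
  end_row = 5,
  sign_text = '>>',
  sign_hl_group = 'DiagnosticSignWarn',
})
-- Per the news entry, the '>>' sign now appears on rows 2..5, not only row 2.
```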
diff --git a/src/nvim/drawline.c b/src/nvim/drawline.c
index 4b989fa59a..969021ef2c 100644
--- a/src/nvim/drawline.c
+++ b/src/nvim/drawline.c
@@ -2889,15 +2889,21 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
         && !wp->w_p_wrap
         && wlv.filler_todo <= 0
         && (wp->w_p_rl ? wlv.col == 0 : wlv.col == grid->cols - 1)
-        && !has_fold
-        && (*ptr != NUL
-            || lcs_eol_one > 0
-            || (wlv.n_extra > 0 && (wlv.c_extra != NUL || *wlv.p_extra != NUL))
-            || wlv.more_virt_inline_chunks)) {
-      c = wp->w_p_lcs_chars.ext;
-      wlv.char_attr = win_hl_attr(wp, HLF_AT);
-      mb_c = c;
-      mb_utf8 = check_mb_utf8(&c, u8cc);
+        && !has_fold) {
+      if (*ptr == NUL && lcs_eol_one == 0 && has_decor) {
+        // Tricky: there might be a virtual text just _after_ the last char
+        decor_redraw_col(wp, (colnr_T)v, wlv.off, false, &decor_state);
+        handle_inline_virtual_text(wp, &wlv, v);
+      }
+      if (*ptr != NUL
+          || lcs_eol_one > 0
+          || (wlv.n_extra > 0 && (wlv.c_extra != NUL || *wlv.p_extra != NUL))
+          || wlv.more_virt_inline_chunks) {
+        c = wp->w_p_lcs_chars.ext;
+        wlv.char_attr = win_hl_attr(wp, HLF_AT);
+        mb_c = c;
+        mb_utf8 = check_mb_utf8(&c, u8cc);
+      }
     }
 
     // advance to the next 'colorcolumn'
@@ -3079,6 +3085,15 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
       wlv.char_attr = saved_attr2;
     }
 
+    if ((wp->w_p_rl ? (wlv.col < 0) : (wlv.col >= grid->cols)) && has_decor) {
+      // At the end of screen line: might need to peek for decorations just after
+      // this position. Without wrapping, we might need to display win_pos overlays
+      // from the entire text line.
+      colnr_T nextpos = wp->w_p_wrap ? (colnr_T)(ptr - line) : (colnr_T)strlen(line);
+      decor_redraw_col(wp, nextpos, wlv.off, true, &decor_state);
+      handle_inline_virtual_text(wp, &wlv, v);
+    }
+
     // At end of screen line and there is more to come: Display the line
     // so far.  If there is no more to display it is caught above.
     if ((wp->w_p_rl ? (wlv.col < 0) : (wlv.col >= grid->cols))
diff --git a/src/nvim/ex_getln.c b/src/nvim/ex_getln.c
index 08b010c153..09781f392a 100644
--- a/src/nvim/ex_getln.c
+++ b/src/nvim/ex_getln.c
@@ -2410,6 +2410,9 @@ static void cmdpreview_restore_state(CpInfo *cpinfo)
 
     buf->b_changed = cp_bufinfo.save_b_changed;
 
+    // Clear preview highlights.
+    extmark_clear(buf, (uint32_t)cmdpreview_ns, 0, 0, MAXLNUM, MAXCOL);
+
     if (buf->b_u_seq_cur != cp_bufinfo.undo_info.save_b_u_seq_cur) {
       int count = 0;
 
@@ -2439,9 +2442,6 @@ static void cmdpreview_restore_state(CpInfo *cpinfo)
     }
 
     buf->b_p_ul = cp_bufinfo.save_b_p_ul;        // Restore 'undolevels'
-
-    // Clear preview highlights.
-    extmark_clear(buf, (uint32_t)cmdpreview_ns, 0, 0, MAXLNUM, MAXCOL);
   }
 
   for (size_t i = 0; i < cpinfo->win_info.size; i++) {
diff --git a/src/nvim/extmark.c b/src/nvim/extmark.c
index 77e41cb5cf..5140fe199e 100644
--- a/src/nvim/extmark.c
+++ b/src/nvim/extmark.c
@@ -82,7 +82,7 @@ void extmark_set(buf_T *buf, uint32_t ns_id, uint32_t *idp, int row, colnr_T col
     id = ++*ns;
   } else {
     MarkTreeIter itr[1] = { 0 };
-    mtkey_t old_mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
+    MTKey old_mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
     if (old_mark.id) {
       if (decor_state.running_on_lines) {
         if (err) {
@@ -124,8 +124,8 @@ void extmark_set(buf_T *buf, uint32_t ns_id, uint32_t *idp, int row, colnr_T col
     }
   }
 
-  mtkey_t mark = { { row, col }, ns_id, id, 0,
-                   mt_flags(right_gravity, decor_level), 0, NULL };
+  MTKey mark = { { row, col }, ns_id, id, 0,
+                 mt_flags(right_gravity, decor_level), 0, NULL };
   if (decor_full) {
     mark.decor_full = decor;
   } else if (decor) {
@@ -180,7 +180,7 @@ error:
 static bool extmark_setraw(buf_T *buf, uint64_t mark, int row, colnr_T col)
 {
   MarkTreeIter itr[1] = { 0 };
-  mtkey_t key = marktree_lookup(buf->b_marktree, mark, itr);
+  MTKey key = marktree_lookup(buf->b_marktree, mark, itr);
   if (key.pos.row == -1) {
     return false;
   }
@@ -199,14 +199,14 @@ static bool extmark_setraw(buf_T *buf, uint64_t mark, int row, colnr_T col)
 bool extmark_del(buf_T *buf, uint32_t ns_id, uint32_t id)
 {
   MarkTreeIter itr[1] = { 0 };
-  mtkey_t key = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
+  MTKey key = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
   if (!key.id) {
     return false;
   }
   assert(key.pos.row >= 0);
   uint64_t other = marktree_del_itr(buf->b_marktree, itr, false);
 
-  mtkey_t key2 = key;
+  MTKey key2 = key;
 
   if (other) {
     key2 = marktree_lookup(buf->b_marktree, other, itr);
@@ -250,7 +250,7 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, l_row, l_col, itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0
         || mark.pos.row > u_row
         || (mark.pos.row == u_row && mark.pos.col > u_col)) {
@@ -292,7 +292,7 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
   uint64_t id;
   ssize_t decor_id;
   map_foreach(&delete_set, id, decor_id, {
-    mtkey_t mark = marktree_lookup(buf->b_marktree, id, itr);
+    MTKey mark = marktree_lookup(buf->b_marktree, id, itr);
     assert(marktree_itr_valid(itr));
     marktree_del_itr(buf->b_marktree, itr, false);
     if (decor_id >= 0) {
@@ -313,17 +313,31 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
 /// dir can be set to control the order of the array
 /// amount = amount of marks to find or -1 for all
 ExtmarkInfoArray extmark_get(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_row,
-                             colnr_T u_col, int64_t amount, bool reverse, bool all_ns,
-                             ExtmarkType type_filter)
+                             colnr_T u_col, int64_t amount, bool reverse, ExtmarkType type_filter,
+                             bool overlap)
 {
   ExtmarkInfoArray array = KV_INITIAL_VALUE;
   MarkTreeIter itr[1];
-  // Find all the marks
-  marktree_itr_get_ext(buf->b_marktree, mtpos_t(l_row, l_col),
-                       itr, reverse, false, NULL);
+
+  if (overlap) {
+    // Find all the marks overlapping the start position
+    if (!marktree_itr_get_overlap(buf->b_marktree, l_row, l_col, itr)) {
+      return array;
+    }
+
+    MTPair pair;
+    while (marktree_itr_step_overlap(buf->b_marktree, itr, &pair)) {
+      push_mark(&array, ns_id, type_filter, pair.start, pair.end_pos, pair.end_right_gravity);
+    }
+  } else {
+    // Find all the marks beginning with the start position
+    marktree_itr_get_ext(buf->b_marktree, MTPos(l_row, l_col),
+                         itr, reverse, false, NULL);
+  }
+
   int order = reverse ? -1 : 1;
   while ((int64_t)kv_size(array) < amount) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0
         || (mark.pos.row - u_row) * order > 0
         || (mark.pos.row == u_row && (mark.pos.col - u_col) * order > 0)) {
@@ -333,35 +347,8 @@ ExtmarkInfoArray extmark_get(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_co
       goto next_mark;
     }
 
-    uint16_t type_flags = kExtmarkNone;
-    if (type_filter != kExtmarkNone) {
-      Decoration *decor = mark.decor_full;
-      if (decor && (decor->sign_text || decor->number_hl_id)) {
-        type_flags |= kExtmarkSign;
-      }
-      if (decor && decor->virt_text.size) {
-        type_flags |= kExtmarkVirtText;
-      }
-      if (decor && decor->virt_lines.size) {
-        type_flags |= kExtmarkVirtLines;
-      }
-      if ((decor && (decor->line_hl_id || decor->cursorline_hl_id))
-          || mark.hl_id) {
-        type_flags |= kExtmarkHighlight;
-      }
-    }
-
-    if ((all_ns || mark.ns == ns_id) && type_flags & type_filter) {
-      mtkey_t end = marktree_get_alt(buf->b_marktree, mark, NULL);
-      kv_push(array, ((ExtmarkInfo) { .ns_id = mark.ns,
-                                      .mark_id = mark.id,
-                                      .row = mark.pos.row, .col = mark.pos.col,
-                                      .end_row = end.pos.row,
-                                      .end_col = end.pos.col,
-                                      .right_gravity = mt_right(mark),
-                                      .end_right_gravity = mt_right(end),
-                                      .decor = get_decor(mark) }));
-    }
+    MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
+    push_mark(&array, ns_id, type_filter, mark, end.pos, mt_right(end));
 next_mark:
     if (reverse) {
       marktree_itr_prev(buf->b_marktree, itr);
@@ -372,16 +359,54 @@ next_mark:
   return array;
 }
 
+static void push_mark(ExtmarkInfoArray *array, uint32_t ns_id, ExtmarkType type_filter, MTKey mark,
+                      MTPos end_pos, bool end_right)
+{
+  if (!(ns_id == UINT32_MAX || mark.ns == ns_id)) {
+    return;
+  }
+  uint16_t type_flags = kExtmarkNone;
+  if (type_filter != kExtmarkNone) {
+    Decoration *decor = mark.decor_full;
+    if (decor && (decor->sign_text || decor->number_hl_id)) {
+      type_flags |= kExtmarkSign;
+    }
+    if (decor && decor->virt_text.size) {
+      type_flags |= kExtmarkVirtText;
+    }
+    if (decor && decor->virt_lines.size) {
+      type_flags |= kExtmarkVirtLines;
+    }
+    if ((decor && (decor->line_hl_id || decor->cursorline_hl_id))
+        || mark.hl_id) {
+      type_flags |= kExtmarkHighlight;
+    }
+
+    if (!(type_flags & type_filter)) {
+      return;
+    }
+  }
+
+  kv_push(*array, ((ExtmarkInfo) { .ns_id = mark.ns,
+                                   .mark_id = mark.id,
+                                   .row = mark.pos.row, .col = mark.pos.col,
+                                   .end_row = end_pos.row,
+                                   .end_col = end_pos.col,
+                                   .right_gravity = mt_right(mark),
+                                   .end_right_gravity = end_right,
+                                   .decor = get_decor(mark) }));
+}
+
 /// Lookup an extmark by id
 ExtmarkInfo extmark_from_id(buf_T *buf, uint32_t ns_id, uint32_t id)
 {
   ExtmarkInfo ret = { 0, 0, -1, -1, -1, -1, false, false, DECORATION_INIT };
-  mtkey_t mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, NULL);
+  MTKey mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, NULL);
   if (!mark.id) {
     return ret;
   }
   assert(mark.pos.row >= 0);
-  mtkey_t end = marktree_get_alt(buf->b_marktree, mark, NULL);
+  MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
 
   ret.ns_id = ns_id;
   ret.mark_id = id;
@@ -406,7 +431,7 @@ void extmark_free_all(buf_T *buf)
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, 0, 0, itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0) {
       break;
     }
@@ -462,7 +487,7 @@ void u_extmark_copy(buf_T *buf, int l_row, colnr_T l_col, int u_row, colnr_T u_c
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get(buf->b_marktree, (int32_t)l_row, l_col, itr);
   while (true) {
-    mtkey_t mark = marktree_itr_current(itr);
+    MTKey mark = marktree_itr_current(itr);
     if (mark.pos.row < 0
         || mark.pos.row > u_row
         || (mark.pos.row == u_row && mark.pos.col > u_col)) {
diff --git a/src/nvim/marktree.c b/src/nvim/marktree.c
index d07d176b6d..d8b8dbba29 100644
--- a/src/nvim/marktree.c
+++ b/src/nvim/marktree.c
@@ -14,8 +14,6 @@
 // Use marktree_itr_current and marktree_itr_next/prev to read marks in a loop.
 // marktree_del_itr deletes the current mark of the iterator and implicitly
 // moves the iterator to the next mark.
-//
-// Work is ongoing to fully support ranges (mark pairs).
 
 // Copyright notice for kbtree (included in heavily modified form):
 //
@@ -58,19 +56,27 @@
 #include "nvim/memory.h"
 #include "nvim/pos.h"
 
+// only for debug functions
+#include "nvim/api/private/helpers.h"
+
 #define T MT_BRANCH_FACTOR
-#define ILEN (sizeof(mtnode_t) + (2 * T) * sizeof(void *))
+#define ILEN (sizeof(MTNode) + (2 * T) * sizeof(void *))
 
 #define ID_INCR (((uint64_t)1) << 2)
 
-#define rawkey(itr) ((itr)->node->key[(itr)->i])
+#define rawkey(itr) ((itr)->x->key[(itr)->i])
 
-static bool pos_leq(mtpos_t a, mtpos_t b)
+static bool pos_leq(MTPos a, MTPos b)
 {
   return a.row < b.row || (a.row == b.row && a.col <= b.col);
 }
 
-static void relative(mtpos_t base, mtpos_t *val)
+static bool pos_less(MTPos a, MTPos b)
+{
+  return !pos_leq(b, a);
+}
+
+static void relative(MTPos base, MTPos *val)
 {
   assert(pos_leq(base, *val));
   if (val->row == base.row) {
@@ -81,7 +87,7 @@ static void relative(mtpos_t base, mtpos_t *val)
   }
 }
 
-static void unrelative(mtpos_t base, mtpos_t *val)
+static void unrelative(MTPos base, MTPos *val)
 {
   if (val->row == 0) {
     val->row = base.row;
@@ -91,7 +97,7 @@ static void unrelative(mtpos_t base, mtpos_t *val)
   }
 }
 
-static void compose(mtpos_t *base, mtpos_t val)
+static void compose(MTPos *base, MTPos val)
 {
   if (val.row == 0) {
     base->col += val.col;
@@ -101,12 +107,21 @@ static void compose(mtpos_t *base, mtpos_t val)
   }
 }
 
+// Used by `marktree_splice`. Need to keep track of marks which moved
+// in order to repair intersections.
+typedef struct {
+  uint64_t id;
+  MTNode *old, *new;
+  int old_i, new_i;
+} Damage;
+typedef kvec_withinit_t(Damage, 8) DamageList;
+
 #ifdef INCLUDE_GENERATED_DECLARATIONS
 # include "marktree.c.generated.h"
 #endif
 
 #define mt_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
-static int key_cmp(mtkey_t a, mtkey_t b)
+static int key_cmp(MTKey a, MTKey b)
 {
   int cmp = mt_generic_cmp(a.pos.row, b.pos.row);
   if (cmp != 0) {
@@ -116,18 +131,25 @@ static int key_cmp(mtkey_t a, mtkey_t b)
   if (cmp != 0) {
     return cmp;
   }
-  // NB: keeping the events at the same pos sorted by id is actually not
-  // necessary only make sure that START is before END etc.
-  return mt_generic_cmp(a.flags, b.flags);
+
+  // TODO(bfredl): MT_FLAG_REAL could go away if we fix marktree_getp_aux for real
+  const uint16_t cmp_mask = MT_FLAG_RIGHT_GRAVITY | MT_FLAG_END | MT_FLAG_REAL | MT_FLAG_LAST;
+  return mt_generic_cmp(a.flags & cmp_mask, b.flags & cmp_mask);
 }
 
-static inline int marktree_getp_aux(const mtnode_t *x, mtkey_t k, int *r)
+/// @return position of k if it exists in the node, otherwise the position
+/// it should be inserted, which ranges from 0 to x->n _inclusively_
+/// @param match (optional) set to TRUE if match (pos, gravity) was found
+static inline int marktree_getp_aux(const MTNode *x, MTKey k, bool *match)
 {
-  int tr, *rr, begin = 0, end = x->n;
+  bool dummy_match;
+  bool *m = match ? match : &dummy_match;
+
+  int begin = 0, end = x->n;
   if (x->n == 0) {
+    *m = false;
     return -1;
   }
-  rr = r ? r : &tr;
   while (begin < end) {
     int mid = (begin + end) >> 1;
     if (key_cmp(x->key[mid], k) < 0) {
@@ -137,47 +159,84 @@ static inline int marktree_getp_aux(const mtnode_t *x, mtkey_t k, int *r)
     }
   }
   if (begin == x->n) {
-    *rr = 1; return x->n - 1;
+    *m = false;
+    return x->n - 1;
   }
-  if ((*rr = key_cmp(k, x->key[begin])) < 0) {
+  if (!(*m = (key_cmp(k, x->key[begin]) == 0))) {
     begin--;
   }
   return begin;
 }
 
-static inline void refkey(MarkTree *b, mtnode_t *x, int i)
+static inline void refkey(MarkTree *b, MTNode *x, int i)
 {
   pmap_put(uint64_t)(b->id2node, mt_lookup_key(x->key[i]), x);
 }
 
+static MTNode *id2node(MarkTree *b, uint64_t id)
+{
+  return pmap_get(uint64_t)(b->id2node, id);
+}
+
 // put functions
 
 // x must be an internal node, which is not full
 // x->ptr[i] should be a full node, i e x->ptr[i]->n == 2*T-1
-static inline void split_node(MarkTree *b, mtnode_t *x, const int i)
+static inline void split_node(MarkTree *b, MTNode *x, const int i, MTKey next)
 {
-  mtnode_t *y = x->ptr[i];
-  mtnode_t *z;
-  z = (mtnode_t *)xcalloc(1, y->level ? ILEN : sizeof(mtnode_t));
-  b->n_nodes++;
+  MTNode *y = x->ptr[i];
+  MTNode *z = marktree_alloc_node(b, y->level);
   z->level = y->level;
   z->n = T - 1;
-  memcpy(z->key, &y->key[T], sizeof(mtkey_t) * (T - 1));
+
+  // tricky: we might split a node in between inserting the start node and the end
+  // node of the same pair. Then we must not intersect this id yet (done later
+  // in marktree_intersect_pair).
+  uint64_t last_start = mt_end(next) ? mt_lookup_id(next.ns, next.id, false) : MARKTREE_END_FLAG;
+
+  // no alloc in the common case (less than 4 intersects)
+  kvi_copy(z->intersect, y->intersect);
+
+  if (!y->level) {
+    uint64_t pi = pseudo_index(y, 0);  // note: sloppy pseudo-index
+    for (int j = 0; j < T; j++) {
+      MTKey k = y->key[j];
+      uint64_t pi_end = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, true), true);
+      if (mt_start(k) && pi_end > pi && mt_lookup_key(k) != last_start) {
+        intersect_node(b, z, mt_lookup_id(k.ns, k.id, false));
+      }
+    }
+
+    // note: y->key[T-1] is moved up and thus checked for both
+    for (int j = T - 1; j < (T * 2) - 1; j++) {
+      MTKey k = y->key[j];
+      uint64_t pi_start = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, false), true);
+      if (mt_end(k) && pi_start > 0 && pi_start < pi) {
+        intersect_node(b, y, mt_lookup_id(k.ns, k.id, false));
+      }
+    }
+  }
+
+  memcpy(z->key, &y->key[T], sizeof(MTKey) * (T - 1));
   for (int j = 0; j < T - 1; j++) {
     refkey(b, z, j);
   }
   if (y->level) {
-    memcpy(z->ptr, &y->ptr[T], sizeof(mtnode_t *) * T);
+    memcpy(z->ptr, &y->ptr[T], sizeof(MTNode *) * T);
     for (int j = 0; j < T; j++) {
       z->ptr[j]->parent = z;
+      z->ptr[j]->p_idx = (int16_t)j;
     }
   }
   y->n = T - 1;
   memmove(&x->ptr[i + 2], &x->ptr[i + 1],
-          sizeof(mtnode_t *) * (size_t)(x->n - i));
+          sizeof(MTNode *) * (size_t)(x->n - i));
   x->ptr[i + 1] = z;
   z->parent = x;  // == y->parent
-  memmove(&x->key[i + 1], &x->key[i], sizeof(mtkey_t) * (size_t)(x->n - i));
+  for (int j = i + 1; j < x->n + 2; j++) {
+    x->ptr[j]->p_idx = (int16_t)j;
+  }
+  memmove(&x->key[i + 1], &x->key[i], sizeof(MTKey) * (size_t)(x->n - i));
 
   // move key to internal layer:
   x->key[i] = y->key[T - 1];
@@ -190,25 +249,32 @@ static inline void split_node(MarkTree *b, mtnode_t *x, const int i)
   if (i > 0) {
     unrelative(x->key[i - 1].pos, &x->key[i].pos);
   }
+
+  if (y->level) {
+    bubble_up(y);
+    bubble_up(z);
+  } else {
+    // code above goes here
+  }
 }
 
 // x must not be a full node (even if there might be internal space)
-static inline void marktree_putp_aux(MarkTree *b, mtnode_t *x, mtkey_t k)
+static inline void marktree_putp_aux(MarkTree *b, MTNode *x, MTKey k)
 {
-  int i;
+  // TODO(bfredl): ugh, make sure this is the _last_ valid (pos, gravity) position,
+  // to minimize movement
+  int i = marktree_getp_aux(x, k, NULL) + 1;
   if (x->level == 0) {
-    i = marktree_getp_aux(x, k, 0);
-    if (i != x->n - 1) {
-      memmove(&x->key[i + 2], &x->key[i + 1],
-              (size_t)(x->n - i - 1) * sizeof(mtkey_t));
+    if (i != x->n) {
+      memmove(&x->key[i + 1], &x->key[i],
+              (size_t)(x->n - i) * sizeof(MTKey));
     }
-    x->key[i + 1] = k;
-    refkey(b, x, i + 1);
+    x->key[i] = k;
+    refkey(b, x, i);
     x->n++;
   } else {
-    i = marktree_getp_aux(x, k, 0) + 1;
     if (x->ptr[i]->n == 2 * T - 1) {
-      split_node(b, x, i);
+      split_node(b, x, i, k);
       if (key_cmp(k, x->key[i]) > 0) {
         i++;
       }
@@ -220,7 +286,7 @@ static inline void marktree_putp_aux(MarkTree *b, mtnode_t *x, mtkey_t k)
   }
 }
 
-void marktree_put(MarkTree *b, mtkey_t key, int end_row, int end_col, bool end_right)
+void marktree_put(MarkTree *b, MTKey key, int end_row, int end_col, bool end_right)
 {
   assert(!(key.flags & ~MT_FLAG_EXTERNAL_MASK));
   if (end_row >= 0) {
@@ -230,32 +296,151 @@ void marktree_put(MarkTree *b, mtkey_t key, int end_row, int end_col, bool end_r
   marktree_put_key(b, key);
 
   if (end_row >= 0) {
-    mtkey_t end_key = key;
+    MTKey end_key = key;
     end_key.flags = (uint16_t)((uint16_t)(key.flags & ~MT_FLAG_RIGHT_GRAVITY)
                                |(uint16_t)MT_FLAG_END
                                |(uint16_t)(end_right ? MT_FLAG_RIGHT_GRAVITY : 0));
-    end_key.pos = (mtpos_t){ end_row, end_col };
+    end_key.pos = (MTPos){ end_row, end_col };
     marktree_put_key(b, end_key);
+    MarkTreeIter itr[1] = { 0 }, end_itr[1] = { 0 };
+    marktree_lookup(b, mt_lookup_key(key), itr);
+    marktree_lookup(b, mt_lookup_key(end_key), end_itr);
+
+    marktree_intersect_pair(b, mt_lookup_key(key), itr, end_itr, false);
   }
 }
 
-void marktree_put_key(MarkTree *b, mtkey_t k)
+// this is currently not used very often, but if it was it should use binary search
+static bool intersection_has(Intersection *x, uint64_t id)
+{
+  for (size_t i = 0; i < kv_size(*x); i++) {
+    if (kv_A(*x, i) == id) {
+      return true;
+    } else if (kv_A(*x, i) >= id) {
+      return false;
+    }
+  }
+  return false;
+}
+
+static void intersect_node(MarkTree *b, MTNode *x, uint64_t id)
+{
+  assert(!(id & MARKTREE_END_FLAG));
+  kvi_pushp(x->intersect);
+  // optimized for the common case: new key is always at the end
+  for (ssize_t i = (ssize_t)kv_size(x->intersect) - 1; i >= 0; i--) {
+    if (i > 0 && kv_A(x->intersect, i - 1) > id) {
+      kv_A(x->intersect, i) = kv_A(x->intersect, i - 1);
+    } else {
+      kv_A(x->intersect, i) = id;
+      break;
+    }
+  }
+}
+
+static void unintersect_node(MarkTree *b, MTNode *x, uint64_t id, bool strict)
+{
+  assert(!(id & MARKTREE_END_FLAG));
+  bool seen = false;
+  size_t i;
+  for (i = 0; i < kv_size(x->intersect); i++) {
+    if (kv_A(x->intersect, i) < id) {
+      continue;
+    } else if (kv_A(x->intersect, i) == id) {
+      seen = true;
+      break;
+    } else {  // (kv_A(x->intersect, i) > id)
+      break;
+    }
+  }
+  if (strict) {
+    assert(seen);
+  }
+
+  if (seen) {
+    if (i < kv_size(x->intersect) - 1) {
+      memmove(&kv_A(x->intersect, i), &kv_A(x->intersect, i + 1), (kv_size(x->intersect) - i - 1) *
+              sizeof(kv_A(x->intersect, i)));
+    }
+    kv_size(x->intersect)--;
+  }
+}
+
+/// @param itr mutated
+/// @param end_itr not mutated
+void marktree_intersect_pair(MarkTree *b, uint64_t id, MarkTreeIter *itr, MarkTreeIter *end_itr,
+                             bool delete)
+{
+  int lvl = 0, maxlvl = MIN(itr->lvl, end_itr->lvl);
+#define iat(itr, l, q) ((l == itr->lvl) ? itr->i + q : itr->s[l].i)
+  for (; lvl < maxlvl; lvl++) {
+    if (itr->s[lvl].i > end_itr->s[lvl].i) {
+      return;  // empty range
+    } else if (itr->s[lvl].i < end_itr->s[lvl].i) {
+      break;  // work to do
+    }
+  }
+  if (lvl == maxlvl && iat(itr, lvl, 1) > iat(end_itr, lvl, 0)) {
+    return;  // empty range
+  }
+
+  while (itr->x) {
+    bool skip = false;
+    if (itr->x == end_itr->x) {
+      if (itr->x->level == 0 || itr->i >= end_itr->i) {
+        break;
+      } else {
+        skip = true;
+      }
+    } else if (itr->lvl > lvl) {
+      skip = true;
+    } else {
+      if (iat(itr, lvl, 1) < iat(end_itr, lvl, 1)) {
+        skip = true;
+      } else {
+        lvl++;
+      }
+    }
+
+    if (skip) {
+      if (itr->x->level) {
+        MTNode *x = itr->x->ptr[itr->i + 1];
+        if (delete) {
+          unintersect_node(b, x, id, true);
+        } else {
+          intersect_node(b, x, id);
+        }
+      }
+    }
+    marktree_itr_next_skip(b, itr, skip, true, NULL);
+  }
+#undef iat
+}
+
+static MTNode *marktree_alloc_node(MarkTree *b, bool internal)
+{
+  MTNode *x = xcalloc(1, internal ? ILEN : sizeof(MTNode));
+  kvi_init(x->intersect);
+  b->n_nodes++;
+  return x;
+}
+
+void marktree_put_key(MarkTree *b, MTKey k)
 {
   k.flags |= MT_FLAG_REAL;  // let's be real.
   if (!b->root) {
-    b->root = (mtnode_t *)xcalloc(1, ILEN);
-    b->n_nodes++;
+    b->root = marktree_alloc_node(b, true);
   }
-  mtnode_t *r, *s;
+  MTNode *r, *s;
   b->n_keys++;
   r = b->root;
   if (r->n == 2 * T - 1) {
-    b->n_nodes++;
-    s = (mtnode_t *)xcalloc(1, ILEN);
+    s = marktree_alloc_node(b, true);
     b->root = s; s->level = r->level + 1; s->n = 0;
     s->ptr[0] = r;
     r->parent = s;
-    split_node(b, s, 0);
+    r->p_idx = 0;
+    split_node(b, s, 0, k);
     r = s;
   }
   marktree_putp_aux(b, r, k);
@@ -289,22 +474,31 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
 {
   int adjustment = 0;
 
-  mtnode_t *cur = itr->node;
+  MTNode *cur = itr->x;
   int curi = itr->i;
   uint64_t id = mt_lookup_key(cur->key[curi]);
-  // fprintf(stderr, "\nDELET %lu\n", id);
 
-  mtkey_t raw = rawkey(itr);
+  MTKey raw = rawkey(itr);
   uint64_t other = 0;
-  if (mt_paired(raw)) {
-    other = mt_lookup_id(raw.ns, raw.id, !mt_end(raw));
+  if (mt_paired(raw) && !(raw.flags & MT_FLAG_ORPHANED)) {
+    other = mt_lookup_key_side(raw, !mt_end(raw));
+
+    MarkTreeIter other_itr[1];
+    marktree_lookup(b, other, other_itr);
+    rawkey(other_itr).flags |= MT_FLAG_ORPHANED;
+    // Remove intersect markers. NB: must match exactly!
+    if (mt_start(raw)) {
+      MarkTreeIter this_itr[1] = { *itr };  // mutated copy
+      marktree_intersect_pair(b, id, this_itr, other_itr, true);
+    } else {
+      marktree_intersect_pair(b, other, other_itr, itr, true);
+    }
   }
 
-  if (itr->node->level) {
+  if (itr->x->level) {
     if (rev) {
       abort();
     } else {
-      // fprintf(stderr, "INTERNAL %d\n", cur->level);
       // steal previous node
       marktree_itr_prev(b, itr);
       adjustment = -1;
@@ -312,41 +506,72 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
   }
 
   // 3.
-  mtnode_t *x = itr->node;
+  MTNode *x = itr->x;
   assert(x->level == 0);
-  mtkey_t intkey = x->key[itr->i];
+  MTKey intkey = x->key[itr->i];
   if (x->n > itr->i + 1) {
     memmove(&x->key[itr->i], &x->key[itr->i + 1],
-            sizeof(mtkey_t) * (size_t)(x->n - itr->i - 1));
+            sizeof(MTKey) * (size_t)(x->n - itr->i - 1));
   }
   x->n--;
 
+  b->n_keys--;
+  pmap_del(uint64_t)(b->id2node, id, NULL);
+
   // 4.
   // if (adjustment == 1) {
   //   abort();
   // }
   if (adjustment == -1) {
     int ilvl = itr->lvl - 1;
-    const mtnode_t *lnode = x;
+    MTNode *lnode = x;
+    uint64_t start_id = 0;
+    bool did_bubble = false;
+    if (mt_end(intkey)) {
+      start_id = mt_lookup_key_side(intkey, false);
+    }
     do {
-      const mtnode_t *const p = lnode->parent;
+      MTNode *p = lnode->parent;
       if (ilvl < 0) {
         abort();
       }
-      const int i = itr->s[ilvl].i;
+      int i = itr->s[ilvl].i;
       assert(p->ptr[i] == lnode);
       if (i > 0) {
         unrelative(p->key[i - 1].pos, &intkey.pos);
       }
+
+      if (p != cur && start_id) {
+        if (intersection_has(&p->ptr[0]->intersect, start_id)) {
+          // if not the first time, we need to undo the addition in the
+          // previous step (`intersect_node` just below)
+          int last = (lnode != x) ? 1 : 0;
+          for (int k = 0; k < p->n + last; k++) {  // one less as p->ptr[n] is the last
+            unintersect_node(b, p->ptr[k], start_id, true);
+          }
+          intersect_node(b, p, start_id);
+          did_bubble = true;
+        }
+      }
+
       lnode = p;
       ilvl--;
     } while (lnode != cur);
 
-    mtkey_t deleted = cur->key[curi];
+    MTKey deleted = cur->key[curi];
     cur->key[curi] = intkey;
     refkey(b, cur, curi);
+    // if `did_bubble` then we already added `start_id` to some parent
+    if (mt_end(cur->key[curi]) && !did_bubble) {
+      uint64_t pi = pseudo_index(x, 0);  // note: sloppy pseudo-index
+      uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
+      if (pi_start > 0 && pi_start < pi) {
+        intersect_node(b, x, start_id);
+      }
+    }
+
     relative(intkey.pos, &deleted.pos);
-    mtnode_t *y = cur->ptr[curi + 1];
+    MTNode *y = cur->ptr[curi + 1];
     if (deleted.pos.row || deleted.pos.col) {
       while (y) {
         for (int k = 0; k < y->n; k++) {
@@ -358,46 +583,48 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
     itr->i--;
   }
 
-  b->n_keys--;
-  pmap_del(uint64_t)(b->id2node, id, NULL);
-
   // 5.
   bool itr_dirty = false;
   int rlvl = itr->lvl - 1;
   int *lasti = &itr->i;
+  MTPos ppos = itr->pos;
   while (x != b->root) {
     assert(rlvl >= 0);
-    mtnode_t *p = x->parent;
+    MTNode *p = x->parent;
     if (x->n >= T - 1) {
       // we are done, if this node is fine the rest of the tree will be
       break;
     }
     int pi = itr->s[rlvl].i;
     assert(p->ptr[pi] == x);
+    if (pi > 0) {
+      ppos.row -= p->key[pi - 1].pos.row;
+      ppos.col = itr->s[rlvl].oldcol;
+    }
+    // ppos is now the pos of p
+
     if (pi > 0 && p->ptr[pi - 1]->n > T - 1) {
       *lasti += 1;
       itr_dirty = true;
       // steal one key from the left neighbour
-      pivot_right(b, p, pi - 1);
+      pivot_right(b, ppos, p, pi - 1);
       break;
     } else if (pi < p->n && p->ptr[pi + 1]->n > T - 1) {
       // steal one key from right neighbour
-      pivot_left(b, p, pi);
+      pivot_left(b, ppos, p, pi);
       break;
     } else if (pi > 0) {
-      // fprintf(stderr, "LEFT ");
       assert(p->ptr[pi - 1]->n == T - 1);
       // merge with left neighbour
       *lasti += T;
       x = merge_node(b, p, pi - 1);
       if (lasti == &itr->i) {
         // TRICKY: we merged the node the iterator was on
-        itr->node = x;
+        itr->x = x;
       }
       itr->s[rlvl].i--;
       itr_dirty = true;
     } else {
-      // fprintf(stderr, "RIGHT ");
       assert(pi < p->n && p->ptr[pi + 1]->n == T - 1);
       merge_node(b, p, pi);
       // no iter adjustment needed
@@ -414,18 +641,18 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
       itr->lvl--;
     }
     if (b->root->level) {
-      mtnode_t *oldroot = b->root;
+      MTNode *oldroot = b->root;
       b->root = b->root->ptr[0];
       b->root->parent = NULL;
-      xfree(oldroot);
+      marktree_free_node(b, oldroot);
     } else {
       // no items, nothing for iterator to point to
       // not strictly needed, should handle delete right-most mark anyway
-      itr->node = NULL;
+      itr->x = NULL;
     }
   }
 
-  if (itr->node && itr_dirty) {
+  if (itr->x && itr_dirty) {
     marktree_itr_fix_pos(b, itr);
   }
 
@@ -441,10 +668,10 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
     marktree_itr_next(b, itr);
     marktree_itr_next(b, itr);
   } else {
-    if (itr->node && itr->i >= itr->node->n) {
+    if (itr->x && itr->i >= itr->x->n) {
       // we deleted the last key of a leaf node
       // go to the inner key after that.
-      assert(itr->node->level == 0);
+      assert(itr->x->level == 0);
       marktree_itr_next(b, itr);
     }
   }
@@ -452,9 +679,229 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
   return other;
 }
 
-static mtnode_t *merge_node(MarkTree *b, mtnode_t *p, int i)
+/// similar to intersect_common but modify x and y in place to retain
+/// only the items which are NOT in common
+static void intersect_merge(Intersection *restrict m, Intersection *restrict x,
+                            Intersection *restrict y)
 {
-  mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
+  size_t xi = 0, yi = 0;
+  size_t xn = 0, yn = 0;
+  while (xi < kv_size(*x) && yi < kv_size(*y)) {
+    if (kv_A(*x, xi) == kv_A(*y, yi)) {
+      // TODO(bfredl): kvi_pushp is actually quite complex, break out kvi_resize() to a function?
+      kvi_push(*m, kv_A(*x, xi));
+      xi++;
+      yi++;
+    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+      kv_A(*x, xn++) = kv_A(*x, xi++);
+    } else {
+      kv_A(*y, yn++) = kv_A(*y, yi++);
+    }
+  }
+
+  if (xi < kv_size(*x)) {
+    memmove(&kv_A(*x, xn), &kv_A(*x, xi), sizeof(kv_A(*x, xn)) * (kv_size(*x) - xi));
+    xn += kv_size(*x) - xi;
+  }
+  if (yi < kv_size(*y)) {
+    memmove(&kv_A(*y, yn), &kv_A(*y, yi), sizeof(kv_A(*y, yn)) * (kv_size(*y) - yi));
+    yn += kv_size(*y) - yi;
+  }
+
+  kv_size(*x) = xn;
+  kv_size(*y) = yn;
+}
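
The three-way split performed by intersect_merge can be seen on a small input. Below is a standalone sketch (plain sorted arrays instead of the kvec-based Intersection type, so the names and layout are illustrative and not part of the patch):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Same splitting logic as intersect_merge, on plain sorted arrays:
// m receives the common ids, x and y are compacted in place so that they
// keep only the ids NOT shared with the other side.
static void merge_split(uint64_t *x, size_t *nx, uint64_t *y, size_t *ny,
                        uint64_t *m, size_t *nm)
{
  size_t xi = 0, yi = 0, xn = 0, yn = 0, mn = 0;
  while (xi < *nx && yi < *ny) {
    if (x[xi] == y[yi]) {
      m[mn++] = x[xi++];
      yi++;
    } else if (x[xi] < y[yi]) {
      x[xn++] = x[xi++];
    } else {
      y[yn++] = y[yi++];
    }
  }
  while (xi < *nx) { x[xn++] = x[xi++]; }
  while (yi < *ny) { y[yn++] = y[yi++]; }
  *nx = xn; *ny = yn; *nm = mn;
}

int main(void)
{
  uint64_t x[] = { 1, 3, 5, 9 }, y[] = { 3, 4, 9 }, m[3];
  size_t nx = 4, ny = 3, nm = 0;
  merge_split(x, &nx, y, &ny, m, &nm);
  // now m = {3, 9}, x = {1, 5}, y = {4}
  printf("%zu common, %zu only in x, %zu only in y\n", nm, nx, ny);
  return 0;
}
```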
+
+// w used to be a child of x but it is now a child of y; adjust intersections accordingly
+// @param[out] d intersections which should be added to the old children of y
+static void intersect_mov(Intersection *restrict x, Intersection *restrict y,
+                          Intersection *restrict w, Intersection *restrict d)
+{
+  size_t wi = 0, yi = 0;
+  size_t wn = 0, yn = 0;
+  size_t xi = 0;
+  while (wi < kv_size(*w) || xi < kv_size(*x)) {
+    if (wi < kv_size(*w) && (xi >= kv_size(*x) || kv_A(*x, xi) >= kv_A(*w, wi))) {
+      if (xi < kv_size(*x) && kv_A(*x, xi) == kv_A(*w, wi)) {
+        xi++;
+      }
+      // now w < x strictly
+      while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*w, wi)) {
+        kvi_push(*d, kv_A(*y, yi));
+        yi++;
+      }
+      if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*w, wi)) {
+        kv_A(*y, yn++) = kv_A(*y, yi++);
+        wi++;
+      } else {
+        kv_A(*w, wn++) = kv_A(*w, wi++);
+      }
+    } else {
+      // x < w strictly
+      while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*x, xi)) {
+        kvi_push(*d, kv_A(*y, yi));
+        yi++;
+      }
+      if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*x, xi)) {
+        kv_A(*y, yn++) = kv_A(*y, yi++);
+        xi++;
+      } else {
+        // add kv_A(x, xi) at kv_A(w, wn), pushing up wi if wi == wn
+        if (wi == wn) {
+          size_t n = kv_size(*w) - wn;
+          kvi_pushp(*w);
+          if (n > 0) {
+            memmove(&kv_A(*w, wn + 1), &kv_A(*w, wn), n * sizeof(kv_A(*w, 0)));
+          }
+          kv_A(*w, wi) = kv_A(*x, xi);
+          wn++;
+          wi++;  // no need to consider the added element again
+        } else {
+          assert(wn < wi);
+          kv_A(*w, wn++) = kv_A(*x, xi);
+        }
+        xi++;
+      }
+    }
+  }
+  if (yi < kv_size(*y)) {
+    // move remaining items to d
+    size_t n = kv_size(*y) - yi;  // at least one
+    kvi_ensure_more_space(*d, n);
+    memcpy(&kv_A(*d, kv_size(*d)), &kv_A(*y, yi), n * sizeof(kv_A(*d, 0)));
+    kv_size(*d) += n;
+  }
+  kv_size(*w) = wn;
+  kv_size(*y) = yn;
+}
+
+bool intersect_mov_test(uint64_t *x, size_t nx, uint64_t *y, size_t ny, uint64_t *win, size_t nwin,
+                        uint64_t *wout, size_t *nwout, uint64_t *dout, size_t *ndout)
+{
+  // x is immutable in the context of intersect_mov. y might shrink, but we
+  // don't care about that (we get the deleted ones back in d)
+  Intersection xi = { .items = x, .size = nx };
+  Intersection yi = { .items = y, .size = ny };
+
+  Intersection w;
+  kvi_init(w);
+  for (size_t i = 0; i < nwin; i++) {
+    kvi_push(w, win[i]);
+  }
+  Intersection d;
+  kvi_init(d);
+
+  intersect_mov(&xi, &yi, &w, &d);
+
+  if (w.size > *nwout || d.size > *ndout) {
+    return false;
+  }
+
+  memcpy(wout, w.items, sizeof(w.items[0]) * w.size);
+  *nwout = w.size;
+
+  memcpy(dout, d.items, sizeof(d.items[0]) * d.size);
+  *ndout = d.size;
+
+  return true;
+}
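
intersect_mov_test() above is a thin wrapper so the unit tests (driven from Lua over FFI) can exercise intersect_mov directly. As a rough illustration of the semantics, here is a hypothetical C driver with small sets; judging from the implementation, the results should satisfy new_w = (w ∪ x) \ y, new_y = y ∩ (w ∪ x) and d = y \ (w ∪ x):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// prototype as added in this patch (normally provided by the generated header)
bool intersect_mov_test(uint64_t *x, size_t nx, uint64_t *y, size_t ny,
                        uint64_t *win, size_t nwin,
                        uint64_t *wout, size_t *nwout,
                        uint64_t *dout, size_t *ndout);

void example_intersect_mov(void)
{
  uint64_t x[] = { 1, 4 };   // intersections inherited from the old parent
  uint64_t y[] = { 4, 9 };   // intersections on the new parent
  uint64_t w[] = { 2 };      // intersections stored on the moved child itself
  uint64_t wout[8], dout[8];
  size_t nwout = 8, ndout = 8;

  bool ok = intersect_mov_test(x, 2, y, 2, w, 1, wout, &nwout, dout, &ndout);
  assert(ok);
  // 1 and 2 stay on the moved child: y does not provide them
  assert(nwout == 2 && wout[0] == 1 && wout[1] == 2);
  // 9 is on y but was never on the moved child's old path, so it has to be
  // pushed down to y's pre-existing children
  assert(ndout == 1 && dout[0] == 9);
}
```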
+
+/// intersection: i = x & y
+static void intersect_common(Intersection *i, Intersection *x, Intersection *y)
+{
+  size_t xi = 0, yi = 0;
+  while (xi < kv_size(*x) && yi < kv_size(*y)) {
+    if (kv_A(*x, xi) == kv_A(*y, yi)) {
+      kvi_push(*i, kv_A(*x, xi));
+      xi++;
+      yi++;
+    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+      xi++;
+    } else {
+      yi++;
+    }
+  }
+}
+
+// inplace union: x |= y
+static void intersect_add(Intersection *x, Intersection *y)
+{
+  size_t xi = 0, yi = 0;
+  while (xi < kv_size(*x) && yi < kv_size(*y)) {
+    if (kv_A(*x, xi) == kv_A(*y, yi)) {
+      xi++;
+      yi++;
+    } else if (kv_A(*y, yi) < kv_A(*x, xi)) {
+      size_t n = kv_size(*x) - xi;  // at least one
+      kvi_pushp(*x);
+      memmove(&kv_A(*x, xi + 1), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
+      kv_A(*x, xi) = kv_A(*y, yi);
+      xi++;  // newly added element
+      yi++;
+    } else {
+      xi++;
+    }
+  }
+  if (yi < kv_size(*y)) {
+    size_t n = kv_size(*y) - yi;  // at least one
+    kvi_ensure_more_space(*x, n);
+    memcpy(&kv_A(*x, kv_size(*x)), &kv_A(*y, yi), n * sizeof(kv_A(*x, 0)));
+    kv_size(*x) += n;
+  }
+}
+
+// inplace asymmetric difference: x &= ~y
+static void intersect_sub(Intersection *restrict x, Intersection *restrict y)
+{
+  size_t xi = 0, yi = 0;
+  size_t xn = 0;
+  while (xi < kv_size(*x) && yi < kv_size(*y)) {
+    if (kv_A(*x, xi) == kv_A(*y, yi)) {
+      xi++;
+      yi++;
+    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+      kv_A(*x, xn++) = kv_A(*x, xi++);
+    } else {
+      yi++;
+    }
+  }
+  if (xi < kv_size(*x)) {
+    size_t n = kv_size(*x) - xi;
+    if (xn < xi) {  // otherwise xn == xi
+      memmove(&kv_A(*x, xn), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
+    }
+    xn += n;
+  }
+  kv_size(*x) = xn;
+}
+
+/// x is a node which shrank, or one half of a split
+///
+/// this means that intervals which previously intersected all the (current)
+/// child nodes now instead intersect `x` itself.
+static void bubble_up(MTNode *x)
+{
+  Intersection xi;
+  kvi_init(xi);
+  // due to the invariants, an id which intersects both the first and the last
+  // child must intersect every child in between, so the largest subset common
+  // to _all_ subnodes is the intersection between the first and the last
+  intersect_common(&xi, &x->ptr[0]->intersect, &x->ptr[x->n]->intersect);
+  if (kv_size(xi)) {
+    for (int i = 0; i < x->n + 1; i++) {
+      intersect_sub(&x->ptr[i]->intersect, &xi);
+    }
+    intersect_add(&x->intersect, &xi);
+  }
+  kvi_destroy(xi);
+}
+
+static MTNode *merge_node(MarkTree *b, MTNode *p, int i)
+{
+  MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
+  Intersection m;
+  kvi_init(m);
+
+  intersect_merge(&m, &x->intersect, &y->intersect);
 
   x->key[x->n] = p->key[i];
   refkey(b, x, x->n);
@@ -462,35 +909,78 @@ static mtnode_t *merge_node(MarkTree *b, mtnode_t *p, int i)
     relative(p->key[i - 1].pos, &x->key[x->n].pos);
   }
 
-  memmove(&x->key[x->n + 1], y->key, (size_t)y->n * sizeof(mtkey_t));
+  memmove(&x->key[x->n + 1], y->key, (size_t)y->n * sizeof(MTKey));
   for (int k = 0; k < y->n; k++) {
     refkey(b, x, x->n + 1 + k);
     unrelative(x->key[x->n].pos, &x->key[x->n + 1 + k].pos);
   }
   if (x->level) {
-    memmove(&x->ptr[x->n + 1], y->ptr, ((size_t)y->n + 1) * sizeof(mtnode_t *));
-    for (int k = 0; k < y->n + 1; k++) {
-      x->ptr[x->n + k + 1]->parent = x;
+    // bubble down: ranges that intersected old-x but not old-y or vice versa
+    // must be moved to their respective children
+    memmove(&x->ptr[x->n + 1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
+    for (int k = 0; k < x->n + 1; k++) {
+      // TODO(bfredl): dedicated impl for "Z |= Y"
+      for (size_t idx = 0; idx < kv_size(x->intersect); idx++) {
+        intersect_node(b, x->ptr[k], kv_A(x->intersect, idx));
+      }
+    }
+    for (int ky = 0; ky < y->n + 1; ky++) {
+      int k = x->n + ky + 1;
+      // nodes that used to be in y, now the second half of x
+      x->ptr[k]->parent = x;
+      x->ptr[k]->p_idx = (int16_t)k;
+      // TODO(bfredl): dedicated impl for "Z |= X"
+      for (size_t idx = 0; idx < kv_size(y->intersect); idx++) {
+        intersect_node(b, x->ptr[k], kv_A(y->intersect, idx));
+      }
     }
   }
   x->n += y->n + 1;
-  memmove(&p->key[i], &p->key[i + 1], (size_t)(p->n - i - 1) * sizeof(mtkey_t));
+  memmove(&p->key[i], &p->key[i + 1], (size_t)(p->n - i - 1) * sizeof(MTKey));
   memmove(&p->ptr[i + 1], &p->ptr[i + 2],
-          (size_t)(p->n - i - 1) * sizeof(mtkey_t *));
+          (size_t)(p->n - i - 1) * sizeof(MTKey *));
+  for (int j = i + 1; j < p->n; j++) {  // note: one has been deleted
+    p->ptr[j]->p_idx = (int16_t)j;
+  }
   p->n--;
-  xfree(y);
-  b->n_nodes--;
+  marktree_free_node(b, y);
+
+  kvi_destroy(x->intersect);
+
+  // move of a kvec_withinit_t, messy!
+  // TODO(bfredl): special case version of intersect_merge(x_out, x_in_m_out, y) to avoid this
+  kvi_move(&x->intersect, &m);
+
   return x;
 }
 
+/// @param dest is overwritten (assumed to have already been freed/moved)
+/// @param src consumed (don't free or use)
+void kvi_move(Intersection *dest, Intersection *src)
+{
+  dest->size = src->size;
+  dest->capacity = src->capacity;
+  if (src->items == src->init_array) {
+    memcpy(dest->init_array, src->init_array, src->size * sizeof(*src->init_array));
+    dest->items = dest->init_array;
+  } else {
+    dest->items = src->items;
+  }
+}
+
 // TODO(bfredl): as a potential "micro" optimization, pivoting should balance
 // the two nodes instead of stealing just one key
-static void pivot_right(MarkTree *b, mtnode_t *p, int i)
+// p_pos is the absolute position of the key just before p (or a dummy position strictly less
+// than any key inside p, if p is the leftmost node)
+static void pivot_right(MarkTree *b, MTPos p_pos, MTNode *p, const int i)
 {
-  mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
-  memmove(&y->key[1], y->key, (size_t)y->n * sizeof(mtkey_t));
+  MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
+  memmove(&y->key[1], y->key, (size_t)y->n * sizeof(MTKey));
   if (y->level) {
-    memmove(&y->ptr[1], y->ptr, ((size_t)y->n + 1) * sizeof(mtnode_t *));
+    memmove(&y->ptr[1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
+    for (int j = 1; j < y->n + 2; j++) {
+      y->ptr[j]->p_idx = (int16_t)j;
+    }
   }
   y->key[0] = p->key[i];
   refkey(b, y, 0);
@@ -499,6 +989,7 @@ static void pivot_right(MarkTree *b, mtnode_t *p, int i)
   if (x->level) {
     y->ptr[0] = x->ptr[x->n];
     y->ptr[0]->parent = y;
+    y->ptr[0]->p_idx = 0;
   }
   x->n--;
   y->n++;
@@ -509,11 +1000,46 @@ static void pivot_right(MarkTree *b, mtnode_t *p, int i)
   for (int k = 1; k < y->n; k++) {
     unrelative(y->key[0].pos, &y->key[k].pos);
   }
+
+  // repair intersections of x
+  if (x->level) {
+    // handle y and first new y->ptr[0]
+    Intersection d;
+    kvi_init(d);
+    // y->ptr[0] was moved from x to y
+    // adjust y->ptr[0] for a difference between the parents
+    // in addition, this might cause some intersection of the old y
+    // to bubble down to the old children of y (if y->ptr[0] wasn't intersected)
+    intersect_mov(&x->intersect, &y->intersect, &y->ptr[0]->intersect, &d);
+    if (kv_size(d)) {
+      for (int yi = 1; yi < y->n + 1; yi++) {
+        intersect_add(&y->ptr[yi]->intersect, &d);
+      }
+    }
+    kvi_destroy(d);
+
+    bubble_up(x);
+  } else {
+    // if the last element of x used to be an end node, check if it now covers all of x
+    if (mt_end(p->key[i])) {
+      uint64_t pi = pseudo_index(x, 0);  // note: sloppy pseudo-index
+      uint64_t start_id = mt_lookup_key_side(p->key[i], false);
+      uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
+      if (pi_start > 0 && pi_start < pi) {
+        intersect_node(b, x, start_id);
+      }
+    }
+
+    if (mt_start(y->key[0])) {
+      // no need for a check, just delete it if it was there
+      unintersect_node(b, y, mt_lookup_key(y->key[0]), false);
+    }
+  }
 }
 
-static void pivot_left(MarkTree *b, mtnode_t *p, int i)
+static void pivot_left(MarkTree *b, MTPos p_pos, MTNode *p, int i)
 {
-  mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
+  MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
 
   // reverse from how we "always" do it. but pivot_left
   // is just the inverse of pivot_right, so reverse it literally.
@@ -532,40 +1058,88 @@ static void pivot_left(MarkTree *b, mtnode_t *p, int i)
   if (x->level) {
     x->ptr[x->n + 1] = y->ptr[0];
     x->ptr[x->n + 1]->parent = x;
+    x->ptr[x->n + 1]->p_idx = (int16_t)(x->n + 1);
   }
-  memmove(y->key, &y->key[1], (size_t)(y->n - 1) * sizeof(mtkey_t));
+  memmove(y->key, &y->key[1], (size_t)(y->n - 1) * sizeof(MTKey));
   if (y->level) {
-    memmove(y->ptr, &y->ptr[1], (size_t)y->n * sizeof(mtnode_t *));
+    memmove(y->ptr, &y->ptr[1], (size_t)y->n * sizeof(MTNode *));
+    for (int j = 0; j < y->n; j++) {  // note: last item deleted
+      y->ptr[j]->p_idx = (int16_t)j;
+    }
   }
   x->n++;
   y->n--;
+
+  // repair intersections of x,y
+  if (x->level) {
+    // handle y and first new y->ptr[0]
+    Intersection d;
+    kvi_init(d);
+    // x->ptr[x->n] was moved from y to x
+    // adjust x->ptr[x->n] for a difference between the parents
+    // in addition, this might cause some intersection of the old x
+    // to bubble down to the old children of x (if x->ptr[n] wasn't intersected)
+    intersect_mov(&y->intersect, &x->intersect, &x->ptr[x->n]->intersect, &d);
+    if (kv_size(d)) {
+      for (int xi = 0; xi < x->n; xi++) {  // ptr[x->n] deliberately skipped
+        intersect_add(&x->ptr[xi]->intersect, &d);
+      }
+    }
+    kvi_destroy(d);
+
+    bubble_up(y);
+  } else {
+    // if the first element of y used to be a start node, check if it now covers all of y
+    if (mt_start(p->key[i])) {
+      uint64_t pi = pseudo_index(y, 0);  // note: sloppy pseudo-index
+
+      uint64_t end_id = mt_lookup_key_side(p->key[i], true);
+      uint64_t pi_end = pseudo_index_for_id(b, end_id, true);
+
+      if (pi_end > pi) {
+        intersect_node(b, y, mt_lookup_key(p->key[i]));
+      }
+    }
+
+    if (mt_end(x->key[x->n - 1])) {
+      // no need for a check, just delete it if it was there
+      unintersect_node(b, x, mt_lookup_key_side(x->key[x->n - 1], false), false);
+    }
+  }
 }
 
 /// frees all mem, resets tree to valid empty state
 void marktree_clear(MarkTree *b)
 {
   if (b->root) {
-    marktree_free_node(b->root);
+    marktree_free_subtree(b, b->root);
     b->root = NULL;
   }
   map_destroy(uint64_t, b->id2node);
   *b->id2node = (PMap(uint64_t)) MAP_INIT;
   b->n_keys = 0;
-  b->n_nodes = 0;
+  assert(b->n_nodes == 0);
 }
 
-void marktree_free_node(mtnode_t *x)
+void marktree_free_subtree(MarkTree *b, MTNode *x)
 {
   if (x->level) {
     for (int i = 0; i < x->n + 1; i++) {
-      marktree_free_node(x->ptr[i]);
+      marktree_free_subtree(b, x->ptr[i]);
     }
   }
+  marktree_free_node(b, x);
+}
+
+static void marktree_free_node(MarkTree *b, MTNode *x)
+{
+  kvi_destroy(x->intersect);
   xfree(x);
+  b->n_nodes--;
 }
 
 /// NB: caller must check not pair!
-void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, mtkey_t key)
+void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, MTKey key)
 {
   // TODO(bfredl): clean up this mess and re-instantiate &= and |= forms
   // once we upgrade to a non-broken version of gcc in functionaltest-lua CI
@@ -578,49 +1152,108 @@ void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, mtkey_
   rawkey(itr).priority = key.priority;
 }
 
+/// @param itr iterator is invalid after call
 void marktree_move(MarkTree *b, MarkTreeIter *itr, int row, int col)
 {
-  mtkey_t key = rawkey(itr);
-  // TODO(bfredl): optimize when moving a mark within a leaf without moving it
-  // across neighbours!
-  marktree_del_itr(b, itr, false);
-  key.pos = (mtpos_t){ row, col };
+  MTKey key = rawkey(itr);
+  MTNode *x = itr->x;
+  if (!x->level) {
+    bool internal = false;
+    MTPos newpos = MTPos(row, col);
+    if (x->parent != NULL) {
+      // strictly _after_ the key before `x`
+      // (not optimal when x is the very first leaf of the entire tree, but that's fine)
+      if (pos_less(itr->pos, newpos)) {
+        relative(itr->pos, &newpos);
+
+        // strictly before the end of x. (this could be made sharper by
+        // finding the internal key just after x, but meh)
+        if (pos_less(newpos, x->key[x->n - 1].pos)) {
+          internal = true;
+        }
+      }
+    } else {
+      // the tree is a single node; newpos is thus already relative to itr->pos
+      internal = true;
+    }
+
+    if (internal) {
+      key.pos = newpos;
+      bool match;
+      // tricky: could minimize movement in either direction better
+      int new_i = marktree_getp_aux(x, key, &match);
+      if (!match) {
+        new_i++;
+      }
+      if (new_i == itr->i || key_cmp(key, x->key[new_i]) == 0) {
+        x->key[itr->i].pos = newpos;
+      } else if (new_i < itr->i) {
+        memmove(&x->key[new_i + 1], &x->key[new_i], sizeof(MTKey) * (size_t)(itr->i - new_i));
+        x->key[new_i] = key;
+      } else if (new_i > itr->i) {
+        memmove(&x->key[itr->i], &x->key[itr->i + 1], sizeof(MTKey) * (size_t)(new_i - itr->i));
+        x->key[new_i] = key;
+      }
+      return;
+    }
+  }
+  uint64_t other = marktree_del_itr(b, itr, false);
+  key.pos = (MTPos){ row, col };
 
   marktree_put_key(b, key);
-  itr->node = NULL;  // itr might become invalid by put
+
+  if (other) {
+    marktree_restore_pair(b, key);
+  }
+  itr->x = NULL;  // itr might become invalid by put
+}
+
+void marktree_restore_pair(MarkTree *b, MTKey key)
+{
+  MarkTreeIter itr[1];
+  MarkTreeIter end_itr[1];
+  marktree_lookup(b, mt_lookup_key_side(key, false), itr);
+  marktree_lookup(b, mt_lookup_key_side(key, true), end_itr);
+  if (!itr->x || !end_itr->x) {
+    // this could happen if the other end is waiting to be restored later
+    // this function will be called again for the other end.
+    return;
+  }
+  rawkey(itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;
+  rawkey(end_itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;
+
+  marktree_intersect_pair(b, mt_lookup_key_side(key, false), itr, end_itr, false);
 }
 
 // itr functions
 
-// TODO(bfredl): static inline?
 bool marktree_itr_get(MarkTree *b, int32_t row, int col, MarkTreeIter *itr)
 {
-  return marktree_itr_get_ext(b, (mtpos_t){ row, col },
-                              itr, false, false, NULL);
+  return marktree_itr_get_ext(b, MTPos(row, col), itr, false, false, NULL);
 }
 
-bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last, bool gravity,
-                          mtpos_t *oldbase)
+bool marktree_itr_get_ext(MarkTree *b, MTPos p, MarkTreeIter *itr, bool last, bool gravity,
+                          MTPos *oldbase)
 {
   if (b->n_keys == 0) {
-    itr->node = NULL;
+    itr->x = NULL;
     return false;
   }
 
-  mtkey_t k = { .pos = p, .flags = gravity ? MT_FLAG_RIGHT_GRAVITY : 0 };
+  MTKey k = { .pos = p, .flags = gravity ? MT_FLAG_RIGHT_GRAVITY : 0 };
   if (last && !gravity) {
     k.flags = MT_FLAG_LAST;
   }
-  itr->pos = (mtpos_t){ 0, 0 };
-  itr->node = b->root;
+  itr->pos = (MTPos){ 0, 0 };
+  itr->x = b->root;
   itr->lvl = 0;
   if (oldbase) {
     oldbase[itr->lvl] = itr->pos;
   }
   while (true) {
-    itr->i = marktree_getp_aux(itr->node, k, 0) + 1;
+    itr->i = marktree_getp_aux(itr->x, k, 0) + 1;
 
-    if (itr->node->level == 0) {
+    if (itr->x->level == 0) {
       break;
     }
 
@@ -628,10 +1261,10 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
     itr->s[itr->lvl].oldcol = itr->pos.col;
 
     if (itr->i > 0) {
-      compose(&itr->pos, itr->node->key[itr->i - 1].pos);
-      relative(itr->node->key[itr->i - 1].pos, &k.pos);
+      compose(&itr->pos, itr->x->key[itr->i - 1].pos);
+      relative(itr->x->key[itr->i - 1].pos, &k.pos);
     }
-    itr->node = itr->node->ptr[itr->i];
+    itr->x = itr->x->ptr[itr->i];
     itr->lvl++;
     if (oldbase) {
       oldbase[itr->lvl] = itr->pos;
@@ -640,7 +1273,7 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
 
   if (last) {
     return marktree_itr_prev(b, itr);
-  } else if (itr->i >= itr->node->n) {
+  } else if (itr->i >= itr->x->n) {
     return marktree_itr_next(b, itr);
   }
   return true;
@@ -648,19 +1281,20 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
 
 bool marktree_itr_first(MarkTree *b, MarkTreeIter *itr)
 {
-  itr->node = b->root;
   if (b->n_keys == 0) {
+    itr->x = NULL;
     return false;
   }
 
+  itr->x = b->root;
   itr->i = 0;
   itr->lvl = 0;
-  itr->pos = (mtpos_t){ 0, 0 };
-  while (itr->node->level > 0) {
+  itr->pos = MTPos(0, 0);
+  while (itr->x->level > 0) {
     itr->s[itr->lvl].i = 0;
     itr->s[itr->lvl].oldcol = 0;
     itr->lvl++;
-    itr->node = itr->node->ptr[0];
+    itr->x = itr->x->ptr[0];
   }
   return true;
 }
@@ -669,16 +1303,16 @@ bool marktree_itr_first(MarkTree *b, MarkTreeIter *itr)
 int marktree_itr_last(MarkTree *b, MarkTreeIter *itr)
 {
   if (b->n_keys == 0) {
-    itr->node = NULL;
+    itr->x = NULL;
     return false;
   }
-  itr->pos = (mtpos_t){ 0, 0 };
-  itr->node = b->root;
+  itr->pos = MTPos(0, 0);
+  itr->x = b->root;
   itr->lvl = 0;
   while (true) {
-    itr->i = itr->node->n;
+    itr->i = itr->x->n;
 
-    if (itr->node->level == 0) {
+    if (itr->x->level == 0) {
       break;
     }
 
@@ -686,63 +1320,71 @@ int marktree_itr_last(MarkTree *b, MarkTreeIter *itr)
     itr->s[itr->lvl].oldcol = itr->pos.col;
 
     assert(itr->i > 0);
-    compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+    compose(&itr->pos, itr->x->key[itr->i - 1].pos);
 
-    itr->node = itr->node->ptr[itr->i];
+    itr->x = itr->x->ptr[itr->i];
     itr->lvl++;
   }
   itr->i--;
   return true;
 }
 
-// TODO(bfredl): static inline
 bool marktree_itr_next(MarkTree *b, MarkTreeIter *itr)
 {
-  return marktree_itr_next_skip(b, itr, false, NULL);
+  return marktree_itr_next_skip(b, itr, false, false, NULL);
 }
 
-static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, mtpos_t oldbase[])
+static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, bool preload,
+                                   MTPos oldbase[])
 {
-  if (!itr->node) {
+  if (!itr->x) {
     return false;
   }
   itr->i++;
-  if (itr->node->level == 0 || skip) {
-    if (itr->i < itr->node->n) {
+  if (itr->x->level == 0 || skip) {
+    if (preload && itr->x->level == 0 && skip) {
+      // skip rest of this leaf node
+      itr->i = itr->x->n;
+    } else if (itr->i < itr->x->n) {
       // TODO(bfredl): this is the common case,
       // and could be handled by inline wrapper
       return true;
     }
     // we ran out of non-internal keys. Go up until we find an internal key
-    while (itr->i >= itr->node->n) {
-      itr->node = itr->node->parent;
-      if (itr->node == NULL) {
+    while (itr->i >= itr->x->n) {
+      itr->x = itr->x->parent;
+      if (itr->x == NULL) {
         return false;
       }
       itr->lvl--;
       itr->i = itr->s[itr->lvl].i;
       if (itr->i > 0) {
-        itr->pos.row -= itr->node->key[itr->i - 1].pos.row;
+        itr->pos.row -= itr->x->key[itr->i - 1].pos.row;
         itr->pos.col = itr->s[itr->lvl].oldcol;
       }
     }
   } else {
     // we stood at an "internal" key. Go down to the first non-internal
     // key after it.
-    while (itr->node->level > 0) {
+    while (itr->x->level > 0) {
       // internal key, there is always a child after
       if (itr->i > 0) {
         itr->s[itr->lvl].oldcol = itr->pos.col;
-        compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+        compose(&itr->pos, itr->x->key[itr->i - 1].pos);
       }
       if (oldbase && itr->i == 0) {
         oldbase[itr->lvl + 1] = oldbase[itr->lvl];
       }
       itr->s[itr->lvl].i = itr->i;
-      assert(itr->node->ptr[itr->i]->parent == itr->node);
-      itr->node = itr->node->ptr[itr->i];
-      itr->i = 0;
+      assert(itr->x->ptr[itr->i]->parent == itr->x);
       itr->lvl++;
+      itr->x = itr->x->ptr[itr->i];
+      if (preload && itr->x->level) {
+        itr->i = -1;
+        break;
+      } else {
+        itr->i = 0;
+      }
     }
   }
   return true;
@@ -750,10 +1392,10 @@ static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, mt
 
 bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
 {
-  if (!itr->node) {
+  if (!itr->x) {
     return false;
   }
-  if (itr->node->level == 0) {
+  if (itr->x->level == 0) {
     itr->i--;
     if (itr->i >= 0) {
       // TODO(bfredl): this is the common case,
@@ -762,30 +1404,30 @@ bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
     }
     // we ran out of non-internal keys. Go up until we find a non-internal key
     while (itr->i < 0) {
-      itr->node = itr->node->parent;
-      if (itr->node == NULL) {
+      itr->x = itr->x->parent;
+      if (itr->x == NULL) {
         return false;
       }
       itr->lvl--;
       itr->i = itr->s[itr->lvl].i - 1;
       if (itr->i >= 0) {
-        itr->pos.row -= itr->node->key[itr->i].pos.row;
+        itr->pos.row -= itr->x->key[itr->i].pos.row;
         itr->pos.col = itr->s[itr->lvl].oldcol;
       }
     }
   } else {
     // we stood at an "internal" key. Go down to the last non-internal
     // key before it.
-    while (itr->node->level > 0) {
+    while (itr->x->level > 0) {
       // internal key, there is always a child before
       if (itr->i > 0) {
         itr->s[itr->lvl].oldcol = itr->pos.col;
-        compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+        compose(&itr->pos, itr->x->key[itr->i - 1].pos);
       }
       itr->s[itr->lvl].i = itr->i;
-      assert(itr->node->ptr[itr->i]->parent == itr->node);
-      itr->node = itr->node->ptr[itr->i];
-      itr->i = itr->node->n;
+      assert(itr->x->ptr[itr->i]->parent == itr->x);
+      itr->x = itr->x->ptr[itr->i];
+      itr->i = itr->x->n;
       itr->lvl++;
     }
     itr->i--;
@@ -793,33 +1435,22 @@ bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
   return true;
 }
 
-void marktree_itr_rewind(MarkTree *b, MarkTreeIter *itr)
-{
-  if (!itr->node) {
-    return;
-  }
-  if (itr->node->level) {
-    marktree_itr_prev(b, itr);
-  }
-  itr->i = 0;
-}
-
 bool marktree_itr_node_done(MarkTreeIter *itr)
 {
-  return !itr->node || itr->i == itr->node->n - 1;
+  return !itr->x || itr->i == itr->x->n - 1;
 }
 
-mtpos_t marktree_itr_pos(MarkTreeIter *itr)
+MTPos marktree_itr_pos(MarkTreeIter *itr)
 {
-  mtpos_t pos = rawkey(itr).pos;
+  MTPos pos = rawkey(itr).pos;
   unrelative(itr->pos, &pos);
   return pos;
 }
 
-mtkey_t marktree_itr_current(MarkTreeIter *itr)
+MTKey marktree_itr_current(MarkTreeIter *itr)
 {
-  if (itr->node) {
-    mtkey_t key = rawkey(itr);
+  if (itr->x) {
+    MTKey key = rawkey(itr);
     key.pos = marktree_itr_pos(itr);
     return key;
   }
@@ -831,47 +1462,198 @@ static bool itr_eq(MarkTreeIter *itr1, MarkTreeIter *itr2)
   return (&rawkey(itr1) == &rawkey(itr2));
 }
 
-static void itr_swap(MarkTreeIter *itr1, MarkTreeIter *itr2)
+/// Get all marks which overlap the position (row, col)
+///
+/// After calling this function, use marktree_itr_step_overlap to step through
+/// one overlapping mark at a time, until it returns false.
+///
+/// NOTE: It's possible to get all marks which overlap a region (row,col) to
+/// (row_end,col_end). To do this, first call marktree_itr_get_overlap with the
+/// start position and keep calling marktree_itr_step_overlap until it returns
+/// false. After this, as a second loop, keep calling marktree_itr_next() until
+/// the iterator is invalid or reaches past (row_end, col_end). In this loop,
+/// consider all "start" marks (and unpaired marks if relevant), but skip over
+/// all "end" marks, using mt_end(mark).
+///
+/// @return false if we already know no marks can be found.
+///         Note: even if true is returned, the first call to
+///         marktree_itr_step_overlap could still return false.
+bool marktree_itr_get_overlap(MarkTree *b, int row, int col, MarkTreeIter *itr)
 {
-  mtkey_t key1 = rawkey(itr1);
-  mtkey_t key2 = rawkey(itr2);
+  if (b->n_keys == 0) {
+    itr->x = NULL;
+    return false;
+  }
+
+  itr->x = b->root;
+  itr->i = -1;
+  itr->lvl = 0;
+  itr->pos = MTPos(0, 0);
+  itr->intersect_pos = MTPos(row, col);
+  // intersect_pos, but will be adjusted to be relative to itr->x
+  itr->intersect_pos_x = MTPos(row, col);
+  itr->intersect_idx = 0;
+  return true;
+}
+
+static inline MTPair pair_from(MTKey start, MTKey end)
+{
+  return (MTPair){ .start = start, .end_pos = end.pos, .end_right_gravity = mt_right(end) };
+}
+
+/// Step through all overlapping pairs at a position.
+///
+/// This function must only be used with an iterator from |marktree_itr_get_overlap|
+///
+/// @return true if a valid pair was found (returned as `pair`)
+/// When all overlapping mark pairs have been found, false will be returned. `itr`
+/// is then valid as an ordinary iterator at the (row, col) position specified in
+/// marktree_itr_get_overlap.
+bool marktree_itr_step_overlap(MarkTree *b, MarkTreeIter *itr, MTPair *pair)
+{
+  // phase one: we start at the root node and step inwards towards itr->intersect_pos
+  // (the position queried in marktree_itr_get_overlap)
+  //
+  // For each node (ancestor node to the node containing the sought position)
+  // we return all intersecting intervals, one at a time
+  while (itr->i == -1) {
+    if (itr->intersect_idx < kv_size(itr->x->intersect)) {
+      uint64_t id = kv_A(itr->x->intersect, itr->intersect_idx++);
+      *pair = pair_from(marktree_lookup(b, id, NULL),
+                        marktree_lookup(b, id|MARKTREE_END_FLAG, NULL));
+      return true;
+    }
+
+    if (itr->x->level == 0) {
+      itr->s[itr->lvl].i = itr->i = 0;
+      break;
+    }
+
+    MTKey k = { .pos = itr->intersect_pos_x, .flags = 0 };
+    itr->i = marktree_getp_aux(itr->x, k, 0) + 1;
+
+    itr->s[itr->lvl].i = itr->i;
+    itr->s[itr->lvl].oldcol = itr->pos.col;
+
+    if (itr->i > 0) {
+      compose(&itr->pos, itr->x->key[itr->i - 1].pos);
+      relative(itr->x->key[itr->i - 1].pos, &itr->intersect_pos_x);
+    }
+    itr->x = itr->x->ptr[itr->i];
+    itr->lvl++;
+    itr->i = -1;
+    itr->intersect_idx = 0;
+  }
+
+  // phase two: we now need to handle the node found at itr->intersect_pos
+  // first consider all "start" marks in this node which lie before the position.
+  while (itr->i < itr->x->n && pos_less(rawkey(itr).pos, itr->intersect_pos_x)) {
+    MTKey k = itr->x->key[itr->i++];
+    itr->s[itr->lvl].i = itr->i;
+    if (mt_start(k)) {
+      MTKey end = marktree_lookup(b, mt_lookup_id(k.ns, k.id, true), NULL);
+      if (pos_less(end.pos, itr->intersect_pos)) {
+        continue;
+      }
+
+      unrelative(itr->pos, &k.pos);
+      *pair = pair_from(k, end);
+      return true;  // it's a start!
+    }
+  }
+
+  // phase 2B: We also need to step to the end of this node and consider all end marks, which
+  // might end an interval overlapping itr->intersect_pos
+  while (itr->i < itr->x->n) {
+    MTKey k = itr->x->key[itr->i++];
+    if (mt_end(k)) {
+      uint64_t id = mt_lookup_id(k.ns, k.id, false);
+      if (id2node(b, id) == itr->x) {
+        continue;
+      }
+      unrelative(itr->pos, &k.pos);
+      MTKey start = marktree_lookup(b, id, NULL);
+      if (pos_less(itr->intersect_pos, start.pos)) {
+        continue;
+      }
+      *pair = pair_from(start, k);
+      return true;  // end of a range which began before us!
+    }
+  }
+
+  // when returning false, get back to the queried position, to ensure the caller
+  // can keep using it as an ordinary iterator at the queried position. The docstring
+  // for marktree_itr_get_overlap explains how this is useful.
+  itr->i = itr->s[itr->lvl].i;
+  assert(itr->i >= 0);
+  if (itr->i >= itr->x->n) {
+    marktree_itr_next(b, itr);
+  }
+
+  // either on or after the intersected position, bail out
+  return false;
+}
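
The doc comments above describe a two-loop protocol for collecting everything that overlaps a region. A minimal sketch of that protocol follows (it assumes the nvim-internal marktree API as declared in this file; the printf reporting and the dump_overlapping name are illustrative only):

```c
#include <stdio.h>

#include "nvim/marktree.h"  // nvim-internal header, only available in-tree

// List every mark overlapping the region (row, col) .. (end_row, end_col).
void dump_overlapping(MarkTree *b, int row, int col, int end_row, int end_col)
{
  MarkTreeIter itr[1];
  MTPair pair;

  // loop 1: ranges which started before (row, col) but still cover it
  if (marktree_itr_get_overlap(b, row, col, itr)) {
    while (marktree_itr_step_overlap(b, itr, &pair)) {
      printf("pair ns=%u id=%u covers the start of the region\n",
             pair.start.ns, pair.start.id);
    }
  }

  // loop 2: marks starting inside the region; "end" marks are skipped since
  // their range was either reported above or starts inside the region anyway
  while (marktree_itr_valid(itr)) {
    MTKey mark = marktree_itr_current(itr);
    if (mark.pos.row > end_row
        || (mark.pos.row == end_row && mark.pos.col > end_col)) {
      break;
    }
    if (!mt_end(mark)) {
      printf("mark ns=%u id=%u starts at (%d, %d)\n",
             mark.ns, mark.id, mark.pos.row, mark.pos.col);
    }
    marktree_itr_next(b, itr);
  }
}
```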
+
+static void swap_keys(MarkTree *b, MarkTreeIter *itr1, MarkTreeIter *itr2, DamageList *damage)
+{
+  if (itr1->x != itr2->x) {
+    if (mt_paired(rawkey(itr1))) {
+      kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr1)), itr1->x, itr2->x,
+                                   itr1->i, itr2->i }));
+    }
+    if (mt_paired(rawkey(itr2))) {
+      kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr2)), itr2->x, itr1->x,
+                                   itr2->i, itr1->i }));
+    }
+  }
+
+  MTKey key1 = rawkey(itr1);
+  MTKey key2 = rawkey(itr2);
   rawkey(itr1) = key2;
   rawkey(itr1).pos = key1.pos;
   rawkey(itr2) = key1;
   rawkey(itr2).pos = key2.pos;
+  refkey(b, itr1->x, itr1->i);
+  refkey(b, itr2->x, itr2->i);
+}
+
+static int damage_cmp(const void *s1, const void *s2)
+{
+  Damage *d1 = (Damage *)s1, *d2 = (Damage *)s2;
+  assert(d1->id != d2->id);
+  return d1->id > d2->id;
 }
 
 bool marktree_splice(MarkTree *b, int32_t start_line, int start_col, int old_extent_line,
                      int old_extent_col, int new_extent_line, int new_extent_col)
 {
-  mtpos_t start = { start_line, start_col };
-  mtpos_t old_extent = { old_extent_line, old_extent_col };
-  mtpos_t new_extent = { new_extent_line, new_extent_col };
+  MTPos start = { start_line, start_col };
+  MTPos old_extent = { old_extent_line, old_extent_col };
+  MTPos new_extent = { new_extent_line, new_extent_col };
 
   bool may_delete = (old_extent.row != 0 || old_extent.col != 0);
   bool same_line = old_extent.row == 0 && new_extent.row == 0;
   unrelative(start, &old_extent);
   unrelative(start, &new_extent);
-  MarkTreeIter itr[1] = { 0 };
-  MarkTreeIter enditr[1] = { 0 };
+  MarkTreeIter itr[1] = { 0 }, enditr[1] = { 0 };
 
-  mtpos_t oldbase[MT_MAX_DEPTH] = { 0 };
+  MTPos oldbase[MT_MAX_DEPTH] = { 0 };
 
   marktree_itr_get_ext(b, start, itr, false, true, oldbase);
-  if (!itr->node) {
+  if (!itr->x) {
     // it is DONE
     return false;
   }
-  mtpos_t delta = { new_extent.row - old_extent.row,
-                    new_extent.col - old_extent.col };
+  MTPos delta = { new_extent.row - old_extent.row,
+                  new_extent.col - old_extent.col };
 
   if (may_delete) {
-    mtpos_t ipos = marktree_itr_pos(itr);
+    MTPos ipos = marktree_itr_pos(itr);
     if (!pos_leq(old_extent, ipos)
         || (old_extent.row == ipos.row && old_extent.col == ipos.col
             && !mt_right(rawkey(itr)))) {
       marktree_itr_get_ext(b, old_extent, enditr, true, true, NULL);
-      assert(enditr->node);
+      assert(enditr->x);
       // "assert" (itr <= enditr)
     } else {
       may_delete = false;
@@ -880,14 +1662,16 @@ bool marktree_splice(MarkTree *b, int32_t start_line, int start_col, int old_ext
 
   bool past_right = false;
   bool moved = false;
+  DamageList damage;
+  kvi_init(damage);
 
   // Follow the general strategy of messing things up and fix them later
   // "oldbase" carries the information needed to calculate old position of
   // children.
   if (may_delete) {
-    while (itr->node && !past_right) {
-      mtpos_t loc_start = start;
-      mtpos_t loc_old = old_extent;
+    while (itr->x && !past_right) {
+      MTPos loc_start = start;
+      MTPos loc_old = old_extent;
       relative(itr->pos, &loc_start);
 
       relative(oldbase[itr->lvl], &loc_old);
@@ -905,9 +1689,7 @@ continue_same_node:
           marktree_itr_prev(b, enditr);
         }
         if (!mt_right(rawkey(enditr))) {
-          itr_swap(itr, enditr);
-          refkey(b, itr->node, itr->i);
-          refkey(b, enditr->node, enditr->i);
+          swap_keys(b, itr, enditr, &damage);
         } else {
           past_right = true;  // NOLINT
           (void)past_right;
@@ -921,14 +1703,14 @@ continue_same_node:
       }
 
       moved = true;
-      if (itr->node->level) {
+      if (itr->x->level) {
         oldbase[itr->lvl + 1] = rawkey(itr).pos;
         unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);
         rawkey(itr).pos = loc_start;
-        marktree_itr_next_skip(b, itr, false, oldbase);
+        marktree_itr_next_skip(b, itr, false, false, oldbase);
       } else {
         rawkey(itr).pos = loc_start;
-        if (itr->i < itr->node->n - 1) {
+        if (itr->i < itr->x->n - 1) {
           itr->i++;
           if (!past_right) {
             goto continue_same_node;
@@ -938,10 +1720,10 @@ continue_same_node:
         }
       }
     }
-    while (itr->node) {
-      mtpos_t loc_new = new_extent;
+    while (itr->x) {
+      MTPos loc_new = new_extent;
       relative(itr->pos, &loc_new);
-      mtpos_t limit = old_extent;
+      MTPos limit = old_extent;
 
       relative(oldbase[itr->lvl], &limit);
 
@@ -951,16 +1733,16 @@ past_continue_same_node:
         break;
       }
 
-      mtpos_t oldpos = rawkey(itr).pos;
+      MTPos oldpos = rawkey(itr).pos;
       rawkey(itr).pos = loc_new;
       moved = true;
-      if (itr->node->level) {
+      if (itr->x->level) {
         oldbase[itr->lvl + 1] = oldpos;
         unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);
 
-        marktree_itr_next_skip(b, itr, false, oldbase);
+        marktree_itr_next_skip(b, itr, false, false, oldbase);
       } else {
-        if (itr->i < itr->node->n - 1) {
+        if (itr->i < itr->x->n - 1) {
           itr->i++;
           goto past_continue_same_node;
         } else {
@@ -970,7 +1752,7 @@ past_continue_same_node:
     }
   }
 
-  while (itr->node) {
+  while (itr->x) {
     unrelative(oldbase[itr->lvl], &rawkey(itr).pos);
     int realrow = rawkey(itr).pos.row;
     assert(realrow >= old_extent.row);
@@ -978,7 +1760,6 @@ past_continue_same_node:
     if (realrow == old_extent.row) {
       if (delta.col) {
         rawkey(itr).pos.col += delta.col;
-        moved = true;
       }
     } else {
       if (same_line) {
@@ -994,22 +1775,78 @@ past_continue_same_node:
     if (done) {
       break;
     }
-    marktree_itr_next_skip(b, itr, true, NULL);
+    marktree_itr_next_skip(b, itr, true, false, NULL);
   }
+
+  if (kv_size(damage)) {
+    // TODO(bfredl): a full sort is not really needed. we just need a "start" node to find
+    // its corresponding "end" node. Set up some dedicated hash for this later (c.f. the
+    // "grow only" variant of khash_t branch)
+    qsort((void *)&kv_A(damage, 0), kv_size(damage), sizeof(kv_A(damage, 0)),
+          damage_cmp);
+
+    for (size_t i = 0; i < kv_size(damage); i++) {
+      Damage d = kv_A(damage, i);
+      if (!(d.id & MARKTREE_END_FLAG)) {  // start
+        if (i + 1 < kv_size(damage) && kv_A(damage, i + 1).id == (d.id | MARKTREE_END_FLAG)) {
+          Damage d2 = kv_A(damage, i + 1);
+
+          // pair
+          marktree_itr_set_node(b, itr, d.old, d.old_i);
+          marktree_itr_set_node(b, enditr, d2.old, d2.old_i);
+          marktree_intersect_pair(b, d.id, itr, enditr, true);
+          marktree_itr_set_node(b, itr, d.new, d.new_i);
+          marktree_itr_set_node(b, enditr, d2.new, d2.new_i);
+          marktree_intersect_pair(b, d.id, itr, enditr, false);
+
+          i++;  // consume two items
+          continue;
+        }
+
+        // d is lone start, end didn't move
+        MarkTreeIter endpos[1];
+        marktree_lookup(b, d.id | MARKTREE_END_FLAG, endpos);
+        if (endpos->x) {
+          marktree_itr_set_node(b, itr, d.old, d.old_i);
+          *enditr = *endpos;
+          marktree_intersect_pair(b, d.id, itr, enditr, true);
+          marktree_itr_set_node(b, itr, d.new, d.new_i);
+          *enditr = *endpos;
+          marktree_intersect_pair(b, d.id, itr, enditr, false);
+        }
+      } else {
+        // d is lone end, start didn't move
+        MarkTreeIter startpos[1];
+        uint64_t start_id = d.id & ~MARKTREE_END_FLAG;
+
+        marktree_lookup(b, start_id, startpos);
+        if (startpos->x) {
+          *itr = *startpos;
+          marktree_itr_set_node(b, enditr, d.old, d.old_i);
+          marktree_intersect_pair(b, start_id, itr, enditr, true);
+          *itr = *startpos;
+          marktree_itr_set_node(b, enditr, d.new, d.new_i);
+          marktree_intersect_pair(b, start_id, itr, enditr, false);
+        }
+      }
+    }
+  }
+  kvi_destroy(damage);
+
   return moved;
 }
 
 void marktree_move_region(MarkTree *b, int start_row, colnr_T start_col, int extent_row,
                           colnr_T extent_col, int new_row, colnr_T new_col)
 {
-  mtpos_t start = { start_row, start_col }, size = { extent_row, extent_col };
-  mtpos_t end = size;
+  MTPos start = { start_row, start_col }, size = { extent_row, extent_col };
+  MTPos end = size;
   unrelative(start, &end);
   MarkTreeIter itr[1] = { 0 };
   marktree_itr_get_ext(b, start, itr, false, true, NULL);
-  kvec_t(mtkey_t) saved = KV_INITIAL_VALUE;
-  while (itr->node) {
-    mtkey_t k = marktree_itr_current(itr);
+  kvec_t(MTKey) saved = KV_INITIAL_VALUE;
+  while (itr->x) {
+    MTKey k = marktree_itr_current(itr);
     if (!pos_leq(k.pos, end) || (k.pos.row == end.row && k.pos.col == end.col
                                  && mt_right(k))) {
       break;
@@ -1020,57 +1857,101 @@ void marktree_move_region(MarkTree *b, int start_row, colnr_T start_col, int ext
   }
 
   marktree_splice(b, start.row, start.col, size.row, size.col, 0, 0);
-  mtpos_t new = { new_row, new_col };
+  MTPos new = { new_row, new_col };
   marktree_splice(b, new.row, new.col,
                   0, 0, size.row, size.col);
 
   for (size_t i = 0; i < kv_size(saved); i++) {
-    mtkey_t item = kv_A(saved, i);
+    MTKey item = kv_A(saved, i);
     unrelative(new, &item.pos);
     marktree_put_key(b, item);
+    if (mt_paired(item)) {
+      // other end might be later in `saved`, this will safely bail out then
+      marktree_restore_pair(b, item);
+    }
   }
   kv_destroy(saved);
 }
 
 /// @param itr OPTIONAL. set itr to pos.
-mtkey_t marktree_lookup_ns(MarkTree *b, uint32_t ns, uint32_t id, bool end, MarkTreeIter *itr)
+MTKey marktree_lookup_ns(MarkTree *b, uint32_t ns, uint32_t id, bool end, MarkTreeIter *itr)
 {
   return marktree_lookup(b, mt_lookup_id(ns, id, end), itr);
 }
 
-/// @param itr OPTIONAL. set itr to pos.
-mtkey_t marktree_lookup(MarkTree *b, uint64_t id, MarkTreeIter *itr)
+static uint64_t pseudo_index(MTNode *x, int i)
 {
-  mtnode_t *n = pmap_get(uint64_t)(b->id2node, id);
+  int off = MT_LOG2_BRANCH * x->level;
+  uint64_t index = 0;
+
+  while (x) {
+    index |= (uint64_t)(i + 1) << off;
+    off += MT_LOG2_BRANCH;
+    i = x->p_idx;
+    x = x->parent;
+  }
+
+  return index;
+}
+
+/// Find the pseudo-index of a mark by its lookup id
+/// if sloppy, two keys in the same _leaf_ node have the same index
+static uint64_t pseudo_index_for_id(MarkTree *b, uint64_t id, bool sloppy)
+{
+  MTNode *n = id2node(b, id);
+  if (n == NULL) {
+    return 0;  // a valid pseudo-index is never zero!
+  }
+
+  int i = 0;
+  if (n->level || !sloppy) {
+    for (i = 0; i < n->n; i++) {
+      if (mt_lookup_key(n->key[i]) == id) {
+        break;
+      }
+    }
+    assert(i < n->n);
+    if (n->level) {
+      i += 1;  // internal key i comes after ptr[i]
+    }
+  }
+
+  return pseudo_index(n, i);
+}
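
pseudo_index() packs, from the node's own level upwards, "index within parent plus one" into MT_LOG2_BRANCH-bit fields, so that comparing two pseudo-indices orders the keys the same way an in-order walk of the tree would visit them. A tiny worked example (the node layout is hypothetical; MT_LOG2_BRANCH == 5 as defined in marktree.h):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
  // a key at index 2 in a leaf; the leaf is child 1 of its parent, and the
  // parent is child 0 of the root
  uint64_t key_index = (uint64_t)(2 + 1) << 0     // key position within the leaf
                     | (uint64_t)(1 + 1) << 5     // leaf's position within its parent
                     | (uint64_t)(0 + 1) << 10;   // parent's position within the root
  // the "sloppy" pseudo-index of the same leaf uses i == 0, which stands for
  // "at or just before the first key of this leaf", so it never compares
  // greater than any real key in the leaf
  uint64_t leaf_index = (uint64_t)(0 + 1) << 0
                      | (uint64_t)(1 + 1) << 5
                      | (uint64_t)(0 + 1) << 10;
  assert(leaf_index < key_index);  // 1089 < 1091
  return 0;
}
```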
+
+/// @param itr OPTIONAL. set itr to pos.
+MTKey marktree_lookup(MarkTree *b, uint64_t id, MarkTreeIter *itr)
+{
+  MTNode *n = id2node(b, id);
   if (n == NULL) {
     if (itr) {
-      itr->node = NULL;
+      itr->x = NULL;
     }
     return MT_INVALID_KEY;
   }
   int i = 0;
   for (i = 0; i < n->n; i++) {
     if (mt_lookup_key(n->key[i]) == id) {
-      goto found;
+      return marktree_itr_set_node(b, itr, n, i);
     }
   }
+
   abort();
-found: {}
-  mtkey_t key = n->key[i];
+}
+
+MTKey marktree_itr_set_node(MarkTree *b, MarkTreeIter *itr, MTNode *n, int i)
+{
+  MTKey key = n->key[i];
   if (itr) {
     itr->i = i;
-    itr->node = n;
+    itr->x = n;
     itr->lvl = b->root->level - n->level;
   }
   while (n->parent != NULL) {
-    mtnode_t *p = n->parent;
-    for (i = 0; i < p->n + 1; i++) {
-      if (p->ptr[i] == n) {
-        goto found_node;
-      }
-    }
-    abort();
-found_node:
+    MTNode *p = n->parent;
+    i = n->p_idx;
+    assert(p->ptr[i] == n);
+
     if (itr) {
       itr->s[b->root->level - p->level].i = i;
     }
@@ -1085,14 +1966,14 @@ found_node:
   return key;
 }
 
-mtpos_t marktree_get_altpos(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
+MTPos marktree_get_altpos(MarkTree *b, MTKey mark, MarkTreeIter *itr)
 {
   return marktree_get_alt(b, mark, itr).pos;
 }
 
-mtkey_t marktree_get_alt(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
+MTKey marktree_get_alt(MarkTree *b, MTKey mark, MarkTreeIter *itr)
 {
-  mtkey_t end = MT_INVALID_KEY;
+  MTKey end = MT_INVALID_KEY;
   if (mt_paired(mark)) {
     end = marktree_lookup_ns(b, mark.ns, mark.id, !mt_end(mark), itr);
   }
@@ -1101,8 +1982,8 @@ mtkey_t marktree_get_alt(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
 
 static void marktree_itr_fix_pos(MarkTree *b, MarkTreeIter *itr)
 {
-  itr->pos = (mtpos_t){ 0, 0 };
-  mtnode_t *x = b->root;
+  itr->pos = (MTPos){ 0, 0 };
+  MTNode *x = b->root;
   for (int lvl = 0; lvl < itr->lvl; lvl++) {
     itr->s[lvl].oldcol = itr->pos.col;
     int i = itr->s[lvl].i;
@@ -1112,23 +1993,36 @@ static void marktree_itr_fix_pos(MarkTree *b, MarkTreeIter *itr)
     assert(x->level);
     x = x->ptr[i];
   }
-  assert(x == itr->node);
+  assert(x == itr->x);
 }
 
 // for unit test
-void marktree_put_test(MarkTree *b, uint32_t id, int row, int col, bool right_gravity)
+void marktree_put_test(MarkTree *b, uint32_t ns, uint32_t id, int row, int col, bool right_gravity,
+                       int end_row, int end_col, bool end_right)
 {
-  mtkey_t key = { { row, col }, UINT32_MAX, id, 0,
-                  mt_flags(right_gravity, 0), 0, NULL };
-  marktree_put(b, key, -1, -1, false);
+  MTKey key = { { row, col }, ns, id, 0,
+                mt_flags(right_gravity, 0), 0, NULL };
+  marktree_put(b, key, end_row, end_col, end_right);
 }
 
 // for unit test
-bool mt_right_test(mtkey_t key)
+bool mt_right_test(MTKey key)
 {
   return mt_right(key);
 }
 
+// for unit test
+void marktree_del_pair_test(MarkTree *b, uint32_t ns, uint32_t id)
+{
+  MarkTreeIter itr[1];
+  marktree_lookup_ns(b, ns, id, false, itr);
+
+  uint64_t other = marktree_del_itr(b, itr, false);
+  assert(other);
+  marktree_lookup(b, other, itr);
+  marktree_del_itr(b, itr, false);
+}
+
 void marktree_check(MarkTree *b)
 {
 #ifndef NDEBUG
@@ -1139,7 +2033,7 @@ void marktree_check(MarkTree *b)
     return;
   }
 
-  mtpos_t dummy;
+  MTPos dummy;
   bool last_right = false;
   size_t nkeys = marktree_check_node(b, b->root, &dummy, &last_right);
   assert(b->n_keys == nkeys);
@@ -1151,7 +2045,7 @@ void marktree_check(MarkTree *b)
 }
 
 #ifndef NDEBUG
-size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_right)
+size_t marktree_check_node(MarkTree *b, MTNode *x, MTPos *last, bool *last_right)
 {
   assert(x->n <= 2 * T - 1);
   // TODO(bfredl): too strict if checking "in repair" post-delete tree.
@@ -1162,7 +2056,7 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
     if (x->level) {
       n_keys += marktree_check_node(b, x->ptr[i], last, last_right);
     } else {
-      *last = (mtpos_t) { 0, 0 };
+      *last = (MTPos) { 0, 0 };
     }
     if (i > 0) {
       unrelative(x->key[i - 1].pos, last);
@@ -1182,6 +2076,7 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
 
     for (int i = 0; i < x->n + 1; i++) {
       assert(x->ptr[i]->parent == x);
+      assert(x->ptr[i]->p_idx == i);
       assert(x->ptr[i]->level == x->level - 1);
       // PARANOIA: check no double node ref
       for (int j = 0; j < i; j++) {
@@ -1193,34 +2088,221 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
   }
   return n_keys;
 }
-#endif
 
-char *mt_inspect_rec(MarkTree *b)
+bool marktree_check_intersections(MarkTree *b)
 {
-  garray_T ga;
-  ga_init(&ga, (int)sizeof(char), 80);
-  mtpos_t p = { 0, 0 };
-  mt_inspect_node(b, &ga, b->root, p);
-  return ga.ga_data;
+  if (!b->root) {
+    return true;
+  }
+  PMap(ptr_t) checked = MAP_INIT;
+
+  // 1. move x->intersect to checked[x] and reinit x->intersect
+  mt_recurse_nodes(b->root, &checked);
+
+  // 2. iterate over all marks. for each START mark of a pair,
+  // intersect the nodes between the pair
+  MarkTreeIter itr[1];
+  marktree_itr_first(b, itr);
+  while (true) {
+    MTKey mark = marktree_itr_current(itr);
+    if (mark.pos.row < 0) {
+      break;
+    }
+
+    if (mt_start(mark)) {
+      MarkTreeIter start_itr[1];
+      MarkTreeIter end_itr[1];
+      uint64_t end_id = mt_lookup_id(mark.ns, mark.id, true);
+      MTKey k = marktree_lookup(b, end_id, end_itr);
+      if (k.pos.row >= 0) {
+        *start_itr = *itr;
+        marktree_intersect_pair(b, mt_lookup_key(mark), start_itr, end_itr, false);
+      }
+    }
+
+    marktree_itr_next(b, itr);
+  }
+
+  // 3. for each node check if the recreated intersection
+  // matches the old checked[x] intersection.
+  bool status = mt_recurse_nodes_compare(b->root, &checked);
+
+  uint64_t *val;
+  map_foreach_value(&checked, val, {
+    xfree(val);
+  });
+  map_destroy(ptr_t, &checked);
+
+  return status;
 }
 
-void mt_inspect_node(MarkTree *b, garray_T *ga, mtnode_t *n, mtpos_t off)
+void mt_recurse_nodes(MTNode *x, PMap(ptr_t) *checked)
+{
+  if (kv_size(x->intersect)) {
+    kvi_push(x->intersect, (uint64_t)-1);  // sentinel
+    uint64_t *val;
+    if (x->intersect.items == x->intersect.init_array) {
+      val = xmemdup(x->intersect.items, x->intersect.size * sizeof(*x->intersect.items));
+    } else {
+      val = x->intersect.items;
+    }
+    pmap_put(ptr_t)(checked, x, val);
+    kvi_init(x->intersect);
+  }
+
+  if (x->level) {
+    for (int i = 0; i < x->n + 1; i++) {
+      mt_recurse_nodes(x->ptr[i], checked);
+    }
+  }
+}
+
+bool mt_recurse_nodes_compare(MTNode *x, PMap(ptr_t) *checked)
+{
+  uint64_t *ref = pmap_get(ptr_t)(checked, x);
+  if (ref != NULL) {
+    for (size_t i = 0;; i++) {
+      if (ref[i] == (uint64_t)-1) {
+        if (i != kv_size(x->intersect)) {
+          return false;
+        }
+
+        break;
+      } else {
+        if (kv_size(x->intersect) <= i || ref[i] != kv_A(x->intersect, i)) {
+          return false;
+        }
+      }
+    }
+  } else {
+    if (kv_size(x->intersect)) {
+      return false;
+    }
+  }
+
+  if (x->level) {
+    for (int i = 0; i < x->n + 1; i++) {
+      if (!mt_recurse_nodes_compare(x->ptr[i], checked)) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+#endif
+
+// TODO(bfredl): kv_print
+#define GA_PUT(x) ga_concat(ga, (char *)(x))
+#define GA_PRINT(fmt, ...) snprintf(buf, sizeof(buf), fmt, __VA_ARGS__); \
+  GA_PUT(buf);
+
+String mt_inspect(MarkTree *b, bool keys, bool dot)
+{
+  garray_T ga[1];
+  ga_init(ga, (int)sizeof(char), 80);
+  MTPos p = { 0, 0 };
+  if (b->root) {
+    if (dot) {
+      GA_PUT("digraph D {\n\n");
+      mt_inspect_dotfile_node(b, ga, b->root, p, NULL);
+      GA_PUT("\n}");
+    } else {
+      mt_inspect_node(b, ga, keys, b->root, p);
+    }
+  }
+  return ga_take_string(ga);
+}
+
+void mt_inspect_node(MarkTree *b, garray_T *ga, bool keys, MTNode *n, MTPos off)
 {
   static char buf[1024];
-  ga_concat(ga, "[");
+  GA_PUT("[");
+  if (keys && kv_size(n->intersect)) {
+    for (size_t i = 0; i < kv_size(n->intersect); i++) {
+      GA_PUT(i == 0 ? "{" : ";");
+      // GA_PRINT("%"PRIu64, kv_A(n->intersect, i));
+      GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i)));
+    }
+    GA_PUT("},");
+  }
   if (n->level) {
-    mt_inspect_node(b, ga, n->ptr[0], off);
+    mt_inspect_node(b, ga, keys, n->ptr[0], off);
   }
   for (int i = 0; i < n->n; i++) {
-    mtpos_t p = n->key[i].pos;
+    MTPos p = n->key[i].pos;
     unrelative(off, &p);
-    snprintf(buf, sizeof(buf), "%d/%d", p.row, p.col);
-    ga_concat(ga, buf);
+    GA_PRINT("%d/%d", p.row, p.col);
+    if (keys) {
+      MTKey key = n->key[i];
+      GA_PUT(":");
+      if (mt_start(key)) {
+        GA_PUT("<");
+      }
+      // GA_PRINT("%"PRIu64, mt_lookup_id(key.ns, key.id, false));
+      GA_PRINT("%" PRIu32, key.id);
+      if (mt_end(key)) {
+        GA_PUT(">");
+      }
+    }
     if (n->level) {
-      mt_inspect_node(b, ga, n->ptr[i + 1], p);
+      mt_inspect_node(b, ga, keys, n->ptr[i + 1], p);
     } else {
       ga_concat(ga, ",");
     }
   }
   ga_concat(ga, "]");
 }
+
+void mt_inspect_dotfile_node(MarkTree *b, garray_T *ga, MTNode *n, MTPos off, char *parent)
+{
+  static char buf[1024];
+  char namebuf[64];
+  if (parent != NULL) {
+    snprintf(namebuf, sizeof namebuf, "%s_%c%d", parent, 'a' + n->level, n->p_idx);
+  } else {
+    snprintf(namebuf, sizeof namebuf, "Node");
+  }
+
+  GA_PRINT("  %s[shape=plaintext, label=<\n", namebuf);
+  GA_PUT("    \n");
+  if (kv_size(n->intersect)) {
+    GA_PUT("    \n");
+  }
+
+  GA_PUT("    \n");
+  GA_PUT("    
"); + for (size_t i = 0; i < kv_size(n->intersect); i++) { + if (i > 0) { + GA_PUT(", "); + } + GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i))); + } + GA_PUT("
"); + for (int i = 0; i < n->n; i++) { + MTKey k = n->key[i]; + if (i > 0) { + GA_PUT(", "); + } + GA_PRINT("%d", k.id); + if (mt_paired(k)) { + GA_PUT(mt_end(k) ? "e" : "s"); + } + } + GA_PUT("
\n"); + GA_PUT(">];\n"); + if (parent) { + GA_PRINT(" %s -> %s\n", parent, namebuf); + } + if (n->level) { + mt_inspect_dotfile_node(b, ga, n->ptr[0], off, namebuf); + } + for (int i = 0; i < n->n; i++) { + MTPos p = n->key[i].pos; + unrelative(off, &p); + if (n->level) { + mt_inspect_dotfile_node(b, ga, n->ptr[i + 1], p, namebuf); + } + } +} diff --git a/src/nvim/marktree.h b/src/nvim/marktree.h index cd56115b58..f53a54f3cc 100644 --- a/src/nvim/marktree.h +++ b/src/nvim/marktree.h @@ -6,29 +6,37 @@ #include #include +#include "klib/kvec.h" #include "nvim/assert.h" #include "nvim/garray.h" #include "nvim/map.h" #include "nvim/pos.h" #include "nvim/types.h" +// only for debug functions: +#include "api/private/defs.h" + struct mtnode_s; #define MT_MAX_DEPTH 20 #define MT_BRANCH_FACTOR 10 +// note max branch is actually 2*MT_BRANCH_FACTOR +// and strictly this is ceil(log2(2*MT_BRANCH_FACTOR + 1)) +// as we need a pseudo-index for "right before this node" +#define MT_LOG2_BRANCH 5 typedef struct { int32_t row; int32_t col; -} mtpos_t; -#define mtpos_t(r, c) ((mtpos_t){ .row = (r), .col = (c) }) +} MTPos; +#define MTPos(r, c) ((MTPos){ .row = (r), .col = (c) }) -typedef struct mtnode_s mtnode_t; +typedef struct mtnode_s MTNode; typedef struct { - mtpos_t pos; + MTPos pos; int lvl; - mtnode_t *node; + MTNode *x; int i; struct { int oldcol; @@ -36,33 +44,43 @@ typedef struct { } s[MT_MAX_DEPTH]; size_t intersect_idx; - mtpos_t intersect_pos; + MTPos intersect_pos; + MTPos intersect_pos_x; } MarkTreeIter; -#define marktree_itr_valid(itr) ((itr)->node != NULL) +#define marktree_itr_valid(itr) ((itr)->x != NULL) // Internal storage // // NB: actual marks have flags > 0, so we can use (row,col,0) pseudo-key for // "space before (row,col)" typedef struct { - mtpos_t pos; + MTPos pos; uint32_t ns; uint32_t id; int32_t hl_id; uint16_t flags; uint16_t priority; Decoration *decor_full; -} mtkey_t; -#define MT_INVALID_KEY (mtkey_t) { { -1, -1 }, 0, 0, 0, 0, 0, NULL } +} MTKey; + +typedef struct { + MTKey start; + MTPos end_pos; + bool end_right_gravity; +} MTPair; + +#define MT_INVALID_KEY (MTKey) { { -1, -1 }, 0, 0, 0, 0, 0, NULL } #define MT_FLAG_REAL (((uint16_t)1) << 0) #define MT_FLAG_END (((uint16_t)1) << 1) #define MT_FLAG_PAIRED (((uint16_t)1) << 2) -#define MT_FLAG_HL_EOL (((uint16_t)1) << 3) +// orphaned: the other side of this paired mark was deleted. this mark must be deleted very soon! +#define MT_FLAG_ORPHANED (((uint16_t)1) << 3) +#define MT_FLAG_HL_EOL (((uint16_t)1) << 4) #define DECOR_LEVELS 4 -#define MT_FLAG_DECOR_OFFSET 4 +#define MT_FLAG_DECOR_OFFSET 5 #define MT_FLAG_DECOR_MASK (((uint16_t)(DECOR_LEVELS - 1)) << MT_FLAG_DECOR_OFFSET) // next flag is (((uint16_t)1) << 6) @@ -73,39 +91,44 @@ typedef struct { #define MT_FLAG_EXTERNAL_MASK (MT_FLAG_DECOR_MASK | MT_FLAG_RIGHT_GRAVITY | MT_FLAG_HL_EOL) -#define MARKTREE_END_FLAG (((uint64_t)1) << 63) +// this is defined so that start and end of the same range have adjacent ids +#define MARKTREE_END_FLAG ((uint64_t)1) static inline uint64_t mt_lookup_id(uint32_t ns, uint32_t id, bool enda) { - return (uint64_t)ns << 32 | id | (enda ? MARKTREE_END_FLAG : 0); + return (uint64_t)ns << 33 | (id <<1) | (enda ? 
MARKTREE_END_FLAG : 0); } -#undef MARKTREE_END_FLAG -static inline uint64_t mt_lookup_key(mtkey_t key) +static inline uint64_t mt_lookup_key_side(MTKey key, bool end) +{ + return mt_lookup_id(key.ns, key.id, end); +} + +static inline uint64_t mt_lookup_key(MTKey key) { return mt_lookup_id(key.ns, key.id, key.flags & MT_FLAG_END); } -static inline bool mt_paired(mtkey_t key) +static inline bool mt_paired(MTKey key) { return key.flags & MT_FLAG_PAIRED; } -static inline bool mt_end(mtkey_t key) +static inline bool mt_end(MTKey key) { return key.flags & MT_FLAG_END; } -static inline bool mt_start(mtkey_t key) +static inline bool mt_start(MTKey key) { return mt_paired(key) && !mt_end(key); } -static inline bool mt_right(mtkey_t key) +static inline bool mt_right(MTKey key) { return key.flags & MT_FLAG_RIGHT_GRAVITY; } -static inline uint8_t marktree_decor_level(mtkey_t key) +static inline uint8_t marktree_decor_level(MTKey key) { return (uint8_t)((key.flags&MT_FLAG_DECOR_MASK) >> MT_FLAG_DECOR_OFFSET); } @@ -117,18 +140,27 @@ static inline uint16_t mt_flags(bool right_gravity, uint8_t decor_level) | (decor_level << MT_FLAG_DECOR_OFFSET)); } +typedef kvec_withinit_t(uint64_t, 4) Intersection; + struct mtnode_s { int32_t n; - int32_t level; + int16_t level; + int16_t p_idx; // index in parent + Intersection intersect; // TODO(bfredl): we could consider having a only-sometimes-valid // index into parent for faster "cached" lookup. - mtnode_t *parent; - mtkey_t key[2 * MT_BRANCH_FACTOR - 1]; - mtnode_t *ptr[]; + MTNode *parent; + MTKey key[2 * MT_BRANCH_FACTOR - 1]; + MTNode *ptr[]; }; +static inline uint64_t mt_dbg_id(uint64_t id) +{ + return (id>>1)&0xffffffff; +} + typedef struct { - mtnode_t *root; + MTNode *root; size_t n_keys, n_nodes; // TODO(bfredl): the pointer to node could be part of the larger // Map(uint64_t, ExtmarkItem) essentially; diff --git a/src/nvim/option_defs.h b/src/nvim/option_defs.h index 1007925ccb..14f29682e1 100644 --- a/src/nvim/option_defs.h +++ b/src/nvim/option_defs.h @@ -630,6 +630,7 @@ EXTERN unsigned rdb_flags; #define RDB_NODELTA 0x008 #define RDB_LINE 0x010 #define RDB_FLUSH 0x020 +#define RDB_INTERSECT 0x040 EXTERN long p_rdt; // 'redrawtime' EXTERN long p_re; // 'regexpengine' diff --git a/src/nvim/plines.c b/src/nvim/plines.c index 82554c7785..99f666ef3f 100644 --- a/src/nvim/plines.c +++ b/src/nvim/plines.c @@ -133,7 +133,7 @@ void init_chartabsize_arg(chartabsize_T *cts, win_T *wp, linenr_T lnum, colnr_T if (cts->cts_row >= 0 && wp->w_buffer->b_virt_text_inline > 0) { marktree_itr_get(wp->w_buffer->b_marktree, cts->cts_row, 0, cts->cts_iter); - mtkey_t mark = marktree_itr_current(cts->cts_iter); + MTKey mark = marktree_itr_current(cts->cts_iter); if (mark.pos.row == cts->cts_row) { cts->cts_has_virt_text = true; } @@ -222,7 +222,7 @@ int win_lbr_chartabsize(chartabsize_T *cts, int *headp) int tab_size = size; int col = (int)(s - line); while (true) { - mtkey_t mark = marktree_itr_current(cts->cts_iter); + MTKey mark = marktree_itr_current(cts->cts_iter); if (mark.pos.row != cts->cts_row || mark.pos.col > col) { break; } else if (mark.pos.col == col) { diff --git a/src/nvim/version.h b/src/nvim/version.h index 484350edee..e0c7b76700 100644 --- a/src/nvim/version.h +++ b/src/nvim/version.h @@ -7,6 +7,9 @@ // defined in version.c extern char *Version; extern char *longVersion; +#ifndef NDEBUG +extern char *version_cflags; +#endif // // Vim version number, name, etc. Patchlevel is defined in version.c. 
diff --git a/test/functional/api/extmark_spec.lua b/test/functional/api/extmark_spec.lua index 6d8e3d8e0a..a917432dab 100644 --- a/test/functional/api/extmark_spec.lua +++ b/test/functional/api/extmark_spec.lua @@ -753,7 +753,14 @@ describe('API/extmarks', function() }) end) - -- TODO(bfredl): add more tests! + it('can get overlapping extmarks', function() + set_extmark(ns, 1, 0, 0, {end_row = 5, end_col=0}) + set_extmark(ns, 2, 2, 5, {end_row = 2, end_col=30}) + set_extmark(ns, 3, 0, 5, {end_row = 2, end_col=10}) + set_extmark(ns, 4, 0, 0, {end_row = 1, end_col=0}) + eq({{ 2, 2, 5 }}, get_extmarks(ns, {2, 0}, {2, -1}, { overlap=false })) + eq({{ 1, 0, 0 }, { 3, 0, 5}, {2, 2, 5}}, get_extmarks(ns, {2, 0}, {2, -1}, { overlap=true })) + end) end) it('replace works', function() diff --git a/test/functional/helpers.lua b/test/functional/helpers.lua index b98cf97e7e..6f5397a089 100644 --- a/test/functional/helpers.lua +++ b/test/functional/helpers.lua @@ -857,6 +857,11 @@ function module.testprg(name) return ('%s/%s%s'):format(module.nvim_dir, name, ext) end +function module.is_asan() + local version = module.eval('execute("verbose version")') + return version:match('-fsanitize=[a-z,]*address') +end + -- Returns a valid, platform-independent Nvim listen address. -- Useful for communicating with child instances. function module.new_pipename() diff --git a/test/functional/legacy/memory_usage_spec.lua b/test/functional/legacy/memory_usage_spec.lua index bf37315914..5f722e5190 100644 --- a/test/functional/legacy/memory_usage_spec.lua +++ b/test/functional/legacy/memory_usage_spec.lua @@ -11,14 +11,10 @@ local load_adjust = helpers.load_adjust local write_file = helpers.write_file local is_os = helpers.is_os local is_ci = helpers.is_ci - -local function isasan() - local version = eval('execute("verbose version")') - return version:match('-fsanitize=[a-z,]*address') -end +local is_asan = helpers.is_asan clear() -if isasan() then +if is_asan() then pending('ASAN build is difficult to estimate memory usage', function() end) return elseif is_os('win') then diff --git a/test/functional/ui/decorations_spec.lua b/test/functional/ui/decorations_spec.lua index a75290b400..4262f7ce77 100644 --- a/test/functional/ui/decorations_spec.lua +++ b/test/functional/ui/decorations_spec.lua @@ -691,6 +691,7 @@ describe('extmark decorations', function() [33] = {foreground = Screen.colors.DarkBlue, background = Screen.colors.LightGray}; [34] = {background = Screen.colors.Yellow}; [35] = {background = Screen.colors.Yellow, bold = true, foreground = Screen.colors.Blue}; + [36] = {foreground = Screen.colors.Blue1, bold = true, background = Screen.colors.Red}; } ns = meths.create_namespace 'test' @@ -1652,6 +1653,70 @@ describe('extmark decorations', function() {24:-- VISUAL BLOCK --} | ]]) end) + + it('supports multiline highlights', function() + insert(example_text) + feed 'gg' + for _,i in ipairs {1,2,3,5,6,7} do + for _,j in ipairs {2,5,10,15} do + meths.buf_set_extmark(0, ns, i, j, { end_col=j+2, hl_group = 'NonText'}) + end + end + screen:expect{grid=[[ + ^for _,item in ipairs(items) do | + {1: }l{1:oc}al {1:te}xt,{1: h}l_id_cell, count = unpack(item) | + {1: }i{1:f }hl_{1:id}_ce{1:ll} ~= nil then | + {1: } {1: } hl{1:_i}d ={1: h}l_id_cell | + end | + {1: }f{1:or} _ {1:= }1, {1:(c}ount or 1) do | + {1: } {1: } lo{1:ca}l c{1:el}l = line[colpos] | + {1: } {1: } ce{1:ll}.te{1:xt} = text | + cell.hl_id = hl_id | + colpos = colpos+1 | + end | + end | + {1:~ }| + {1:~ }| + | + ]]} + feed'5' + screen:expect{grid=[[ + ^ {1: 
}f{1:or} _ {1:= }1, {1:(c}ount or 1) do | + {1: } {1: } lo{1:ca}l c{1:el}l = line[colpos] | + {1: } {1: } ce{1:ll}.te{1:xt} = text | + cell.hl_id = hl_id | + colpos = colpos+1 | + end | + end | + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + | + ]]} + + meths.buf_set_extmark(0, ns, 1, 0, { end_line=8, end_col=10, hl_group = 'ErrorMsg'}) + screen:expect{grid=[[ + {4:^ }{36: }{4:f}{36:or}{4: _ }{36:= }{4:1, }{36:(c}{4:ount or 1) do} | + {4: }{36: }{4: }{36: }{4: lo}{36:ca}{4:l c}{36:el}{4:l = line[colpos]} | + {4: }{36: }{4: }{36: }{4: ce}{36:ll}{4:.te}{36:xt}{4: = text} | + {4: ce}ll.hl_id = hl_id | + colpos = colpos+1 | + end | + end | + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + {1:~ }| + | + ]]} + end) end) describe('decorations: inline virtual text', function() @@ -4136,7 +4201,6 @@ l5 end) it('can add multiple signs (single extmark)', function() - pending('TODO(lewis6991): Support ranged signs') insert(example_test3) feed 'gg' @@ -4158,7 +4222,6 @@ l5 end) it('can add multiple signs (multiple extmarks)', function() - pending('TODO(lewis6991): Support ranged signs') insert(example_test3) feed'gg' @@ -4219,7 +4282,6 @@ l5 end) it('can add multiple signs (multiple extmarks) 3', function() - pending('TODO(lewis6991): Support ranged signs') insert(example_test3) feed 'gg' @@ -4289,7 +4351,6 @@ l5 end) it('works with old signs (with range)', function() - pending('TODO(lewis6991): Support ranged signs') insert(example_test3) feed 'gg' @@ -4304,7 +4365,7 @@ l5 screen:expect{grid=[[ S3S4S1^l1 | - S2S3x l2 | + x S2S3l2 | S5S3{1: }l3 | S3{1: }l4 | S3{1: }l5 | @@ -4317,8 +4378,6 @@ l5 end) it('can add a ranged sign (with start out of view)', function() - pending('TODO(lewis6991): Support ranged signs') - insert(example_test3) command 'set signcolumn=yes:2' feed 'gg' diff --git a/test/unit/helpers.lua b/test/unit/helpers.lua index e9b97266d0..43b6980702 100644 --- a/test/unit/helpers.lua +++ b/test/unit/helpers.lua @@ -849,6 +849,16 @@ local function ptr2key(ptr) return ffi.string(s) end +local function is_asan() + cimport('./src/nvim/version.h') + local status, res = pcall(function() return lib.version_cflags end) + if status then + return ffi.string(res):match('-fsanitize=[a-z,]*address') + else + return false + end +end + local module = { cimport = cimport, cppimport = cppimport, @@ -876,6 +886,7 @@ local module = { ptr2addr = ptr2addr, ptr2key = ptr2key, debug_log = debug_log, + is_asan = is_asan, } module = global_helpers.tbl_extend('error', module, global_helpers) return function() diff --git a/test/unit/marktree_spec.lua b/test/unit/marktree_spec.lua index 3c96bc5f58..32300c167c 100644 --- a/test/unit/marktree_spec.lua +++ b/test/unit/marktree_spec.lua @@ -87,13 +87,18 @@ local function dosplice(tree, shadow, start, old_extent, new_extent) shadowsplice(shadow, start, old_extent, new_extent) end +local ns = 10 local last_id = nil -local function put(tree, row, col, gravitate) +local function put(tree, row, col, gravitate, end_row, end_col, end_gravitate) last_id = last_id + 1 local my_id = last_id - lib.marktree_put_test(tree, my_id, row, col, gravitate); + end_row = end_row or -1 + end_col = end_col or -1 + end_gravitate = end_gravitate or false + + lib.marktree_put_test(tree, ns, my_id, row, col, gravitate, end_row, end_col, end_gravitate); return my_id end @@ -102,7 +107,7 @@ describe('marktree', function() last_id = 0 end) - itp('works', function() + itp('works', function() local tree = ffi.new("MarkTree[1]") -- zero initialized by luajit local 
shadow = {} local iter = ffi.new("MarkTreeIter[1]") @@ -129,7 +134,7 @@ describe('marktree', function() eq({}, id2pos) for i,ipos in pairs(shadow) do - local p = lib.marktree_lookup_ns(tree, -1, i, false, iter) + local p = lib.marktree_lookup_ns(tree, ns, i, false, iter) eq(ipos[1], p.pos.row) eq(ipos[2], p.pos.col) local k = lib.marktree_itr_current(iter) @@ -210,10 +215,224 @@ describe('marktree', function() lib.marktree_itr_get(tree, 10, 10, iter) lib.marktree_del_itr(tree, iter, false) - eq(11, iter[0].node.key[iter[0].i].pos.col) + eq(11, iter[0].x.key[iter[0].i].pos.col) lib.marktree_itr_get(tree, 11, 11, iter) lib.marktree_del_itr(tree, iter, false) - eq(12, iter[0].node.key[iter[0].i].pos.col) - end) + eq(12, iter[0].x.key[iter[0].i].pos.col) + end) + + itp("'intersect_mov' function works correctly", function() + local function mov(x, y, w) + local xa = ffi.new("uint64_t[?]", #x) + for i, xi in ipairs(x) do xa[i-1] = xi end + local ya = ffi.new("uint64_t[?]", #y) + for i, yi in ipairs(y) do ya[i-1] = yi end + local wa = ffi.new("uint64_t[?]", #w) + for i, wi in ipairs(w) do wa[i-1] = wi end + + local dummy_size = #x + #y + #w + local wouta = ffi.new("uint64_t[?]", dummy_size) + local douta = ffi.new("uint64_t[?]", dummy_size) + local wsize = ffi.new("size_t[1]") + wsize[0] = dummy_size + local dsize = ffi.new("size_t[1]") + dsize[0] = dummy_size + + local status = lib.intersect_mov_test(xa, #x, ya, #y, wa, #w, wouta, wsize, douta, dsize) + if status == 0 then error'wowza' end + + local wout, dout = {}, {} + for i = 0,tonumber(wsize[0])-1 do table.insert(wout, tonumber(wouta[i])) end + for i = 0,tonumber(dsize[0])-1 do table.insert(dout, tonumber(douta[i])) end + return {wout, dout} + end + + eq({{}, {}}, mov({}, {2, 3}, {2, 3})) + eq({{2, 3}, {}}, mov({}, {}, {2, 3})) + eq({{2, 3}, {}}, mov({2, 3}, {}, {})) + eq({{}, {2,3}}, mov({}, {2,3}, {})) + + eq({{1, 5}, {}}, mov({1,2,5}, {2, 3}, {3})) + eq({{1, 2}, {}}, mov({1,2,5}, {5, 10}, {10})) + eq({{1, 2}, {5}}, mov({1,2}, {5, 10}, {10})) + eq({{1,3,5,7,9}, {2,4,6,8,10}}, mov({1,3,5,7,9}, {2,4,6,8,10}, {})) + eq({{1,3,5,7,9}, {2,6,10}}, mov({1,3,5,7,9}, {2,4,6,8,10}, {4, 8})) + eq({{1,4,7}, {2,5,8}}, mov({1,3,4,6,7,9}, {2,3,5,6,8,9}, {})) + eq({{1,4,7}, {}}, mov({1,3,4,6,7,9}, {2,3,5,6,8,9}, {2,5,8})) + eq({{0,1,4,7,10}, {}}, mov({1,3,4,6,7,9}, {2,3,5,6,8,9}, {0,2,5,8,10})) + end) + + + local function check_intersections(tree) + lib.marktree_check(tree) + -- to debug stuff, disable this branch + if true == true then + ok(lib.marktree_check_intersections(tree)) + return + end + + local str1 = lib.mt_inspect(tree, true, true) + local dot1 = ffi.string(str1.data, str1.size) + + local val = lib.marktree_check_intersections(tree) + if not val then + local str2 = lib.mt_inspect(tree, true, true) + local dot2 = ffi.string(str2.data, str2.size) + print("actual:\n\n".."Xafile.dot".."\n\nexpected:\n\n".."Xefile.dot".."\n") + print("level", tree[0].root.level); + io.stdout:flush() + local afil = io.open("Xafile.dot", "wb") + afil:write(dot1) + afil:close() + local efil = io.open("Xefile.dot", "wb") + efil:write(dot2) + efil:close() + ok(false) + else + ffi.C.xfree(str1.data) + end + end + + itp('works with intersections', function() + local tree = ffi.new("MarkTree[1]") -- zero initialized by luajit + + local ids = {} + + for i = 1,80 do + table.insert(ids, put(tree, 1, i, false, 2, 100-i, false)) + check_intersections(tree) + end + for i = 1,80 do + lib.marktree_del_pair_test(tree, ns, ids[i]) + check_intersections(tree) + end + ids = {} + + for i
= 1,80 do + table.insert(ids, put(tree, 1, i, false, 2, 100-i, false)) + check_intersections(tree) + end + + for i = 1,10 do + for j = 1,8 do + local ival = (j-1)*10+i + lib.marktree_del_pair_test(tree, ns, ids[ival]) + check_intersections(tree) + end + end + end) + + itp('works with intersections with a big tree', function() + local tree = ffi.new("MarkTree[1]") -- zero initialized by luajit + + local ids = {} + + for i = 1,1000 do + table.insert(ids, put(tree, 1, i, false, 2, 1000-i, false)) + if i % 10 == 1 then + check_intersections(tree) + end + end + + check_intersections(tree) + eq(2000, tree[0].n_keys) + ok(tree[0].root.level >= 2) + + local iter = ffi.new("MarkTreeIter[1]") + + local k = 0 + for i = 1,20 do + for j = 1,50 do + k = k + 1 + local ival = (j-1)*20+i + if false == true then -- if there actually is a failure, this branch will fail out at the actual spot of the error + lib.marktree_lookup_ns(tree, ns, ids[ival], false, iter) + lib.marktree_del_itr(tree, iter, false) + check_intersections(tree) + + lib.marktree_lookup_ns(tree, ns, ids[ival], true, iter) + lib.marktree_del_itr(tree, iter, false) + check_intersections(tree) + else + lib.marktree_del_pair_test(tree, ns, ids[ival]) + if k % 5 == 1 then + check_intersections(tree) + end + end + end + end + + eq(0, tree[0].n_keys) + end) + + itp('works with intersections with an even bigger tree', function() + local tree = ffi.new("MarkTree[1]") -- zero initialized by luajit + + local ids = {} + + -- too much overhead on ASAN + local size_factor = helpers.is_asan() and 3 or 10 + + local at_row = {} + for i = 1, 10 do + at_row[i] = {} + end + + local size = 1000*size_factor + local k = 1 + while k <= size do + for row1 = 1,9 do + for row2 = row1,10 do -- note row2 can be == row1, leads to empty ranges being tested when k > size/2 + if k > size then + break + end + local id = put(tree, row1, k, false, row2, size-k, false) + table.insert(ids, id) + for i = row1+1, row2 do + table.insert(at_row[i], id) + end + --if tree[0].root.level == 4 then error("kk"..k) end + if k % (100*size_factor) == 1 or (k < 2000 and k%100 == 1) then + check_intersections(tree) + end + k = k + 1 + end + end + end + + eq(2*size, tree[0].n_keys) + ok(tree[0].root.level >= 3) + check_intersections(tree) + + local iter = ffi.new("MarkTreeIter[1]") + local pair = ffi.new("MTPair[1]") + for i = 1,10 do + -- use array as set and not {[id]=true} map, to detect duplicates + local set = {} + eq(true, ffi.C.marktree_itr_get_overlap(tree, i, 0, iter)) + while ffi.C.marktree_itr_step_overlap(tree, iter, pair) do + local id = tonumber(pair[0].start.id) + table.insert(set, id) + end + table.sort(set) + eq(at_row[i], set) + end + + k = 0 + for i = 1,100 do + for j = 1,(10*size_factor) do + k = k + 1 + local ival = (j-1)*100+i + lib.marktree_del_pair_test(tree, ns, ids[ival]) + -- just a few spot checks; if there is trouble we need to check + -- every one using the code in the "big tree" case above + if k % (100*size_factor) == 0 or (k > 3000 and k % 200 == 0) then + check_intersections(tree) + end + end + end + + eq(0, tree[0].n_keys) + end) end)
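The intersection tests above exercise the bookkeeping that marktree_itr_get_overlap()/marktree_itr_step_overlap() rely on. Besides multi-line highlights, the user-visible payoff is that a single ranged extmark now places its sign on every line of its range, which is why the previously pending 'ranged signs' cases in decorations_spec.lua are enabled above. A small user-level sketch of that behaviour; the buffer text, namespace name, sign text and highlight group are illustrative only:

local api = vim.api
local buf = api.nvim_create_buf(false, true)  -- scratch buffer for the example
api.nvim_buf_set_lines(buf, 0, -1, true, { 'l1', 'l2', 'l3', 'l4', 'l5' })
api.nvim_win_set_buf(0, buf)
vim.wo.signcolumn = 'yes'  -- keep the sign column visible
local ns = api.nvim_create_namespace('ranged-sign-demo')  -- hypothetical namespace name

-- A single extmark spanning rows 1..3: with ranged signs the 'S1' sign is
-- drawn on every row covered by the range, not only on the start row.
local id = api.nvim_buf_set_extmark(buf, ns, 1, 0, {
  end_row = 3,
  sign_text = 'S1',
  sign_hl_group = 'ErrorMsg',
})

Deleting the mark again with api.nvim_buf_del_extmark(buf, ns, id) removes the sign from all covered rows at once.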