1 // Copyright 2012 Google Inc. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS-IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 // Original author: ericv@google.com (Eric Veach)
16 // Converted to D:  madric@gmail.com (Vijay Nayar)
17 
18 module s2.mutable_s2shape_index;
19 
20 import s2.base.spinlock : SpinLock;
21 import s2.logger;
22 import s2.r1interval;
23 import s2.r2point;
24 import s2.r2rect;
25 import s2.s2cell_id;
26 import s2.s2edge_clipping : getClippedEdgeBound, FACE_CLIP_ERROR_UV_COORD, EDGE_CLIP_ERROR_UV_COORD;
27 import s2.s2edge_crosser;
28 import s2.s2padded_cell;
29 import s2.s2point;
30 import s2.s2pointutil;
31 import s2.s2shape;
32 import s2.s2shape_index : S2ShapeIndex, S2ShapeIndexCell, S2ClippedShape;
33 import s2.shapeutil.contains_brute_force : containsBruteForce;
34 import s2.util.container.btree_map;
35 import s2coords = s2.s2coords;
36 
37 import core.atomic : atomicLoad, atomicStore, MemoryOrder;
38 import core.sync.mutex : Mutex;
39 import std.algorithm : swap;
40 import std.array;
41 import std.exception : enforce;
42 
43 // The default maximum number of edges per cell (not counting "long" edges).
44 // If a cell has more than this many edges, and it is not a leaf cell, then it
45 // is subdivided.  This flag can be overridden via MutableS2ShapeIndex::Options.
46 // Reasonable values range from 10 to about 50 or so.
47 enum int S2SHAPE_INDEX_DEFAULT_MAX_EDGES_PER_CELL = 10;
48 
49 // FLAGS_s2shape_index_tmp_memory_budget_mb
50 //
51 // Attempt to limit the amount of temporary memory allocated while building or
52 // updating a MutableS2ShapeIndex to at most this value.  This is achieved by
53 // splitting the updates into multiple batches when necessary.  (The memory
54 // required is proportional to the number of edges being updated at once.)
55 //
56 // Note that this limit is not a hard guarantee, for several reasons:
57 //  (1) the memory estimates are only approximations;
58 //  (2) all edges in a given shape are added or removed at once, so shapes
59 //      with huge numbers of edges may exceed the budget;
60 //  (3) shapes being removed are always processed in a single batch.  (This
61 //      could be fixed, but it seems better to keep the code simpler for now.)
62 enum int S2SHAPE_INDEX_TMP_MEMORY_BUDGET_MB = 100;
63 
64 // FLAGS_s2shape_index_cell_size_to_long_edge_ratio
65 //
66 // The cell size relative to the length of an edge at which it is first
67 // considered to be "long".  Long edges do not contribute toward the decision
68 // to subdivide a cell further.  For example, a value of 2.0 means that the
69 // cell must be at least twice the size of the edge in order for that edge to
70 // be counted.  There are two reasons for not counting long edges: (1) such
71 // edges typically need to be propagated to several children, which increases
72 // time and memory costs without much benefit, and (2) in pathological cases,
73 // many long edges close together could force subdivision to continue all the
74 // way to the leaf cell level.
75 enum double S2SHAPE_INDEX_CELL_SIZE_TO_LONG_EDGE_RATIO = 1.0;
76 
77 
78 // MutableS2ShapeIndex is a class for in-memory indexing of polygonal geometry.
79 // The objects in the index are known as "shapes", and may consist of points,
80 // polylines, and/or polygons, possibly overlapping.  The index makes it very
81 // fast to answer queries such as finding nearby shapes, measuring distances,
82 // testing for intersection and containment, etc.
83 //
84 // MutableS2ShapeIndex allows not only building an index, but also updating it
85 // incrementally by adding or removing shapes (hence its name).  It is one of
86 // several implementations of the S2ShapeIndex interface.  MutableS2ShapeIndex
87 // is designed to be compact; usually it is smaller than the underlying
88 // geometry being indexed.  It is capable of indexing up to hundreds of
89 // millions of edges.  The index is also fast to construct.
90 //
91 // There are a number of built-in classes that work with S2ShapeIndex objects.
92 // Generally these classes accept any collection of geometry that can be
93 // represented by an S2ShapeIndex, i.e. any combination of points, polylines,
94 // and polygons.  Such classes include:
95 //
96 // - S2ContainsPointQuery: returns the shape(s) that contain a given point.
97 //
98 // - S2ClosestEdgeQuery: returns the closest edge(s) to a given point, edge,
99 //                       S2CellId, or S2ShapeIndex.
100 //
101 // - S2CrossingEdgeQuery: returns the edge(s) that cross a given edge.
102 //
103 // - S2BooleanOperation: computes boolean operations such as union,
104 //                       and boolean predicates such as containment.
105 //
106 // - S2ShapeIndexRegion: computes approximations for a collection of geometry.
107 //
108 // - S2ShapeIndexBufferedRegion: computes approximations that have been
109 //                               expanded by a given radius.
110 //
111 // Here is an example showing how to build an index for a set of polygons, and
112 // then then determine which polygon(s) contain each of a set of query points:
113 //
114 //   void TestContainment(const vector<S2Point>& points,
115 //                        const vector<S2Polygon*>& polygons) {
116 //     MutableS2ShapeIndex index;
117 //     for (auto polygon : polygons) {
118 //       index.Add(absl::make_unique<S2Polygon::Shape>(polygon));
119 //     }
120 //     auto query = MakeS2ContainsPointQuery(&index);
121 //     for (const auto& point : points) {
122 //       for (S2Shape* shape : query.GetContainingShapes(point)) {
123 //         S2Polygon* polygon = polygons[shape->id()];
124 //         ... do something with (point, polygon) ...
125 //       }
126 //     }
127 //   }
128 //
129 // This example uses S2Polygon::Shape, which is one example of an S2Shape
130 // object.  S2Polyline and S2Loop also have nested Shape classes, and there are
131 // additional S2Shape types defined in *_shape.h.
132 //
133 // Internally, MutableS2ShapeIndex is essentially a map from S2CellIds to the
134 // set of shapes that intersect each S2CellId.  It is adaptively refined to
135 // ensure that no cell contains more than a small number of edges.
136 //
137 // For efficiency, updates are batched together and applied lazily on the
138 // first subsequent query.  Locking is used to ensure that MutableS2ShapeIndex
139 // has the same thread-safety properties as "vector": const methods are
140 // thread-safe, while non-const methods are not thread-safe.  This means that
141 // if one thread updates the index, you must ensure that no other thread is
142 // reading or updating the index at the same time.
143 //
144 // TODO(ericv): MutableS2ShapeIndex has an Encode() method that allows the
145 // index to be serialized.  An encoded S2ShapeIndex can be decoded either into
146 // its original form (MutableS2ShapeIndex) or into an EncodedS2ShapeIndex.
147 // The key property of EncodedS2ShapeIndex is that it can be constructed
148 // instantaneously, since the index is kept in its original encoded form.
149 // Data is decoded only when an operation needs it.  For example, to determine
150 // which shapes(s) contain a given query point only requires decoding the data
151 // in the S2ShapeIndexCell that contains that point.
152 class MutableS2ShapeIndex : S2ShapeIndex {
153 private:
154 
155   alias CellMap = BTreeMap!(S2CellId, S2ShapeIndexCell);
156 
157 public:
158   // Options that affect construction of the MutableS2ShapeIndex.
159   static struct Options {
160   public:
161     // The maximum number of edges per cell.  If a cell has more than this
162     // many edges that are not considered "long" relative to the cell size,
163     // then it is subdivided.  (Whether an edge is considered "long" is
164     // controlled by --s2shape_index_cell_size_to_long_edge_ratio flag.)
165     //
166     // Values between 10 and 50 represent a reasonable balance between memory
167     // usage, construction time, and query time.  Small values make queries
168     // faster, while large values make construction faster and use less memory.
169     // Values higher than 50 do not save significant additional memory, and
170     // query times can increase substantially, especially for algorithms that
171     // visit all pairs of potentially intersecting edges (such as polygon
172     // validation), since this is quadratic in the number of edges per cell.
173     //
174     // Note that the *average* number of edges per cell is generally slightly
175     // less than half of the maximum value defined here.
176     //
177     // Defaults to value given by --s2shape_index_default_max_edges_per_cell.
178     @property
179     int maxEdgesPerCell() const {
180       return _maxEdgesPerCell;
181     }
182 
183     @property
184     void maxEdgesPerCell(int maxEdgesPerCell) {
185       _maxEdgesPerCell = maxEdgesPerCell;
186     }
187 
188   private:
189     int _maxEdgesPerCell = S2SHAPE_INDEX_DEFAULT_MAX_EDGES_PER_CELL;
190   }
191 
192   // Creates a MutableS2ShapeIndex that uses the default option settings.
193   // Option values may be changed by calling Init().
194   this() {
195     _cellMap = new CellMap();
196     _lock = new SpinLock();
197     _indexStatus = IndexStatus.FRESH;
198   }
199 
200   // Create a MutableS2ShapeIndex with the given options.
201   this(Options options) {
202     this();
203     _options = options;
204   }
205 
206   ~this() {
207     //clear();
208   }
209 
210   // Initialize a MutableS2ShapeIndex with the given options.  This method may
211   // only be called when the index is empty (i.e. newly created or Reset() has
212   // just been called).
213   void initialize(Options options)
214   in {
215     assert(_shapes.length == 0);
216   } do {
217     _options = options;
218   }
219 
220   const(Options) options() const {
221     return _options;
222   }
223 
224   // The number of distinct shape ids that have been assigned.  This equals
225   // the number of shapes in the index provided that no shapes have ever been
226   // removed.  (Shape ids are not reused.)
227   override
228   int numShapeIds() const {
229     return cast(int) _shapes.length;
230   }
231 
232   // Returns a pointer to the shape with the given id, or nullptr if the shape
233   // has been removed from the index.
234   override
235   inout(S2Shape) shape(int id) inout {
236     return _shapes[id];
237   }
238 
239   // Minimizes memory usage by requesting that any data structures that can be
240   // rebuilt should be discarded.  This method invalidates all iterators.
241   //
242   // Like all non-const methods, this method is not thread-safe.
243   override
244   void minimize() {
245     // TODO(ericv): Implement.  In theory we should be able to discard the
246     // entire index and rebuild it the next time it is needed.
247   }
248 
249   final static class Iterator : IteratorBase {
250   public:
251     // Default constructor; must be followed by a call to Init().
252     this() {
253       _index = null;
254     }
255 
256     // Constructs an iterator positioned as specified.  By default iterators
257     // are unpositioned, since this avoids an extra seek in this situation
258     // where one of the seek methods (such as Locate) is immediately called.
259     //
260     // If you want to position the iterator at the beginning, e.g. in order to
261     // loop through the entire index, do this instead:
262     //
263     //   for (MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN);
264     //        !it.done(); it.Next()) { ... }
265     this(MutableS2ShapeIndex index, InitialPosition pos = InitialPosition.UNPOSITIONED) {
266       initialize(index, pos);
267     }
268 
269 
270     // Initializes an iterator for the given MutableS2ShapeIndex.  This method
271     // may also be called in order to restore an iterator to a valid state
272     // after the underlying index has been updated (although it is usually
273     // easier just to declare a new iterator whenever required, since iterator
274     // construction is cheap).
275     void initialize(MutableS2ShapeIndex index, InitialPosition pos = InitialPosition.UNPOSITIONED) {
276       index.maybeApplyUpdates();  // TODO: This method is why indexes and iterators cannot be const.
277       initStale(index, pos);
278     }
279 
280     // Initialize an iterator for the given MutableS2ShapeIndex without
281     // applying any pending updates.  This can be used to observe the actual
282     // current state of the index without modifying it in any way.
283     void initStale(MutableS2ShapeIndex index, InitialPosition pos = InitialPosition.UNPOSITIONED) {
284       _index = index;
285       _end = _index._cellMap.end();
286       if (pos == InitialPosition.BEGIN) {
287         _iter = _index._cellMap.begin();
288       } else {
289         _iter = _end;
290       }
291       refresh();
292     }
293 
294     // Inherited non-virtual methods:
295     //   S2CellId id() const;
296     //   bool done() const;
297     //   S2Point center() const;
298     override
299     inout(S2ShapeIndexCell) cell() inout {
300       // Since MutableS2ShapeIndex always sets the "cell_" field, we can skip the
301       // logic in the base class that conditionally calls GetCell().
302       return rawCell();
303     }
304 
305     // IteratorBase API:
306     override
307     void begin()
308     in {
309       // Make sure that the index has not been modified since Init() was called.
310       assert(_index.isFresh());
311     } do {
312       _iter = _index._cellMap.begin();
313       _end = _index._cellMap.end();
314       refresh();
315     }
316 
317     override
318     void finish() {
319       _iter = _end;
320       refresh();
321     }
322 
323     override
324     void next()
325     in {
326       assert(!done());
327     } do {
328       ++_iter;
329       refresh();
330     }
331 
332     override
333     bool prev() {
334       if (_iter == _index._cellMap.begin()) {
335         return false;
336       }
337       --_iter;
338       refresh();
339       return true;
340     }
341 
342     override
343     void seek(S2CellId target) {
344       if (!_index._cellMap.equalRange(target).empty()) {
345         _iter = _index._cellMap.equalRange(target).toIterator();
346       } else {
347         _iter = _index._cellMap.upperRange(target).toIterator();
348       }
349       refresh();
350     }
351 
352     override
353     bool locate(in S2Point target) {
354       return IteratorBase.locateImpl(target, this);
355     }
356 
357     override
358     CellRelation locate(in S2CellId target) {
359       return IteratorBase.locateImpl(target, this);
360     }
361 
362     override
363     void copy(IteratorBase other) {
364       Iterator iter = cast(Iterator) other;
365       enforce(iter !is null, "Only the same concrete Iterator type may be copied.");
366       _index = iter._index;
367       _iter = iter._iter;
368       _end = iter._end;
369     }
370 
371   protected:
372     override
373     const(S2ShapeIndexCell) getCell() const {
374       enforce(false, "Should never be called");
375       return null;
376     }
377 
378     override
379     IteratorBase clone() {
380       auto iterator = new Iterator();
381       iterator._index = _index;
382       iterator._iter = _iter;
383       iterator._end = _end;
384       iterator.setState(id(), cell());
385       return iterator;
386     }
387 
388   private:
389     // Updates the IteratorBase fields.
390     void refresh() {
391       if (_iter == _end) {
392         setFinished();
393       } else {
394         setState(_iter.getValue().key, _iter.getValue().value);
395       }
396     }
397 
398     MutableS2ShapeIndex _index;
399     CellMap.Iterator _iter;
400     CellMap.Iterator _end;
401   }
402 
403   // Takes ownership of the given shape and adds it to the index.  Also
404   // assigns a unique id to the shape (shape->id()) and returns that id.
405   // Shape ids are assigned sequentially starting from 0 in the order shapes
406   // are added.  Invalidates all iterators and their associated data.
407   int add(S2Shape shape) {
408     // Additions are processed lazily by ApplyUpdates().
409     int id = cast(int) _shapes.length;
410     shape._id = id;
411     _shapes ~= shape;
412     atomicStore!(MemoryOrder.raw)(_indexStatus, IndexStatus.STALE);
413     return id;
414   }
415 
416   /// Removes the given shape from the index and return ownership to the caller.
417   /// Invalidates all iterators and their associated data.
418   S2Shape release(int shapeId)
419   in {
420     assert(_shapes[shapeId] !is null);
421   } out {
422     assert(_shapes[shapeId] is null);
423   } do {
424     // This class updates itself lazily, because it is much more efficient to
425     // process additions and removals in batches.  However this means that when
426     // a shape is removed, we need to make a copy of all its edges, since the
427     // client is free to delete "shape" once this call is finished.
428 
429     S2Shape shape = _shapes[shapeId];
430     _shapes[shapeId] = null;
431     if (shapeId >= _pendingAdditionsBegin) {
432       // We are removing a shape that has not yet been added to the index,
433       // so there is nothing else to do.
434     } else {
435       // We build the new RemovedShape in place, since it includes a potentially
436       // large vector of edges that might be expensive to copy.
437       _pendingRemovals ~= RemovedShape();
438       RemovedShape* removed = &_pendingRemovals[$ - 1];
439       removed.shapeId = shape.id();
440       removed.hasInterior = shape.hasInterior();
441       removed.containsTrackerOrigin = containsBruteForce(shape, interiorTrackerOrigin());
442       int num_edges = shape.numEdges();
443       removed.edges.length = 0;
444       for (int e = 0; e < num_edges; ++e) {
445         removed.edges ~= shape.edge(e);
446       }
447     }
448     atomicStore!(MemoryOrder.raw)(_indexStatus, IndexStatus.STALE);
449     return shape;
450   }
451 
452   private static S2Point interiorTrackerOrigin() {
453     return s2coords.FaceUVtoXYZ(0, -1, -1).normalize();
454   }
455 
456   // Resets the index to its original state and returns ownership of all
457   // shapes to the caller.  This method is much more efficient than removing
458   // all shapes one at a time.
459   S2Shape[] releaseAll()
460   in {
461     assert(_updateState is null);
462   } do {
463     _cellMap.clear();
464     _pendingAdditionsBegin = 0;
465     _pendingRemovals.length = 0;
466     atomicStore!(MemoryOrder.raw)(_indexStatus, IndexStatus.FRESH);
467     S2Shape[] result = _shapes[];
468     _shapes.length = 0;
469     return result;
470   }
471 
472   // Resets the index to its original state and deletes all shapes.  Any
473   // options specified via Init() are preserved.
474   void clear() {
475     releaseAll();
476   }
477 
478   // Returns the number of bytes currently occupied by the index (including any
479   // unused space at the end of vectors, etc). It has the same thread safety
480   // as the other "const" methods (see introduction).
481   override
482   size_t spaceUsed() const {
483     // TODO: Implement correctly when needed.
484     return this.sizeof;
485     /+
486     size_t size = sizeof(*this);
487     size += shapes_.capacity() * sizeof(std::unique_ptr<S2Shape>);
488     // cell_map_ itself is already included in sizeof(*this).
489     size += cell_map_.bytes_used() - sizeof(cell_map_);
490     size += cell_map_.size() * sizeof(S2ShapeIndexCell);
491     Iterator it;
492     for (it.InitStale(this, S2ShapeIndex::BEGIN); !it.done(); it.Next()) {
493       const S2ShapeIndexCell& cell = it.cell();
494       size += cell.shapes_.capacity() * sizeof(S2ClippedShape);
495       for (int s = 0; s < cell.num_clipped(); ++s) {
496         const S2ClippedShape& clipped = cell.clipped(s);
497         if (!clipped.is_inline()) {
498           size += clipped.num_edges() * sizeof(int32);
499         }
500       }
501     }
502     if (pending_removals_ != nullptr) {
503       size += pending_removals_->capacity() * sizeof(RemovedShape);
504     }
505 
506     return size;
507     +/
508   }
509 
510   // Calls to Add() and Release() are normally queued and processed on the
511   // first subsequent query (in a thread-safe way).  This has many advantages,
512   // the most important of which is that sometimes there *is* no subsequent
513   // query, which lets us avoid building the index completely.
514   //
515   // This method forces any pending updates to be applied immediately.
516   // Calling this method is rarely a good idea.  (One valid reason is to
517   // exclude the cost of building the index from benchmark results.)
518   void forceBuild() {
519     // No locks required because this is not a const method.  It is the client's
520     // responsibility to ensure correct thread synchronization.
521     if (atomicLoad!(MemoryOrder.raw)(_indexStatus) != IndexStatus.FRESH) {
522       applyUpdatesInternal();
523       atomicStore!(MemoryOrder.raw)(_indexStatus, IndexStatus.FRESH);
524     }
525   }
526 
527   // Returns true if there are no pending updates that need to be applied.
528   // This can be useful to avoid building the index unnecessarily, or for
529   // choosing between two different algorithms depending on whether the index
530   // is available.
531   //
532   // The returned index status may be slightly out of date if the index was
533   // built in a different thread.  This is fine for the intended use (as an
534   // efficiency hint), but it should not be used by internal methods  (see
535   // MaybeApplyUpdates).
536   bool isFresh() const {
537     return atomicLoad!(MemoryOrder.raw)(_indexStatus) == IndexStatus.FRESH;
538   }
539 
540  protected:
541   override
542   IteratorBase newIterator(InitialPosition pos) {
543     return new Iterator(this, pos);
544   }
545 
546  private:
547   /**
548    * A BatchDescriptor represents a set of pending updates that will be applied
549    * at the same time.  The batch consists of all updates with shape ids between
550    * the current value of "ShapeIndex._pendingAdditionsBegin" (inclusive) and
551    * "additionsEnd" (exclusive).  The first batch to be processed also
552    * implicitly includes all shapes being removed.  "numEdges" is the total
553    * number of edges that will be added or removed in this batch.
554    */
555   struct BatchDescriptor {
556     int additionsEnd;
557     int numEdges;
558   }
559 
560   /**
561    * FaceEdge and ClippedEdge store temporary edge data while the index is being
562    * updated.  FaceEdge represents an edge that has been projected onto a given
563    * face, while ClippedEdge represents the portion of that edge that has been
564    * clipped to a given S2Cell.
565    *
566    * While it would be possible to combine all the edge information into one
567    * structure, there are two good reasons for separating it:
568    *
569    *  - Memory usage.  Separating the two classes means that we only need to
570    *    store one copy of the per-face data no matter how many times an edge is
571    *    subdivided, and it also lets us delay computing bounding boxes until
572    *    they are needed for processing each face (when the dataset spans
573    *    multiple faces).
574    *
575    *  - Performance.  UpdateEdges is significantly faster on large polygons when
576    *    the data is separated, because it often only needs to access the data in
577    *    ClippedEdge and this data is cached more successfully.
578    */
579   struct FaceEdge {
580     int shapeId;        // The shape that this edge belongs to
581     int edgeId;         // Edge id within that shape
582     int maxLevel;       // Not desirable to subdivide this edge beyond this level
583     bool hasInterior;   // Belongs to a shape that has an interior
584     R2Point a, b;       // The edge endpoints, clipped to a given face
585     S2Shape.Edge edge;  // The edge endpoints
586   }
587 
588   struct ClippedEdge {
589     FaceEdge faceEdge;  // The original unclipped edge
590     R2Rect bound;       // Bounding box for the clipped portion
591   }
592 
593   /**
594    * Given a set of shapes, InteriorTracker keeps track of which shapes contain
595    * a particular point (the "focus").  It provides an efficient way to move the
596    * focus from one point to another and incrementally update the set of shapes
597    * which contain it.  We use this to compute which shapes contain the center
598    * of every S2CellId in the index, by advancing the focus from one cell center
599    * to the next.
600    *
601    * Initially the focus is at the start of the S2CellId space-filling curve.
602    * We then visit all the cells that are being added to the MutableS2ShapeIndex
603    * in increasing order of S2CellId.  For each cell, we draw two edges: one
604    * from the entry vertex to the center, and another from the center to the
605    * exit vertex (where "entry" and "exit" refer to the points where the
606    * space-filling curve enters and exits the cell).  By counting edge crossings
607    * we can incrementally compute which shapes contain the cell center.  Note
608    * that the same set of shapes will always contain the exit point of one cell
609    * and the entry point of the next cell in the index, because either (a) these
610    * two points are actually the same, or (b) the intervening cells in S2CellId
611    * order are all empty, and therefore there are no edge crossings if we follow
612    * this path from one cell to the other.
613    */
614   class InteriorTracker {
615   public:
616     /**
617      * Constructs the InteriorTracker.  You must call AddShape() for each shape
618      * that will be tracked before calling MoveTo() or DrawTo().
619      */
620     this() {
621       // As shapes are added, we compute which ones contain the start of the
622       // S2CellId space-filling curve by drawing an edge from S2::Origin() to this
623       // point and counting how many shape edges cross this edge.
624       _isActive = false;
625       _b = origin();
626       _nextS2CellId = S2CellId.begin(S2CellId.MAX_LEVEL);
627       _crosser = new S2EdgeCrosser();
628     }
629 
630     /**
631      * Returns the initial focus point when the InteriorTracker is constructed
632      * (corresponding to the start of the S2CellId space-filling curve).
633      */
634     static S2Point origin() {
635       // The start of the S2CellId space-filling curve.
636       return s2coords.FaceUVtoXYZ(0, -1, -1).normalize();
637     }
638 
639     /// Returns the current focus point (see above).
640     const(S2Point) focus() {
641       return _b;
642     }
643 
644     /// Returns true if any shapes are being tracked.
645     bool isActive() const {
646       return _isActive;
647     }
648 
649     /**
650      * Adds a shape whose interior should be tracked.  "is_inside" indicates
651      * whether the current focus point is inside the shape.  Alternatively, if
652      * the focus point is in the process of being moved (via MoveTo/DrawTo), you
653      * can also specify "is_inside" at the old focus point and call TestEdge()
654      * for every edge of the shape that might cross the current DrawTo() line.
655      * This updates the state to correspond to the new focus point.
656      *
657      * REQUIRES: shape->has_interior()
658      */
659     void addShape(int shape_id, bool contains_focus) {
660       _isActive = true;
661       if (contains_focus) {
662         toggleShape(shape_id);
663       }
664     }
665 
666     /**
667      * Moves the focus to the given point.  This method should only be used when
668      * it is known that there are no edge crossings between the old and new
669      * focus locations; otherwise use DrawTo().
670      */
671     void moveTo(in S2Point b) {
672       _b = b;
673     }
674 
675     /**
676      * Moves the focus to the given point.  After this method is called,
677      * TestEdge() should be called with all edges that may cross the line
678      * segment between the old and new focus locations.
679      */
680     void drawTo(in S2Point b) {
681       _a = _b;
682       _b = b;
683       _crosser.initialize(_a, _b);
684     }
685 
686     // Indicates that the given edge of the given shape may cross the line
687     // segment between the old and new focus locations (see DrawTo).
688     // REQUIRES: shape->has_interior()
689     void testEdge(int shape_id, in S2Shape.Edge edge) {
690       if (_crosser.edgeOrVertexCrossing(edge.v0, edge.v1)) {
691         toggleShape(shape_id);
692       }
693     }
694 
695 
696     // The set of shape ids that contain the current focus.
697     const(ShapeIdSet) shapeIds() const {
698       return _shapeIds;
699     }
700 
701     // Indicates that the last argument to MoveTo() or DrawTo() was the entry
702     // vertex of the given S2CellId, i.e. the tracker is positioned at the start
703     // of this cell.  By using this method together with at_cellid(), the caller
704     // can avoid calling MoveTo() in cases where the exit vertex of the previous
705     // cell is the same as the entry vertex of the current cell.
706     void setNextS2CellId(S2CellId next_cellid) {
707       _nextS2CellId = next_cellid.rangeMin();
708     }
709 
710     // Returns true if the focus is already at the entry vertex of the given
711     // S2CellId (provided that the caller calls set_next_cellid() as each cell
712     // is processed).
713     bool atCellId(S2CellId cellid) const {
714       return cellid.rangeMin() == _nextS2CellId;
715     }
716 
717     // Makes an internal copy of the state for shape ids below the given limit,
718     // and then clear the state for those shapes.  This is used during
719     // incremental updates to track the state of added and removed shapes
720     // separately.
721     void saveAndClearStateBefore(int limit_shape_id)
722     in {
723       assert(_savedIds.length == 0);
724     } do {
725       size_t limit = lowerBound(limit_shape_id);
726       _savedIds = _shapeIds[0 .. limit];
727       _shapeIds = _shapeIds[limit .. $];
728     }
729 
730     // Restores the state previously saved by SaveAndClearStateBefore().  This
731     // only affects the state for shape_ids below "limit_shape_id".
732     void restoreStateBefore(int limit_shape_id) {
733       _shapeIds = _shapeIds[lowerBound(limit_shape_id) .. $];
734       _shapeIds = _savedIds[] ~ _shapeIds[];
735       _savedIds.length = 0;
736     }
737 
738     override
739     string toString() const {
740       import std.conv;
741       return "InteriorTracker(isActive=" ~ _isActive.to!string ~ ", a=" ~ _a.to!string
742           ~ ", b=" ~ _b.to!string ~ ", nextS2CellId=" ~ _nextS2CellId.to!string ~ ")";
743     }
744 
745   private:
746     // Removes "shape_id" from _shapeIds if it exists, otherwise insert it.
747     void toggleShape(int shape_id) {
748       import std.algorithm : remove;
749       // Since shape_ids_.size() is typically *very* small (0, 1, or 2), it turns
750       // out to be significantly faster to maintain a sorted array rather than
751       // using an STL set or btree_set.
752       if (_shapeIds.length == 0) {
753         _shapeIds ~= shape_id;
754       } else if (_shapeIds[0] == shape_id) {
755         _shapeIds = _shapeIds.remove(0);
756       } else {
757         size_t pos = 0;
758         while (_shapeIds[pos] < shape_id) {
759           if (++pos == _shapeIds.length) {
760             _shapeIds ~= shape_id;
761             return;
762           }
763         }
764         if (_shapeIds[pos] == shape_id) {
765           _shapeIds = _shapeIds.remove(pos);
766         } else {
767           _shapeIds.insertInPlace(pos, shape_id);
768         }
769       }
770     }
771 
772     // Returns a pointer to the first entry "x" where x >= shape_id.
773     size_t lowerBound(int shape_id) {
774       size_t pos = 0;
775       while (pos != _shapeIds.length && _shapeIds[pos] < shape_id) {
776         ++pos;
777       }
778       return pos;
779     }
780 
781     bool _isActive;
782     S2Point _a, _b;
783     S2CellId _nextS2CellId;
784     S2EdgeCrosser _crosser;
785     ShapeIdSet _shapeIds;
786 
787     // Shape ids saved by SaveAndClearStateBefore().  The state is never saved
788     // recursively so we don't need to worry about maintaining a stack.
789     ShapeIdSet _savedIds;
790   }
791 
792 
793   /**
794    * EdgeAllocator provides temporary storage for new ClippedEdges that are
795    * created during indexing.  It is essentially a stack model, where edges are
796    * allocated as the recursion does down and freed as it comes back up.
797    *
798    * It also provides a mutable vector of FaceEdges that is used when
799    * incrementally updating the index (see AbsorbIndexCell).
800    */
801   class EdgeAllocator {
802   public:
803 
804     // Return a pointer to a newly allocated edge.  The EdgeAllocator
805     // retains ownership.
806     ref ClippedEdge newClippedEdge() {
807       if (_size == _clippedEdges.length) {
808         _clippedEdges ~= ClippedEdge();
809       }
810       return _clippedEdges[_size++];
811     }
812 
813     // Return the number of allocated edges.
814     size_t size() const {
815       return _size;
816     }
817 
818     // Reset the allocator to only contain the first "size" allocated edges.
819     void reset(size_t size) {
820       _size = size;
821     }
822 
823     FaceEdge[]* mutableFaceEdges() {
824       return &_faceEdges;
825     }
826 
827   private:
828     size_t _size = 0;
829     ClippedEdge[] _clippedEdges;
830 
831     FaceEdge[] _faceEdges;
832   }
833 
834   alias ShapeIdSet = int[];
835 
836   // Return true if this is the first update to the index.
837   bool isFirstUpdate() const {
838     // Note that it is not sufficient to check whether cell_map_ is empty, since
839     // entries are added during the update process.
840     return _pendingAdditionsBegin == 0;
841   }
842 
843   // Given that the given shape is being updated, return true if it is being
844   // removed (as opposed to being added).
845   bool isShapeBeingRemoved(int shape_id) const {
846     // All shape ids being removed are less than all shape ids being added.
847     return shape_id < _pendingAdditionsBegin;
848   }
849 
850   // Ensure that any pending updates have been applied.  This method must be
851   // called before accessing the cell_map_ field, even if the index_status_
852   // appears to be FRESH, because a memory barrier is required in order to
853   // ensure that all the index updates are visible if the updates were done in
854   // another thread.
855   void maybeApplyUpdates() {
856     // To avoid acquiring and releasing the spinlock on every query, we use
857     // atomic operations when testing whether the status is FRESH and when
858     // updating the status to be FRESH.  This guarantees that any thread that
859     // sees a status of FRESH will also see the corresponding index updates.
860     if (atomicLoad!(MemoryOrder.acq)(_indexStatus) != IndexStatus.FRESH) {
861       this.applyUpdatesThreadSafe();
862     }
863   }
864 
865   // Apply any pending updates in a thread-safe way.
866   void applyUpdatesThreadSafe() {
867     _lock.lock();
868     if (atomicLoad!(MemoryOrder.raw)(_indexStatus) == IndexStatus.FRESH) {
869       _lock.unlock();
870     } else if (atomicLoad!(MemoryOrder.raw)(_indexStatus) == IndexStatus.UPDATING) {
871       // Wait until the updating thread is finished.  We do this by attempting
872       // to lock a mutex that is held by the updating thread.  When this mutex
873       // is unlocked the index_status_ is guaranteed to be FRESH.
874       _updateState.numWaiting++;
875       _lock.unlock();
876       _updateState.waitMutex.lock();
877       _lock.lock();
878       _updateState.numWaiting--;
879       unlockAndSignal();  // Notify other waiting threads.
880     } else {
881       enforce(_indexStatus == IndexStatus.STALE);
882       atomicStore!(MemoryOrder.raw)(_indexStatus, IndexStatus.UPDATING);
883       // Allocate the extra state needed for thread synchronization.  We keep
884       // the spinlock held while doing this, because (1) memory allocation is
885       // fast, so the chance of a context switch while holding the lock is low;
886       // (2) by far the most common situation is that there is no contention,
887       // and this saves an extra lock and unlock step; (3) even in the rare case
888       // where there is contention, the main side effect is that some other
889       // thread will burn a few CPU cycles rather than sleeping.
890       _updateState = new UpdateState();
891       // lock_.Lock wait_mutex *before* calling Unlock() to ensure that all other
892       // threads will block on it.
893       _updateState.waitMutex.lock();
894       // Release the spinlock before doing any real work.
895       _lock.unlock();
896       applyUpdatesInternal();
897       _lock.lock();
898       // index_status_ can be updated to FRESH only while locked *and* using
899       // an atomic store operation, so that MaybeApplyUpdates() can check
900       // whether the index is FRESH without acquiring the spinlock.
901       atomicStore!(MemoryOrder.rel)(_indexStatus, IndexStatus.FRESH);
902       unlockAndSignal();  // Notify any waiting threads.
903     }
904   }
905 
906   // This method updates the index by applying all pending additions and
907   // removals.  It does *not* update index_status_ (see ApplyUpdatesThreadSafe).
908   void applyUpdatesInternal() {
909     // Check whether we have so many edges to process that we should process
910     // them in multiple batches to save memory.  Building the index can use up
911     // to 20x as much memory (per edge) as the final index size.
912     BatchDescriptor[] batches;
913     getUpdateBatches(batches);
914     int i = 0;
915     foreach (ref BatchDescriptor batch; batches) {
916       FaceEdge[][6] all_edges;
917       logger.logfDebug("Batch %d: shape_limit=%d, edges=%d\n",
918           i++, batch.additionsEnd, batch.numEdges);
919 
920       reserveSpace(batch, all_edges);
921       InteriorTracker tracker = new InteriorTracker();
922       if (_pendingRemovals) {
923         // The first batch implicitly includes all shapes being removed.
924         foreach (ref pending_removal; _pendingRemovals) {
925           removeShape(pending_removal, all_edges, tracker);
926         }
927         _pendingRemovals.length = 0;
928       }
929       for (int id = _pendingAdditionsBegin; id < batch.additionsEnd; ++id) {
930         addShape(id, all_edges, tracker);
931       }
932       for (int face = 0; face < 6; ++face) {
933         updateFaceEdges(face, all_edges[face], tracker);
934         // Save memory by clearing vectors after we are done with them.
935         foreach(edges; all_edges) {
936           edges.length = 0;
937         }
938       }
939       _pendingAdditionsBegin = batch.additionsEnd;
940     }
941     // It is the caller's responsibility to update index_status_.
942   }
943 
944   // Count the number of edges being updated, and break them into several
945   // batches if necessary to reduce the amount of memory needed.  (See the
946   // documentation for FLAGS_s2shape_index_tmp_memory_budget_mb.)
947   void getUpdateBatches(ref BatchDescriptor[] batches) const {
948     // Count the edges being removed and added.
949     int num_edges_removed = 0;
950     if (_pendingRemovals) {
951       foreach (pending_removal; _pendingRemovals) {
952         num_edges_removed += pending_removal.edges.length;
953       }
954     }
955     int num_edges_added = 0;
956     for (int id = _pendingAdditionsBegin; id < _shapes.length; ++id) {
957       const(S2Shape) shape = this.shape(id);
958       if (shape is null) continue;
959       num_edges_added += shape.numEdges();
960     }
961     int num_edges = num_edges_removed + num_edges_added;
962 
963     // The following memory estimates are based on heap profiling.
964     //
965     // The final size of a MutableS2ShapeIndex depends mainly on how finely the
966     // index is subdivided, as controlled by Options::max_edges_per_cell() and
967     // --s2shape_index_default_max_edges_per_cell. For realistic values of
968     // max_edges_per_cell() and shapes with moderate numbers of edges, it is
969     // difficult to get much below 8 bytes per edge.  [The minimum possible size
970     // is 4 bytes per edge (to store a 32-bit edge id in an S2ClippedShape) plus
971     // 24 bytes per shape (for the S2ClippedShape itself plus a pointer in the
972     // shapes_ vector.]
973     //
974     // The temporary memory consists mainly of the FaceEdge and ClippedEdge
975     // structures plus a ClippedEdge pointer for every level of recursive
976     // subdivision.  For very large indexes this can be 200 bytes per edge.
977     const size_t kFinalBytesPerEdge = 8;
978     const size_t kTmpBytesPerEdge = 200;
979     const size_t kTmpMemoryBudgetBytes = cast(size_t)(S2SHAPE_INDEX_TMP_MEMORY_BUDGET_MB) << 20;
980 
981     // We arbitrarily limit the number of batches just as a safety measure.
982     // With the current default memory budget of 100 MB, this limit is not
983     // reached even when building an index of 350 million edges.
984     const int kMaxUpdateBatches = 100;
985 
986     if (num_edges * kTmpBytesPerEdge <= kTmpMemoryBudgetBytes) {
987       // We can update all edges at once without exceeding kTmpMemoryBudgetBytes.
988       batches ~= BatchDescriptor(cast(int) _shapes.length, num_edges);
989       return;
990     }
991     // Otherwise, break the updates into up to several batches, where the size
992     // of each batch is chosen so that all batches use approximately the same
993     // high-water memory.  GetBatchSizes() returns the recommended number of
994     // edges in each batch.
995     int[] batch_sizes;
996     getBatchSizes(num_edges, kMaxUpdateBatches, kFinalBytesPerEdge,
997         kTmpBytesPerEdge, kTmpMemoryBudgetBytes, batch_sizes);
998 
999     // We always process removed edges in a single batch, since (1) they already
1000     // take up a lot of memory because we have copied all their edges, and (2)
1001     // AbsorbIndexCell() uses (shapes_[id] == nullptr) to detect when a shape is
1002     // being removed, so in order to split the removals into batches we would
1003     // need a different approach (e.g., temporarily add fake entries to shapes_
1004     // and restore them back to nullptr as shapes are actually removed).
1005     num_edges = 0;
1006     if (_pendingRemovals) {
1007       num_edges += num_edges_removed;
1008       if (num_edges >= batch_sizes[0]) {
1009         batches ~= BatchDescriptor(_pendingAdditionsBegin, num_edges);
1010         num_edges = 0;
1011       }
1012     }
1013     // Keep adding shapes to each batch until the recommended number of edges
1014     // for that batch is reached, then move on to the next batch.
1015     for (int id = _pendingAdditionsBegin; id < _shapes.length; ++id) {
1016       const(S2Shape) shape = this.shape(id);
1017       if (shape is null) continue;
1018       num_edges += shape.numEdges();
1019       if (num_edges >= batch_sizes[batches.length]) {
1020         batches ~= BatchDescriptor(id + 1, num_edges);
1021         num_edges = 0;
1022       }
1023     }
1024     // Some shapes have no edges.  If a shape with no edges is the last shape to
1025     // be added or removed, then the final batch may not include it, so we fix
1026     // that problem here.
1027     batches[$-1].additionsEnd = cast(int) _shapes.length;
1028     enforce(batches.length <= kMaxUpdateBatches);
1029   }
1030 
1031   // Given "num_items" items, each of which uses "tmp_bytes_per_item" while it
1032   // is being updated but only "final_bytes_per_item" in the end, divide the
1033   // items into batches that have approximately the same *total* memory usage
1034   // consisting of the temporary memory needed for the items in the current
1035   // batch plus the final size of all the items that have already been
1036   // processed.  Use the fewest number of batches (but never more than
1037   // "max_batches") such that the total memory usage does not exceed the
1038   // combined final size of all the items plus "tmp_memory_budget_bytes".
1039   static void getBatchSizes(int num_items, int max_batches,
1040       double final_bytes_per_item,
1041       double tmp_bytes_per_item,
1042       double tmp_memory_budget_bytes,
1043       ref int[] batch_sizes) {
1044     import std.algorithm : min, max;
1045     import std.math : pow;
1046     // This code tries to fit all the data into the same memory space
1047     // ("total_budget_bytes") at every iteration.  The data consists of some
1048     // number of processed items (at "final_bytes_per_item" each), plus some
1049     // number being updated (at "tmp_bytes_per_item" each).  The space occupied
1050     // by the items being updated is the "free space".  At each iteration, the
1051     // free space is multiplied by (1 - final_bytes_per_item/tmp_bytes_per_item)
1052     // as the items are converted into their final form.
1053     double final_bytes = num_items * final_bytes_per_item;
1054     double final_bytes_ratio = final_bytes_per_item / tmp_bytes_per_item;
1055     double free_space_multiplier = 1 - final_bytes_ratio;
1056 
1057     // The total memory budget is the greater of the final size plus the allowed
1058     // temporary memory, or the minimum amount of memory required to limit the
1059     // number of batches to "max_batches".
1060     double total_budget_bytes = max(
1061         final_bytes + tmp_memory_budget_bytes,
1062         final_bytes / (1 - pow(free_space_multiplier, max_batches)));
1063 
1064     // "max_batch_items" is the number of items in the current batch.
1065     double max_batch_items = total_budget_bytes / tmp_bytes_per_item;
1066     batch_sizes.length = 0;
1067     for (int i = 0; i + 1 < max_batches && num_items > 0; ++i) {
1068       int batch_items = min(num_items, cast(int)(max_batch_items + 1));
1069       batch_sizes ~= batch_items;
1070       num_items -= batch_items;
1071       max_batch_items *= free_space_multiplier;
1072     }
1073     enforce(batch_sizes.length <= max_batches);
1074   }
1075 
1076   // Reserve an appropriate amount of space for the top-level face edges in the
1077   // current batch.  This data structure uses about half of the temporary memory
1078   // needed during index construction.  Furthermore, if the arrays are grown via
1079   // push_back() then up to 10% of the total run time consists of copying data
1080   // as these arrays grow, so it is worthwhile to preallocate space for them.
1081   void reserveSpace(BatchDescriptor batch, ref FaceEdge[][6] all_edges) const {
1082     import std.algorithm : max;
1083     // If the number of edges is relatively small, then the fastest approach is
1084     // to simply reserve space on every face for the maximum possible number of
1085     // edges.  We use a different threshold for this calculation than for
1086     // deciding when to break updates into batches, because the cost/benefit
1087     // ratio is different.  (Here the only extra expense is that we need to
1088     // sample the edges to estimate how many edges per face there are.)
1089     const size_t kMaxCheapBytes = 30 << 20;  // 30 MB
1090     const int kMaxCheapEdges = kMaxCheapBytes / (6 * FaceEdge.sizeof);
1091     if (batch.numEdges <= kMaxCheapEdges) {
1092       for (int face = 0; face < 6; ++face) {
1093         all_edges[face].reserve(batch.numEdges);
1094       }
1095       return;
1096     }
1097     // Otherwise we estimate the number of edges on each face by taking a random
1098     // sample.  The goal is to come up with an estimate that is fast and
1099     // accurate for non-pathological geometry.  If our estimates happen to be
1100     // wrong, the vector will still grow automatically - the main side effects
1101     // are that memory usage will be larger (by up to a factor of 3), and
1102     // constructing the index will be about 10% slower.
1103     //
1104     // Given a desired sample size, we choose equally spaced edges from
1105     // throughout the entire data set.  We use a Bresenham-type algorithm to
1106     // choose the samples.
1107     const int kDesiredSampleSize = 10000;
1108     const int sample_interval = max(1, batch.numEdges / kDesiredSampleSize);
1109 
1110     // Initialize "edge_id" to be midway through the first sample interval.
1111     // Because samples are equally spaced the actual sample size may differ
1112     // slightly from the desired sample size.
1113     int edge_id = sample_interval / 2;
1114     const int actual_sample_size = (batch.numEdges + edge_id) / sample_interval;
1115     int[6] face_count = [ 0, 0, 0, 0, 0, 0 ];
1116     if (_pendingRemovals) {
1117       foreach (removed; _pendingRemovals) {
1118         edge_id += removed.edges.length;
1119         while (edge_id >= sample_interval) {
1120           edge_id -= sample_interval;
1121           face_count[s2coords.GetFace(removed.edges[edge_id].v0)] += 1;
1122         }
1123       }
1124     }
1125     for (int id = _pendingAdditionsBegin; id < batch.additionsEnd; ++id) {
1126       const(S2Shape) shape = this.shape(id);
1127       if (shape is null) continue;
1128       edge_id += shape.numEdges();
1129       while (edge_id >= sample_interval) {
1130         edge_id -= sample_interval;
1131         // For speed, we only count the face containing one endpoint of the
1132         // edge.  In general the edge could span all 6 faces (with padding), but
1133         // it's not worth the expense to compute this more accurately.
1134         face_count[s2coords.GetFace(shape.edge(edge_id).v0)] += 1;
1135       }
1136     }
1137     // Now given the raw face counts, compute a confidence interval such that we
1138     // will be unlikely to allocate too little space.  Computing accurate
1139     // binomial confidence intervals is expensive and not really necessary.
1140     // Instead we use a simple approximation:
1141     //  - For any face with at least 1 sample, we use at least a 4-sigma
1142     //    confidence interval.  (The chosen width is adequate for the worst case
1143     //    accuracy, which occurs when the face contains approximately 50% of the
1144     //    edges.)  Assuming that our sample is representative, the probability of
1145     //    reserving too little space is approximately 1 in 30,000.
1146     //  - For faces with no samples at all, we don't bother reserving space.
1147     //    It is quite likely that such faces are truly empty, so we save time
1148     //    and memory this way.  If the face does contain some edges, there will
1149     //    only be a few so it is fine to let the vector grow automatically.
1150     // On average, we reserve 2% extra space for each face that has geometry.
1151 
1152     // kMaxSemiWidth is the maximum semi-width over all probabilities p of a
1153     // 4-sigma binomial confidence interval with a sample size of 10,000.
1154     const double kMaxSemiWidth = 0.02;
1155     const double sample_ratio = 1.0 / actual_sample_size;
1156     for (int face = 0; face < 6; ++face) {
1157       if (face_count[face] == 0) continue;
1158       double fraction = sample_ratio * face_count[face] + kMaxSemiWidth;
1159       all_edges[face].reserve(cast(int)(fraction * batch.numEdges));
1160     }
1161   }
1162 
1163   // Clip all edges of the given shape to the six cube faces, add the clipped
1164   // edges to "all_edges", and start tracking its interior if necessary.
1165   void addShape(int id, ref FaceEdge[][6] all_edges, InteriorTracker tracker) const {
1166     const S2Shape shape = this.shape(id);
1167     if (shape is null) {
1168       return;  // This shape has already been removed.
1169     }
1170     // Construct a template for the edges to be added.
1171     FaceEdge edge;
1172     edge.shapeId = id;
1173     edge.hasInterior = shape.hasInterior();
1174     if (edge.hasInterior) {
1175       tracker.addShape(id, containsBruteForce(shape, tracker.focus()));
1176     }
1177     int num_edges = shape.numEdges();
1178     for (int e = 0; e < num_edges; ++e) {
1179       edge.edgeId = e;
1180       edge.edge = shape.edge(e);
1181       edge.maxLevel = getEdgeMaxLevel(edge.edge);
1182       addFaceEdge(edge, all_edges);
1183     }
1184   }
1185 
1186   void removeShape(
1187       in RemovedShape removed, ref FaceEdge[][6] all_edges, InteriorTracker tracker) const {
1188     FaceEdge edge;
1189     edge.edgeId = -1;  // Not used or needed for removed edges.
1190     edge.shapeId = removed.shapeId;
1191     edge.hasInterior = removed.hasInterior;
1192     if (edge.hasInterior) {
1193       tracker.addShape(edge.shapeId, removed.containsTrackerOrigin);
1194     }
1195     foreach (removed_edge; removed.edges) {
1196       edge.edge = removed_edge;
1197       edge.maxLevel = getEdgeMaxLevel(edge.edge);
1198       addFaceEdge(edge, all_edges);
1199     }
1200   }
1201 
1202   void addFaceEdge(ref FaceEdge edge, ref FaceEdge[][6] all_edges) const {
1203     import s2.s2edge_clipping : clipToPaddedFace;
1204     import std.math : fabs;
1205     // Fast path: both endpoints are on the same face, and are far enough from
1206     // the edge of the face that don't intersect any (padded) adjacent face.
1207     int a_face = s2coords.GetFace(edge.edge.v0);
1208     if (a_face == s2coords.GetFace(edge.edge.v1)) {
1209       s2coords.ValidFaceXYZtoUV(a_face, edge.edge.v0, edge.a);
1210       s2coords.ValidFaceXYZtoUV(a_face, edge.edge.v1, edge.b);
1211       const double kMaxUV = 1 - CELL_PADDING;
1212       if (fabs(edge.a[0]) <= kMaxUV && fabs(edge.a[1]) <= kMaxUV
1213           && fabs(edge.b[0]) <= kMaxUV && fabs(edge.b[1]) <= kMaxUV) {
1214         all_edges[a_face] ~= edge;
1215         return;
1216       }
1217     }
1218     // Otherwise we simply clip the edge to all six faces.
1219     for (int face = 0; face < 6; ++face) {
1220       if (clipToPaddedFace(edge.edge.v0, edge.edge.v1, face,
1221               CELL_PADDING, edge.a, edge.b)) {
1222         all_edges[face] ~= edge;
1223       }
1224     }
1225   }
1226 
1227   // Given a face and a vector of edges that intersect that face, add or remove
1228   // all the edges from the index.  (An edge is added if shapes_[id] is not
1229   // nullptr, and removed otherwise.)
1230   void updateFaceEdges(int face, in FaceEdge[] face_edges, InteriorTracker tracker) {
1231     int num_edges = cast(int) face_edges.length;
1232     if (num_edges == 0 && tracker.shapeIds.length == 0) return;
1233 
1234     // Create the initial ClippedEdge for each FaceEdge.  Additional clipped
1235     // edges are created when edges are split between child cells.  We create
1236     // two arrays, one containing the edge data and another containing pointers
1237     // to those edges, so that during the recursion we only need to copy
1238     // pointers in order to propagate an edge to the correct child.
1239     ClippedEdge[] clipped_edge_storage;
1240     ClippedEdge[] clipped_edges;
1241     clipped_edge_storage.reserve(num_edges);
1242     clipped_edges.reserve(num_edges);
1243     R2Rect bound = R2Rect.empty();
1244     for (int e = 0; e < num_edges; ++e) {
1245       ClippedEdge clipped;
1246       clipped.faceEdge = face_edges[e];
1247       clipped.bound = R2Rect.fromPointPair(face_edges[e].a, face_edges[e].b);
1248       clipped_edge_storage ~= clipped;
1249       clipped_edges ~= clipped_edge_storage[$-1];
1250       bound.addRect(clipped.bound);
1251     }
1252     // Construct the initial face cell containing all the edges, and then update
1253     // all the edges in the index recursively.
1254     EdgeAllocator alloc = new EdgeAllocator();
1255     S2CellId face_id = S2CellId.fromFace(face);
1256     S2PaddedCell pcell = new S2PaddedCell(face_id, CELL_PADDING);
1257 
1258     // "disjoint_from_index" means that the current cell being processed (and
1259     // all its descendants) are not already present in the index.
1260     bool disjoint_from_index = isFirstUpdate();
1261     if (num_edges > 0) {
1262       S2CellId shrunk_id = shrinkToFit(pcell, bound);
1263       if (shrunk_id != pcell.id()) {
1264         // All the edges are contained by some descendant of the face cell.  We
1265         // can save a lot of work by starting directly with that cell, but if we
1266         // are in the interior of at least one shape then we need to create
1267         // index entries for the cells we are skipping over.
1268         skipCellRange(face_id.rangeMin(), shrunk_id.rangeMin(),
1269             tracker, alloc, disjoint_from_index);
1270         pcell = new S2PaddedCell(shrunk_id, CELL_PADDING);
1271         updateEdges(pcell, clipped_edges, tracker, alloc, disjoint_from_index);
1272         skipCellRange(shrunk_id.rangeMax().next(), face_id.rangeMax().next(),
1273             tracker, alloc, disjoint_from_index);
1274         return;
1275       }
1276     }
1277     // Otherwise (no edges, or no shrinking is possible), subdivide normally.
1278     updateEdges(pcell, clipped_edges, tracker, alloc, disjoint_from_index);
1279   }
1280 
1281   S2CellId shrinkToFit(in S2PaddedCell pcell, in R2Rect bound) {
1282     S2CellId shrunk_id = pcell.shrinkToFit(bound);
1283     if (!isFirstUpdate() && shrunk_id != pcell.id()) {
1284       // Don't shrink any smaller than the existing index cells, since we need
1285       // to combine the new edges with those cells.
1286       // Use InitStale() to avoid applying updated recursively.
1287       Iterator iter = new Iterator();
1288       iter.initStale(this);
1289       CellRelation r = iter.locate(shrunk_id);
1290       if (r == CellRelation.INDEXED) { shrunk_id = iter.id(); }
1291     }
1292     return shrunk_id;
1293   }
1294 
1295   // Skip over the cells in the given range, creating index cells if we are
1296   // currently in the interior of at least one shape.
1297   void skipCellRange(
1298       S2CellId begin, S2CellId end, InteriorTracker tracker, EdgeAllocator alloc,
1299       bool disjoint_from_index) {
1300     import s2.s2cell_union;
1301     // If we aren't in the interior of a shape, then skipping over cells is easy.
1302     if (tracker.shapeIds().empty()) return;
1303 
1304     // Otherwise generate the list of cell ids that we need to visit, and create
1305     // an index entry for each one.
1306     foreach (S2CellId skipped_id; S2CellUnion.fromBeginEnd(begin, end).cellIds()) {
1307       ClippedEdge[] clipped_edges;
1308       updateEdges(new S2PaddedCell(skipped_id, CELL_PADDING),
1309           clipped_edges, tracker, alloc, disjoint_from_index);
1310     }
1311   }
1312 
1313   // Given a cell and a set of ClippedEdges whose bounding boxes intersect that
1314   // cell, add or remove all the edges from the index.  Temporary space for
1315   // edges that need to be subdivided is allocated from the given EdgeAllocator.
1316   // "disjoint_from_index" is an optimization hint indicating that cell_map_
1317   // does not contain any entries that overlap the given cell.
1318   void updateEdges(S2PaddedCell pcell, ref ClippedEdge[] edges,
1319       InteriorTracker tracker,
1320       EdgeAllocator alloc,
1321       bool disjoint_from_index)
1322   in {
1323     // Cases where an index cell is not needed should be detected before this.
1324     assert(edges.length != 0 || tracker.shapeIds().length != 0);
1325   } do {
1326     // This function is recursive with a maximum recursion depth of 30
1327     // (S2CellId::kMaxLevel).  Note that using an explicit stack does not seem
1328     // to be any faster based on profiling.
1329 
1330     // Incremental updates are handled as follows.  All edges being added or
1331     // removed are combined together in "edges", and all shapes with interiors
1332     // are tracked using "tracker".  We subdivide recursively as usual until we
1333     // encounter an existing index cell.  At this point we "absorb" the index
1334     // cell as follows:
1335     //
1336     //   - Edges and shapes that are being removed are deleted from "edges" and
1337     //     "tracker".
1338     //   - All remaining edges and shapes from the index cell are added to
1339     //     "edges" and "tracker".
1340     //   - Continue subdividing recursively, creating new index cells as needed.
1341     //   - When the recursion gets back to the cell that was absorbed, we
1342     //     restore "edges" and "tracker" to their previous state.
1343     //
1344     // Note that the only reason that we include removed shapes in the recursive
1345     // subdivision process is so that we can find all of the index cells that
1346     // contain those shapes efficiently, without maintaining an explicit list of
1347     // index cells for each shape (which would be expensive in terms of memory).
1348     bool index_cell_absorbed = false;
1349     if (!disjoint_from_index) {
1350       // There may be existing index cells contained inside "pcell".  If we
1351       // encounter such a cell, we need to combine the edges being updated with
1352       // the existing cell contents by "absorbing" the cell.
1353       // Use InitStale() to avoid applying updated recursively.
1354       Iterator iter = new Iterator();
1355       iter.initStale(this);
1356       CellRelation r = iter.locate(pcell.id());
1357       if (r == CellRelation.DISJOINT) {
1358         disjoint_from_index = true;
1359       } else if (r == CellRelation.INDEXED) {
1360         // Absorb the index cell by transferring its contents to "edges" and
1361         // deleting it.  We also start tracking the interior of any new shapes.
1362         absorbIndexCell(pcell, iter, edges, tracker, alloc);
1363         index_cell_absorbed = true;
1364         disjoint_from_index = true;
1365       } else {
1366         enforce(r == CellRelation.SUBDIVIDED);
1367       }
1368     }
1369 
1370     // If there are existing index cells below us, then we need to keep
1371     // subdividing so that we can merge with those cells.  Otherwise,
1372     // MakeIndexCell checks if the number of edges is small enough, and creates
1373     // an index cell if possible (returning true when it does so).
1374     if (!disjoint_from_index || !makeIndexCell(pcell, edges, tracker)) {
1375       // Reserve space for the edges that will be passed to each child.  This is
1376       // important since otherwise the running time is dominated by the time
1377       // required to grow the vectors.  The amount of memory involved is
1378       // relatively small, so we simply reserve the maximum space for every child.
1379       ClippedEdge[][2][2] child_edges;  // [i][j]
1380       int num_edges = cast(int) edges.length;
1381       for (int i = 0; i < 2; ++i) {
1382         for (int j = 0; j < 2; ++j) {
1383           child_edges[i][j].reserve(num_edges);
1384         }
1385       }
1386 
1387       // Remember the current size of the EdgeAllocator so that we can free any
1388       // edges that are allocated during edge splitting.
1389       size_t alloc_size = alloc.size();
1390 
1391       // Compute the middle of the padded cell, defined as the rectangle in
1392       // (u,v)-space that belongs to all four (padded) children.  By comparing
1393       // against the four boundaries of "middle" we can determine which children
1394       // each edge needs to be propagated to.
1395       const R2Rect middle = pcell.middle();
1396 
1397       // Build up a vector edges to be passed to each child cell.  The (i,j)
1398       // directions are left (i=0), right (i=1), lower (j=0), and upper (j=1).
1399       // Note that the vast majority of edges are propagated to a single child.
1400       // This case is very fast, consisting of between 2 and 4 floating-point
1401       // comparisons and copying one pointer.  (ClipVAxis is inline.)
1402       for (int e = 0; e < num_edges; ++e) {
1403         const ClippedEdge edge = edges[e];
1404         if (edge.bound[0].hi() <= middle[0].lo()) {
1405           // Edge is entirely contained in the two left children.
1406           clipVAxis(edge, middle[1], child_edges[0], alloc);
1407         } else if (edge.bound[0].lo() >= middle[0].hi()) {
1408           // Edge is entirely contained in the two right children.
1409           clipVAxis(edge, middle[1], child_edges[1], alloc);
1410         } else if (edge.bound[1].hi() <= middle[1].lo()) {
1411           // Edge is entirely contained in the two lower children.
1412           child_edges[0][0] ~= clipUBound(edge, 1, middle[0].hi(), alloc);
1413           child_edges[1][0] ~= clipUBound(edge, 0, middle[0].lo(), alloc);
1414         } else if (edge.bound[1].lo() >= middle[1].hi()) {
1415           // Edge is entirely contained in the two upper children.
1416           child_edges[0][1] ~= clipUBound(edge, 1, middle[0].hi(), alloc);
1417           child_edges[1][1] ~= clipUBound(edge, 0, middle[0].lo(), alloc);
1418         } else {
1419           // The edge bound spans all four children.  The edge itself intersects
1420           // either three or four (padded) children.
1421           ClippedEdge left = clipUBound(edge, 1, middle[0].hi(), alloc);
1422           clipVAxis(left, middle[1], child_edges[0], alloc);
1423           ClippedEdge right = clipUBound(edge, 0, middle[0].lo(), alloc);
1424           clipVAxis(right, middle[1], child_edges[1], alloc);
1425         }
1426       }
1427 
1428       // Now recursively update the edges in each child.  We call the children in
1429       // increasing order of S2CellId so that when the index is first constructed,
1430       // all insertions into cell_map_ are at the end (which is much faster).
1431       for (int pos = 0; pos < 4; ++pos) {
1432         int i, j;
1433         pcell.getChildIJ(pos, i, j);
1434         if (child_edges[i][j].length != 0 || tracker.shapeIds().length != 0) {
1435           updateEdges(new S2PaddedCell(pcell, i, j), child_edges[i][j],
1436               tracker, alloc, disjoint_from_index);
1437         }
1438       }
1439       // Free any temporary edges that were allocated during clipping.
1440       alloc.reset(alloc_size);
1441     }
1442     if (index_cell_absorbed) {
1443       // Restore the state for any edges being removed that we are tracking.
1444       tracker.restoreStateBefore(_pendingAdditionsBegin);
1445     }
1446   }
1447 
1448   // Absorb an index cell by transferring its contents to "edges" and/or
1449   // "tracker", and then delete this cell from the index.  If "edges" includes
1450   // any edges that are being removed, this method also updates their
1451   // InteriorTracker state to correspond to the exit vertex of this cell, and
1452   // saves the InteriorTracker state by calling SaveAndClearStateBefore().  It
1453   // is the caller's responsibility to restore this state by calling
1454   // RestoreStateBefore() when processing of this cell is finished.
1455   void absorbIndexCell(in S2PaddedCell pcell, in Iterator iter, ref ClippedEdge[] edges,
1456       InteriorTracker tracker, EdgeAllocator alloc)
1457   in {
1458     assert(pcell.id() == iter.id());
1459   } do {
1460     import s2.s2edge_clipping : clipToPaddedFace;
1461     // When we absorb a cell, we erase all the edges that are being removed.
1462     // However when we are finished with this cell, we want to restore the state
1463     // of those edges (since that is how we find all the index cells that need
1464     // to be updated).  The edges themselves are restored automatically when
1465     // UpdateEdges returns from its recursive call, but the InteriorTracker
1466     // state needs to be restored explicitly.
1467     //
1468     // Here we first update the InteriorTracker state for removed edges to
1469     // correspond to the exit vertex of this cell, and then save the
1470     // InteriorTracker state.  This state will be restored by UpdateEdges when
1471     // it is finished processing the contents of this cell.
1472     if (tracker.isActive() && edges.length != 0 &&
1473         isShapeBeingRemoved(edges[0].faceEdge.shapeId)) {
1474       // We probably need to update the InteriorTracker.  ("Probably" because
1475       // it's possible that all shapes being removed do not have interiors.)
1476       if (!tracker.atCellId(pcell.id())) {
1477         tracker.moveTo(pcell.getEntryVertex());
1478       }
1479       tracker.drawTo(pcell.getExitVertex());
1480       tracker.setNextS2CellId(pcell.id().next());
1481       foreach (ref const ClippedEdge edge; edges) {
1482         const FaceEdge face_edge = edge.faceEdge;
1483         if (!isShapeBeingRemoved(face_edge.shapeId)) {
1484           break;  // All shapes being removed come first.
1485         }
1486         if (face_edge.hasInterior) {
1487           tracker.testEdge(face_edge.shapeId, face_edge.edge);
1488         }
1489       }
1490     }
1491     // Save the state of the edges being removed, so that it can be restored
1492     // when we are finished processing this cell and its children.  We don't
1493     // need to save the state of the edges being added because they aren't being
1494     // removed from "edges" and will therefore be updated normally as we visit
1495     // this cell and its children.
1496     tracker.saveAndClearStateBefore(_pendingAdditionsBegin);
1497 
1498     // Create a FaceEdge for each edge in this cell that isn't being removed.
1499     FaceEdge[]* face_edges = alloc.mutableFaceEdges();
1500     face_edges.length = 0;
1501     bool tracker_moved = false;
1502     const S2ShapeIndexCell cell = iter.cell();
1503     for (int s = 0; s < cell.numClipped(); ++s) {
1504       const S2ClippedShape clipped = cell.clipped(s);
1505       int shape_id = clipped.shapeId();
1506       const S2Shape shape = this.shape(shape_id);
1507       if (shape is null) continue;  // This shape is being removed.
1508       int num_edges = clipped.numEdges();
1509 
1510       // If this shape has an interior, start tracking whether we are inside the
1511       // shape.  UpdateEdges() wants to know whether the entry vertex of this
1512       // cell is inside the shape, but we only know whether the center of the
1513       // cell is inside the shape, so we need to test all the edges against the
1514       // line segment from the cell center to the entry vertex.
1515       FaceEdge edge;
1516       edge.shapeId = shape.id();
1517       edge.hasInterior = shape.hasInterior();
1518       if (edge.hasInterior) {
1519         tracker.addShape(shape_id, clipped.containsCenter());
1520         // There might not be any edges in this entire cell (i.e., it might be
1521         // in the interior of all shapes), so we delay updating the tracker
1522         // until we see the first edge.
1523         if (!tracker_moved && num_edges > 0) {
1524           tracker.moveTo(pcell.getCenter());
1525           tracker.drawTo(pcell.getEntryVertex());
1526           tracker.setNextS2CellId(pcell.id());
1527           tracker_moved = true;
1528         }
1529       }
1530       for (int i = 0; i < num_edges; ++i) {
1531         int e = clipped.edge(i);
1532         edge.edgeId = e;
1533         edge.edge = shape.edge(e);
1534         edge.maxLevel = getEdgeMaxLevel(edge.edge);
1535         if (edge.hasInterior) tracker.testEdge(shape_id, edge.edge);
1536         if (!clipToPaddedFace(edge.edge.v0, edge.edge.v1,
1537                 pcell.id().face(), CELL_PADDING,
1538                 edge.a, edge.b)) {
1539           enforce(false, "Invariant failure in MutableS2ShapeIndex");
1540 
1541         }
1542         *face_edges ~= edge;
1543       }
1544     }
1545     // Now create a ClippedEdge for each FaceEdge, and put them in "new_edges".
1546     ClippedEdge[] new_edges;
1547     foreach (const FaceEdge face_edge; *face_edges) {
1548       ClippedEdge clipped = alloc.newClippedEdge();
1549       clipped.faceEdge = face_edge;
1550       clipped.bound = getClippedEdgeBound(face_edge.a, face_edge.b,
1551           pcell.bound());
1552       new_edges ~= clipped;
1553     }
1554     // Discard any edges from "edges" that are being removed, and append the
1555     // remainder to "new_edges".  (This keeps the edges sorted by shape id.)
1556     for (int i = 0; i < edges.length; ++i) {
1557       ClippedEdge clipped = edges[i];
1558       if (!isShapeBeingRemoved(clipped.faceEdge.shapeId)) {
1559         new_edges ~= edges[i .. $];
1560         break;
1561       }
1562     }
1563     // Update the edge list and delete this cell from the index.
1564     edges = new_edges;
1565     _cellMap.remove(pcell.id());
1566   }
1567 
1568   // Return the first level at which the edge will *not* contribute towards
1569   // the decision to subdivide.
1570   package int getEdgeMaxLevel(in S2Shape.Edge edge) const {
1571     import s2.s2metrics : AVG_EDGE;
1572     // Compute the maximum cell size for which this edge is considered "long".
1573     // The calculation does not need to be perfectly accurate, so we use Norm()
1574     // rather than Angle() for speed.
1575     double cell_size = (edge.v0 - edge.v1).norm() * S2SHAPE_INDEX_CELL_SIZE_TO_LONG_EDGE_RATIO;
1576     // Now return the first level encountered during subdivision where the
1577     // average cell size is at most "cell_size".
1578     return AVG_EDGE.getLevelForMaxValue(cell_size);
1579   }
1580 
1581   // Return the number of distinct shapes that are either associated with the
1582   // given edges, or that are currently stored in the InteriorTracker.
1583   static int countShapes(in ClippedEdge[] edges, in ShapeIdSet cshape_ids) {
1584     int count = 0;
1585     int last_shape_id = -1;
1586     size_t cnext_pos = 0;  // Next shape
1587     foreach (ClippedEdge edge; edges) {
1588       if (edge.faceEdge.shapeId != last_shape_id) {
1589         ++count;
1590         last_shape_id = edge.faceEdge.shapeId;
1591         // Skip over any containing shapes up to and including this one,
1592         // updating "count" appropriately.
1593         for (; cnext_pos != cshape_ids.length; ++cnext_pos) {
1594           if (cshape_ids[cnext_pos] > last_shape_id) break;
1595           if (cshape_ids[cnext_pos] < last_shape_id) ++count;
1596         }
1597       }
1598     }
1599     // Count any remaining containing shapes.
1600     count += (cshape_ids.length - cnext_pos);
1601     return count;
1602   }
1603 
1604   // Attempt to build an index cell containing the given edges, and return true
1605   // if successful.  (Otherwise the edges should be subdivided further.)
1606   bool makeIndexCell(in S2PaddedCell pcell, in ClippedEdge[] edges, InteriorTracker tracker) {
1607     if (edges.length == 0 && tracker.shapeIds().length == 0) {
1608       // No index cell is needed.  (In most cases this situation is detected
1609       // before we get to this point, but this can happen when all shapes in a
1610       // cell are removed.)
1611       return true;
1612     }
1613 
1614     // Count the number of edges that have not reached their maximum level yet.
1615     // Return false if there are too many such edges.
1616     int count = 0;
1617     foreach (const ClippedEdge edge; edges) {
1618       count += (pcell.level() < edge.faceEdge.maxLevel);
1619       if (count > _options.maxEdgesPerCell()) {
1620         return false;
1621       }
1622     }
1623 
1624     // Possible optimization: Continue subdividing as long as exactly one child
1625     // of "pcell" intersects the given edges.  This can be done by finding the
1626     // bounding box of all the edges and calling ShrinkToFit():
1627     //
1628     // S2CellId cellid = pcell.ShrinkToFit(GetRectBound(edges));
1629     //
1630     // Currently this is not beneficial; it slows down construction by 4-25%
1631     // (mainly computing the union of the bounding rectangles) and also slows
1632     // down queries (since more recursive clipping is required to get down to
1633     // the level of a spatial index cell).  But it may be worth trying again
1634     // once "contains_center" is computed and all algorithms are modified to
1635     // take advantage of it.
1636 
1637     // We update the InteriorTracker as follows.  For every S2Cell in the index
1638     // we construct two edges: one edge from entry vertex of the cell to its
1639     // center, and one from the cell center to its exit vertex.  Here "entry"
1640     // and "exit" refer the S2CellId ordering, i.e. the order in which points
1641     // are encountered along the S2 space-filling curve.  The exit vertex then
1642     // becomes the entry vertex for the next cell in the index, unless there are
1643     // one or more empty intervening cells, in which case the InteriorTracker
1644     // state is unchanged because the intervening cells have no edges.
1645 
1646     // Shift the InteriorTracker focus point to the center of the current cell.
1647     if (tracker.isActive() && edges.length != 0) {
1648       if (!tracker.atCellId(pcell.id())) {
1649         tracker.moveTo(pcell.getEntryVertex());
1650       }
1651       tracker.drawTo(pcell.getCenter());
1652       testAllEdges(edges, tracker);
1653     }
1654     // Allocate and fill a new index cell.  To get the total number of shapes we
1655     // need to merge the shapes associated with the intersecting edges together
1656     // with the shapes that happen to contain the cell center.
1657     const ShapeIdSet cshape_ids = tracker.shapeIds();
1658     int num_shapes = countShapes(edges, cshape_ids);
1659     S2ShapeIndexCell cell = new S2ShapeIndexCell();
1660     auto shapesAppender = cell.addShapes(num_shapes);
1661 
1662     // To fill the index cell we merge the two sources of shapes: "edge shapes"
1663     // (those that have at least one edge that intersects this cell), and
1664     // "containing shapes" (those that contain the cell center).  We keep track
1665     // of the index of the next intersecting edge and the next containing shape
1666     // as we go along.  Both sets of shape ids are already sorted.
1667     int enext = 0;
1668     int cnext_pos = 0;
1669     for (int i = 0; i < num_shapes; ++i) {
1670       S2ClippedShape clipped = S2ClippedShape();
1671       int eshape_id = numShapeIds(), cshape_id = eshape_id;  // Sentinels
1672       if (enext != edges.length) {
1673         eshape_id = edges[enext].faceEdge.shapeId;
1674       }
1675       if (cnext_pos != cshape_ids.length) {
1676         cshape_id = cshape_ids[cnext_pos];
1677       }
1678       int ebegin = enext;
1679       if (cshape_id < eshape_id) {
1680         // The entire cell is in the shape interior.
1681         clipped.initialize(cshape_id, 0);
1682         clipped.setContainsCenter(true);
1683         ++cnext_pos;
1684       } else {
1685         // Count the number of edges for this shape and allocate space for them.
1686         while (enext < edges.length && edges[enext].faceEdge.shapeId == eshape_id) {
1687           ++enext;
1688         }
1689         clipped.initialize(eshape_id, enext - ebegin);
1690         for (int e = ebegin; e < enext; ++e) {
1691           clipped.setEdge(e - ebegin, edges[e].faceEdge.edgeId);
1692         }
1693         if (cshape_id == eshape_id) {
1694           clipped.setContainsCenter(true);
1695           ++cnext_pos;
1696         }
1697       }
1698       shapesAppender ~= clipped;
1699     }
1700     // UpdateEdges() visits cells in increasing order of S2CellId, so during
1701     // initial construction of the index all insertions happen at the end.  It
1702     // is much faster to give an insertion hint in this case.  Otherwise the
1703     // hint doesn't do much harm.  With more effort we could provide a hint even
1704     // during incremental updates, but this is probably not worth the effort.
1705     _cellMap[pcell.id()] = cell;
1706 
1707     // Shift the InteriorTracker focus point to the exit vertex of this cell.
1708     if (tracker.isActive() && edges.length != 0) {
1709       tracker.drawTo(pcell.getExitVertex());
1710       testAllEdges(edges, tracker);
1711       tracker.setNextS2CellId(pcell.id().next());
1712     }
1713     return true;
1714   }
1715 
1716   // Call tracker.testEdge() on all edges from shapes that have interiors.
1717   static void testAllEdges(in ClippedEdge[] edges, InteriorTracker tracker) {
1718     foreach (ClippedEdge edge; edges) {
1719       FaceEdge face_edge = edge.faceEdge;
1720       if (face_edge.hasInterior) {
1721         tracker.testEdge(face_edge.shapeId, face_edge.edge);
1722       }
1723     }
1724   }
1725 
1726   // Given an edge and two bound endpoints that need to be updated, allocate and
1727   // return a new edge with the updated bound.
1728   static const(ClippedEdge) updateBound(
1729       in ClippedEdge edge,
1730       int u_end, double u,
1731       int v_end, double v,
1732       EdgeAllocator alloc) {
1733     ClippedEdge clipped = alloc.newClippedEdge();
1734     clipped.faceEdge = edge.faceEdge;
1735     clipped.bound[0][u_end] = u;
1736     clipped.bound[1][v_end] = v;
1737     clipped.bound[0][1-u_end] = edge.bound[0][1 - u_end];
1738     clipped.bound[1][1-v_end] = edge.bound[1][1 - v_end];
1739     enforce(!clipped.bound.isEmpty());
1740     enforce(edge.bound.contains(clipped.bound));
1741     return clipped;
1742   }
1743 
1744   // Given an edge, clip the given endpoint (lo=0, hi=1) of the u-axis so that
1745   // it does not extend past the given value.
1746   static const(ClippedEdge) clipUBound(
1747       in ClippedEdge edge, int u_end, double u, EdgeAllocator alloc) {
1748     import s2.s2edge_clipping : interpolateDouble;
1749     // First check whether the edge actually requires any clipping.  (Sometimes
1750     // this method is called when clipping is not necessary, e.g. when one edge
1751     // endpoint is in the overlap area between two padded child cells.)
1752     if (u_end == 0) {
1753       if (edge.bound[0].lo() >= u) return edge;
1754     } else {
1755       if (edge.bound[0].hi() <= u) return edge;
1756     }
1757     // We interpolate the new v-value from the endpoints of the original edge.
1758     // This has two advantages: (1) we don't need to store the clipped endpoints
1759     // at all, just their bounding box; and (2) it avoids the accumulation of
1760     // roundoff errors due to repeated interpolations.  The result needs to be
1761     // clamped to ensure that it is in the appropriate range.
1762     FaceEdge e = edge.faceEdge;
1763     double v = edge.bound[1].project(interpolateDouble(u, e.a[0], e.b[0], e.a[1], e.b[1]));
1764 
1765     // Determine which endpoint of the v-axis bound to update.  If the edge
1766     // slope is positive we update the same endpoint, otherwise we update the
1767     // opposite endpoint.
1768     int v_end = u_end ^ ((e.a[0] > e.b[0]) != (e.a[1] > e.b[1]));
1769     return updateBound(edge, u_end, u, v_end, v, alloc);
1770   }
1771 
1772   // Given an edge, clip the given endpoint (lo=0, hi=1) of the v-axis so that
1773   // it does not extend past the given value.
1774   static ClippedEdge clipVBound(
1775       in ClippedEdge edge,
1776       int v_end, double v,
1777       EdgeAllocator alloc) {
1778     import s2.s2edge_clipping : interpolateDouble;
1779     // See comments in ClipUBound.
1780     if (v_end == 0) {
1781       if (edge.bound[1].lo() >= v) return edge;
1782     } else {
1783       if (edge.bound[1].hi() <= v) return edge;
1784     }
1785     const FaceEdge e = edge.faceEdge;
1786     double u = edge.bound[0].project(
1787         interpolateDouble(v, e.a[1], e.b[1], e.a[0], e.b[0]));
1788     int u_end = v_end ^ ((e.a[0] > e.b[0]) != (e.a[1] > e.b[1]));
1789     return updateBound(edge, u_end, u, v_end, v, alloc);
1790   }
1791 
1792   // Given an edge and an interval "middle" along the v-axis, clip the edge
1793   // against the boundaries of "middle" and add the edge to the corresponding
1794   // children.
1795   static void clipVAxis(in ClippedEdge edge, in R1Interval middle,
1796       ref ClippedEdge[][2] child_edges, EdgeAllocator alloc) {
1797     if (edge.bound[1].hi() <= middle.lo()) {
1798       // Edge is entirely contained in the lower child.
1799       child_edges[0] ~= edge;
1800     } else if (edge.bound[1].lo() >= middle.hi()) {
1801       // Edge is entirely contained in the upper child.
1802       child_edges[1] ~= edge;
1803     } else {
1804       // The edge bound spans both children.
1805       child_edges[0] ~= clipVBound(edge, 1, middle.hi(), alloc);
1806       child_edges[1] ~= clipVBound(edge, 0, middle.lo(), alloc);
1807     }
1808   }
1809 
1810 
1811   // The amount by which cells are "padded" to compensate for numerical errors
1812   // when clipping line segments to cell boundaries.
1813   // The total error when clipping an edge comes from two sources:
1814   // (1) Clipping the original spherical edge to a cube face (the "face edge").
1815   //     The maximum error in this step is S2::kFaceClipErrorUVCoord.
1816   // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary.
1817   //     The maximum error in this step is S2::kEdgeClipErrorUVCoord.
1818   // Finally, since we encounter the same errors when clipping query edges, we
1819   // double the total error so that we only need to pad edges during indexing
1820   // and not at query time.
1821   package enum double CELL_PADDING = 2 * (FACE_CLIP_ERROR_UV_COORD + EDGE_CLIP_ERROR_UV_COORD);
1822 
1823   // The shapes in the index, accessed by their shape id.  Removed shapes are
1824   // replaced by nullptr pointers.
1825   S2Shape[] _shapes;
1826 
1827   // A map from S2CellId to the set of clipped shapes that intersect that
1828   // cell.  The cell ids cover a set of non-overlapping regions on the
1829   // sphere.  Note that this field is updated lazily (see below).  Const
1830   // methods *must* call MaybeApplyUpdates() before accessing this field.
1831   // (The easiest way to achieve this is simply to use an Iterator.)
1832   CellMap _cellMap;
1833 
1834   // The options supplied for this index.
1835   Options _options;
1836 
1837   // The id of the first shape that has been queued for addition but not
1838   // processed yet.
1839   int _pendingAdditionsBegin = 0;
1840 
1841   // The representation of an edge that has been queued for removal.
1842   struct RemovedShape {
1843     int shapeId;
1844     bool hasInterior;
1845     bool containsTrackerOrigin;
1846     S2Shape.Edge[] edges;
1847   }
1848 
1849   // The set of shapes that have been queued for removal but not processed
1850   // yet.  Note that we need to copy the edge data since the caller is free to
1851   // destroy the shape once Release() has been called.  This field is present
1852   // only when there are removed shapes to process (to save memory).
1853   RemovedShape[] _pendingRemovals;
1854 
1855   // Additions and removals are queued and processed on the first subsequent
1856   // query.  There are several reasons to do this:
1857   //
1858   //  - It is significantly more efficient to process updates in batches.
1859   //  - Often the index will never be queried, in which case we can save both
1860   //    the time and memory required to build it.  Examples:
1861   //     + S2Loops that are created simply to pass to an S2Polygon.  (We don't
1862   //       need the S2Loop index, because S2Polygon builds its own index.)
1863   //     + Applications that load a database of geometry and then query only
1864   //       a small fraction of it.
1865   //     + Applications that only read and write geometry (Decode/Encode).
1866   //
1867   // The main drawback is that we need to go to some extra work to ensure that
1868   // "const" methods are still thread-safe.  Note that the goal is *not* to
1869   // make this class thread-safe in general, but simply to hide the fact that
1870   // we defer some of the indexing work until query time.
1871   //
1872   // The textbook approach to this problem would be to use a mutex and a
1873   // condition variable.  Unfortunately pthread mutexes are huge (40 bytes).
1874   // Instead we use spinlock (which is only 4 bytes) to guard a few small
1875   // fields representing the current update status, and only create additional
1876   // state while the update is actually occurring.
1877   shared(SpinLock) _lock;
1878 
1879   enum IndexStatus {
1880     STALE,     // There are pending updates.
1881     UPDATING,  // Updates are currently being applied.
1882     FRESH     // There are no pending updates.
1883   }
1884   // Reads and writes to this field are guarded by "lock_".
1885   shared IndexStatus _indexStatus;
1886 
1887   // UpdateState holds temporary data related to thread synchronization.  It
1888   // is only allocated while updates are being applied.
1889   class UpdateState {
1890     // This mutex is used as a condition variable.  It is locked by the
1891     // updating thread for the entire duration of the update; other threads
1892     // lock it in order to wait until the update is finished.
1893     Mutex waitMutex;
1894 
1895     // The number of threads currently waiting on "wait_mutex_".  The
1896     // UpdateState can only be freed when this number reaches zero.
1897     //
1898     // Reads and writes to this field are guarded by "lock_".
1899     int numWaiting;
1900 
1901     this() {
1902       waitMutex = new Mutex();
1903     }
1904   }
1905   UpdateState _updateState;
1906 
1907   // Documented in the .cc file.
1908   void unlockAndSignal() {
1909     _lock.unlock();
1910     _updateState.waitMutex.unlock();
1911     if (_updateState.numWaiting == 0) {
1912       _updateState = null;
1913     }
1914   }
1915 }