The C and C++ Include Header Files
/usr/include/nodejs/deps/v8/include/v8-profiler.h
$ cat /usr/include/nodejs/deps/v8/include/v8-profiler.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_

#include <limits.h>

#include <memory>
#include <unordered_set>
#include <vector>
13 14 #include "v8-local-handle.h" // NOLINT(build/include_directory) 15 #include "v8-message.h" // NOLINT(build/include_directory) 16 #include "v8-persistent-handle.h" // NOLINT(build/include_directory) 17 18 /** 19 * Profiler support for the V8 JavaScript engine. 20 */ 21 namespace v8 { 22 23 enum class EmbedderStateTag : uint8_t; 24 class HeapGraphNode; 25 struct HeapStatsUpdate; 26 class Object; 27 enum StateTag : int; 28 29 using NativeObject = void*; 30 using SnapshotObjectId = uint32_t; 31 using ProfilerId = uint32_t; 32 33 struct CpuProfileDeoptFrame { 34 int script_id; 35 size_t position; 36 }; 37 38 namespace internal { 39 class CpuProfile; 40 } // namespace internal 41 42 } // namespace v8 43 44 #ifdef V8_OS_WIN 45 template class V8_EXPORT std::vector
; 46 #endif 47 48 namespace v8 { 49 50 struct V8_EXPORT CpuProfileDeoptInfo { 51 /** A pointer to a static string owned by v8. */ 52 const char* deopt_reason; 53 std::vector
stack; 54 }; 55 56 } // namespace v8 57 58 #ifdef V8_OS_WIN 59 template class V8_EXPORT std::vector
#endif

namespace v8 {

/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  // An annotation hinting at the source of a CpuProfileNode.
  enum SourceType {
    // User-supplied script with associated resource information.
    kScript = 0,
    // Native scripts and provided builtins.
    kBuiltin = 1,
    // Callbacks into native code.
    kCallback = 2,
    // VM-internal functions or state.
    kInternal = 3,
    // A node that failed to symbolize.
    kUnresolved = 4,
  };

  /** Returns function name (empty string for anonymous functions.) */
  Local<String> GetFunctionName() const;

  /**
   * Returns function name (empty string for anonymous functions.)
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns id of the script where function is located. */
  int GetScriptId() const;

  /** Returns resource name for script from where the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns resource name for script from where the function originates.
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Return true if the script from where the function originates is flagged as
   * being shared cross-origin.
   */
  bool IsScriptSharedCrossOrigin() const;

  /**
   * Returns the number, 1-based, of the line where the function originates.
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns 1-based number of the column where the function originates.
   * kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines that collect the samples.
   */
  unsigned int GetHitLineCount() const;

  /** Returns the set of source lines that collect the samples.
   *  The caller allocates buffer and responsible for releasing it.
   *  True if all available entries are copied, otherwise false.
   *  The function copies nothing if buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /** Returns bailout reason for the function
   *  if the optimization was disabled for it.
   */
  const char* GetBailoutReason() const;

  /**
   * Returns the count of samples where the function was currently executing.
   */
  unsigned GetHitCount() const;

  /** Returns id of the node. The id is unique within the tree */
  unsigned GetNodeId() const;

  /**
   * Gets the type of the source which the node was captured from.
   */
  SourceType GetSourceType() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves the ancestor node, or null if the root. */
  const CpuProfileNode* GetParent() const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};

/**
 * CpuProfile contains a CPU profile in a form of top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  /** Returns CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns number of samples recorded. The samples are not recorded unless
   * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
   */
  int GetSamplesCount() const;

  /**
   * Returns profile node corresponding to the top frame the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns time when the profile recording was started (in microseconds)
   * since some unspecified starting point.
   */
  int64_t GetStartTime() const;

  /**
   * Returns state of the vm when sample was captured.
   */
  StateTag GetSampleState(int index) const;

  /**
   * Returns state of the embedder when sample was captured.
   */
  EmbedderStateTag GetSampleEmbedderState(int index) const;

  /**
   * Returns time when the profile recording was stopped (in microseconds)
   * since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();
};
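
// Editor's illustration (not part of v8-profiler.h): when samples are
// recorded, each one can be tied back to its top frame and a timestamp
// relative to GetStartTime(). ListSamples is a hypothetical helper.
#include <cstdio>
#include "v8-profiler.h"
#include "v8.h"

void ListSamples(v8::Isolate* isolate, const v8::CpuProfile* profile) {
  const int64_t start = profile->GetStartTime();
  for (int i = 0; i < profile->GetSamplesCount(); ++i) {
    const v8::CpuProfileNode* top = profile->GetSample(i);
    v8::String::Utf8Value name(isolate, top->GetFunctionName());
    std::printf("+%lld us  %s\n",
                static_cast<long long>(profile->GetSampleTimestamp(i) - start),
                *name);
  }
}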

enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};

// Determines how names are derived for functions sampled.
enum CpuProfilingNamingMode {
  // Use the immediate name of functions at compilation time.
  kStandardNaming,
  // Use more verbose naming for functions without names, inferred from scope
  // where possible.
  kDebugNaming,
};

enum CpuProfilingLoggingMode {
  // Enables logging when a profile is active, and disables logging when all
  // profiles are detached.
  kLazyLogging,
  // Enables logging for the lifetime of the CpuProfiler. Calls to
  // StartRecording are faster, at the expense of runtime overhead.
  kEagerLogging,
};

// Enum for returning profiling status. Once StartProfiling is called,
// we want to return to clients whether the profiling was able to start
// correctly, or return a descriptive error.
enum class CpuProfilingStatus {
  kStarted,
  kAlreadyStarted,
  kErrorTooManyProfilers
};

/**
 * Result from StartProfiling returning the Profiling Status, and
 * id of the started profiler, or 0 if profiler is not started
 */
struct CpuProfilingResult {
  const ProfilerId id;
  const CpuProfilingStatus status;
};

/**
 * Delegate for when max samples reached and samples are discarded.
 */
class V8_EXPORT DiscardedSamplesDelegate {
 public:
  DiscardedSamplesDelegate() = default;

  virtual ~DiscardedSamplesDelegate() = default;
  virtual void Notify() = 0;

  ProfilerId GetId() const { return profiler_id_; }

 private:
  friend internal::CpuProfile;

  void SetId(ProfilerId id) { profiler_id_ = id; }

  ProfilerId profiler_id_;
};

/**
 * Optional profiling attributes.
 */
class V8_EXPORT CpuProfilingOptions {
 public:
  // Indicates that the sample buffer size should not be explicitly limited.
  static const unsigned kNoSampleLimit = UINT_MAX;

  /**
   * \param mode Type of computation of stack frame line numbers.
   * \param max_samples The maximum number of samples that should be recorded by
   *                    the profiler. Samples obtained after this limit will be
   *                    discarded.
   * \param sampling_interval_us controls the profile-specific target
   *                             sampling interval. The provided sampling
   *                             interval will be snapped to the next lowest
   *                             non-zero multiple of the profiler's sampling
   *                             interval, set via SetSamplingInterval(). If
   *                             zero, the sampling interval will be equal to
   *                             the profiler's sampling interval.
   * \param filter_context If specified, profiles will only contain frames
   *                       using this context. Other frames will be elided.
   */
  CpuProfilingOptions(
      CpuProfilingMode mode = kLeafNodeLineNumbers,
      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
      MaybeLocal<Context> filter_context = MaybeLocal<Context>());

  CpuProfilingMode mode() const { return mode_; }
  unsigned max_samples() const { return max_samples_; }
  int sampling_interval_us() const { return sampling_interval_us_; }

 private:
  friend class internal::CpuProfile;

  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
  void* raw_filter_context() const;

  CpuProfilingMode mode_;
  unsigned max_samples_;
  int sampling_interval_us_;
  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
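
// Editor's illustration (not part of v8-profiler.h): CpuProfilingOptions and
// DiscardedSamplesDelegate are typically combined when starting a bounded
// profile. SampleLossLogger and MakeBoundedOptions are invented names.
#include <cstdio>
#include "v8-profiler.h"

class SampleLossLogger : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override {
    // Called once the profiler starts discarding samples past max_samples.
    std::printf("profiler %u is discarding samples\n", GetId());
  }
};

v8::CpuProfilingOptions MakeBoundedOptions() {
  // Caller-line-number nodes, at most 10000 samples, 500 us target interval.
  return v8::CpuProfilingOptions(v8::kCallerLineNumbers,
                                 /*max_samples=*/10000,
                                 /*sampling_interval_us=*/500);
}
// A std::unique_ptr<SampleLossLogger> would be passed to CpuProfiler::Start,
// which is declared further below in this header.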

/**
 * Interface for controlling CPU profiling. Instance of the
 * profiler can be created using v8::CpuProfiler::New method.
 */
class V8_EXPORT CpuProfiler {
 public:
  /**
   * Creates a new CPU profiler for the |isolate|. The isolate must be
   * initialized. The profiler object must be disposed after use by calling
   * |Dispose| method.
   */
  static CpuProfiler* New(Isolate* isolate,
                          CpuProfilingNamingMode = kDebugNaming,
                          CpuProfilingLoggingMode = kLazyLogging);

  /**
   * Synchronously collect current stack sample in all profilers attached to
   * the |isolate|. The call does not affect number of ticks recorded for
   * the current top node.
   */
  static void CollectSample(Isolate* isolate);

  /**
   * Disposes the CPU profiler object.
   */
  void Dispose();

  /**
   * Changes default CPU profiler sampling interval to the specified number
   * of microseconds. Default interval is 1000us. This method must be called
   * when there are no profiles being recorded.
   */
  void SetSamplingInterval(int us);

  /**
   * Sets whether or not the profiler should prioritize consistency of sample
   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
   * may result in greater variance in sample timings from the platform's
   * scheduler. Defaults to enabled. This method must be called when there are
   * no profiles being recorded.
   */
  void SetUsePreciseSampling(bool);

  /**
   * Starts collecting a CPU profile. Several profiles may be collected at once.
   * Generates an anonymous profiler, without a String identifier.
   */
  CpuProfilingResult Start(
      CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts collecting a CPU profile. Title may be an empty string. Several
   * profiles may be collected at once. Attempts to start collecting several
   * profiles with the same title are silently ignored.
   */
  CpuProfilingResult Start(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingResult Start(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);

  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingResult Start(Local<String> title, bool record_samples = false);

  /**
   * Starts collecting a CPU profile. Title may be an empty string. Several
   * profiles may be collected at once. Attempts to start collecting several
   * profiles with the same title are silently ignored.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);

  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingStatus StartProfiling(Local<String> title,
                                    bool record_samples = false);

  /**
   * Stops collecting CPU profile with a given id and returns it.
   */
  CpuProfile* Stop(ProfilerId id);

  /**
   * Stops collecting CPU profile with a given title and returns it.
   * If the title given is empty, finishes the last profile started.
   */
  CpuProfile* StopProfiling(Local<String> title);

  /**
   * Generate more detailed source positions to code objects. This results in
   * better results when mapping profiling samples to script source.
   */
  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);

 private:
  CpuProfiler();
  ~CpuProfiler();
  CpuProfiler(const CpuProfiler&);
  CpuProfiler& operator=(const CpuProfiler&);
};
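
// Editor's illustration (not part of v8-profiler.h): a typical embedder flow
// with the CpuProfiler declared above. ProfileWork is an invented name, error
// handling is omitted, and the isolate must already be initialized.
#include "v8-profiler.h"
#include "v8.h"

void ProfileWork(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  profiler->SetSamplingInterval(100);  // 100 us; set before starting.

  v8::Local<v8::String> title =
      v8::String::NewFromUtf8Literal(isolate, "startup");
  profiler->StartProfiling(title, /*record_samples=*/true);

  // ... run the JavaScript being measured ...

  v8::CpuProfile* profile = profiler->StopProfiling(title);
  // Inspect profile->GetTopDownRoot() here (e.g. with DumpNode() above),
  // then release both objects.
  profile->Delete();
  profiler->Dispose();
}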

/**
 * HeapSnapshotEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns destination node. */
  const HeapGraphNode* GetToNode() const;
};

/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns node name. Depending on node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};
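
// Editor's illustration (not part of v8-profiler.h): GetChild() returns edges
// rather than nodes, so a snapshot walk alternates between the two, and
// visited ids must be tracked because the heap is a graph with cycles.
// AccumulateSizes is an invented helper.
#include <cstddef>
#include <map>
#include <set>
#include "v8-profiler.h"

void AccumulateSizes(const v8::HeapGraphNode* node,
                     std::set<v8::SnapshotObjectId>* seen,
                     std::map<v8::HeapGraphNode::Type, size_t>* by_type) {
  if (!seen->insert(node->GetId()).second) return;  // already visited
  (*by_type)[node->GetType()] += node->GetShallowSize();
  for (int i = 0; i < node->GetChildrenCount(); ++i) {
    const v8::HeapGraphEdge* edge = node->GetChild(i);
    AccumulateSizes(edge->GetToNode(), seen, by_type);
  }
}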

/**
 * An interface for exporting data from V8, using "push" model.
 */
class V8_EXPORT OutputStream {
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  }
};

/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns total nodes count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns a max seen JS object Id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepare a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of specified size.
   * The total length of the serialized snapshot is unknown in
   * advance, it can be roughly equal to JS heap size (that means,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
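
// Editor's illustration (not part of v8-profiler.h): Serialize() pushes the
// JSON through the OutputStream interface in chunks, so an embedder only has
// to implement the virtuals above. StringStream and DumpSnapshotJson are
// invented names; TakeHeapSnapshot is declared on HeapProfiler further below.
#include <string>
#include "v8-profiler.h"
#include "v8.h"

class StringStream : public v8::OutputStream {
 public:
  explicit StringStream(std::string* out) : out_(out) {}
  void EndOfStream() override {}
  int GetChunkSize() override { return 64 * 1024; }
  WriteResult WriteAsciiChunk(char* data, int size) override {
    out_->append(data, static_cast<size_t>(size));
    return kContinue;
  }

 private:
  std::string* out_;
};

std::string DumpSnapshotJson(v8::Isolate* isolate) {
  const v8::HeapSnapshot* snapshot =
      isolate->GetHeapProfiler()->TakeHeapSnapshot();
  std::string json;
  StringStream stream(&json);
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  const_cast<v8::HeapSnapshot*>(snapshot)->Delete();  // drop profiler's copy
  return json;
}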

/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notify about current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(uint32_t done, uint32_t total) = 0;
};

/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<v8::String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<v8::String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no line number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represent a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
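
// Editor's illustration (not part of v8-profiler.h): each Node only records
// its own sampled allocations, so per-call-path totals are computed by
// walking children. SelfBytes and SubtreeBytes are invented helpers.
#include <cstddef>
#include "v8-profiler.h"

size_t SelfBytes(const v8::AllocationProfile::Node* node) {
  size_t total = 0;
  for (const v8::AllocationProfile::Allocation& a : node->allocations) {
    total += a.size * a.count;  // size is per object, count is how many
  }
  return total;
}

size_t SubtreeBytes(const v8::AllocationProfile::Node* node) {
  size_t total = SelfBytes(node);
  for (const v8::AllocationProfile::Node* child : node->children) {
    total += SubtreeBytes(child);
  }
  return total;
}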
& GetSamples() = 0; 793 794 virtual ~AllocationProfile() = default; 795 796 static const int kNoLineNumberInfo = Message::kNoLineNumberInfo; 797 static const int kNoColumnNumberInfo = Message::kNoColumnInfo; 798 }; 799 800 /** 801 * An object graph consisting of embedder objects and V8 objects. 802 * Edges of the graph are strong references between the objects. 803 * The embedder can build this graph during heap snapshot generation 804 * to include the embedder objects in the heap snapshot. 805 * Usage: 806 * 1) Define derived class of EmbedderGraph::Node for embedder objects. 807 * 2) Set the build embedder graph callback on the heap profiler using 808 * HeapProfiler::AddBuildEmbedderGraphCallback. 809 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from 810 * node1 to node2. 811 * 4) To represent references from/to V8 object, construct V8 nodes using 812 * graph->V8Node(value). 813 */ 814 class V8_EXPORT EmbedderGraph { 815 public: 816 class Node { 817 public: 818 /** 819 * Detachedness specifies whether an object is attached or detached from the 820 * main application state. While unkown in general, there may be objects 821 * that specifically know their state. V8 passes this information along in 822 * the snapshot. Users of the snapshot may use it to annotate the object 823 * graph. 824 */ 825 enum class Detachedness : uint8_t { 826 kUnknown = 0, 827 kAttached = 1, 828 kDetached = 2, 829 }; 830 831 Node() = default; 832 virtual ~Node() = default; 833 virtual const char* Name() = 0; 834 virtual size_t SizeInBytes() = 0; 835 /** 836 * The corresponding V8 wrapper node if not null. 837 * During heap snapshot generation the embedder node and the V8 wrapper 838 * node will be merged into one node to simplify retaining paths. 839 */ 840 virtual Node* WrapperNode() { return nullptr; } 841 virtual bool IsRootNode() { return false; } 842 /** Must return true for non-V8 nodes. */ 843 virtual bool IsEmbedderNode() { return true; } 844 /** 845 * Optional name prefix. It is used in Chrome for tagging detached nodes. 846 */ 847 virtual const char* NamePrefix() { return nullptr; } 848 849 /** 850 * Returns the NativeObject that can be used for querying the 851 * |HeapSnapshot|. 852 */ 853 virtual NativeObject GetNativeObject() { return nullptr; } 854 855 /** 856 * Detachedness state of a given object. While unkown in general, there may 857 * be objects that specifically know their state. V8 passes this information 858 * along in the snapshot. Users of the snapshot may use it to annotate the 859 * object graph. 860 */ 861 virtual Detachedness GetDetachedness() { return Detachedness::kUnknown; } 862 863 Node(const Node&) = delete; 864 Node& operator=(const Node&) = delete; 865 }; 866 867 /** 868 * Returns a node corresponding to the given V8 value. Ownership is not 869 * transferred. The result pointer is valid while the graph is alive. 870 */ 871 virtual Node* V8Node(const v8::Local
& value) = 0; 872 873 /** 874 * Adds the given node to the graph and takes ownership of the node. 875 * Returns a raw pointer to the node that is valid while the graph is alive. 876 */ 877 virtual Node* AddNode(std::unique_ptr
node) = 0; 878 879 /** 880 * Adds an edge that represents a strong reference from the given 881 * node |from| to the given node |to|. The nodes must be added to the graph 882 * before calling this function. 883 * 884 * If name is nullptr, the edge will have auto-increment indexes, otherwise 885 * it will be named accordingly. 886 */ 887 virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0; 888 889 virtual ~EmbedderGraph() = default; 890 }; 891 892 /** 893 * Interface for controlling heap profiling. Instance of the 894 * profiler can be retrieved using v8::Isolate::GetHeapProfiler. 895 */ 896 class V8_EXPORT HeapProfiler { 897 public: 898 enum SamplingFlags { 899 kSamplingNoFlags = 0, 900 kSamplingForceGC = 1 << 0, 901 }; 902 903 /** 904 * Callback function invoked during heap snapshot generation to retrieve 905 * the embedder object graph. The callback should use graph->AddEdge(..) to 906 * add references between the objects. 907 * The callback must not trigger garbage collection in V8. 908 */ 909 typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate, 910 v8::EmbedderGraph* graph, 911 void* data); 912 913 /** 914 * Callback function invoked during heap snapshot generation to retrieve 915 * the detachedness state of an object referenced by a TracedReference. 916 * 917 * The callback takes Local
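
// Editor's illustration (not part of v8-profiler.h): an embedder wraps its
// native objects in Node subclasses inside a BuildEmbedderGraphCallback
// (declared on HeapProfiler further below). NativeCacheNode, kCacheBytes and
// BuildEmbedderGraph are invented names.
#include <cstddef>
#include <memory>
#include "v8-profiler.h"

constexpr size_t kCacheBytes = 1024 * 1024;  // pretend native allocation

class NativeCacheNode final : public v8::EmbedderGraph::Node {
 public:
  const char* Name() override { return "NativeCache"; }
  size_t SizeInBytes() override { return kCacheBytes; }
};

// Matches HeapProfiler::BuildEmbedderGraphCallback and would be registered
// with HeapProfiler::AddBuildEmbedderGraphCallback(callback, data).
void BuildEmbedderGraph(v8::Isolate* /*isolate*/, v8::EmbedderGraph* graph,
                        void* /*data*/) {
  v8::EmbedderGraph::Node* cache =
      graph->AddNode(std::make_unique<NativeCacheNode>());
  (void)cache;
  // If the JS wrapper object for the cache is known, link it so the retaining
  // path shows up in snapshots:
  //   graph->AddEdge(graph->V8Node(js_wrapper), cache, "native_cache");
}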

/**
 * Interface for controlling heap profiling. Instance of the
 * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
  };

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the embedder object graph. The callback should use graph->AddEdge(..) to
   * add references between the objects.
   * The callback must not trigger garbage collection in V8.
   */
  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                             v8::EmbedderGraph* graph,
                                             void* data);

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the detachedness state of an object referenced by a TracedReference.
   *
   * The callback takes Local<Value> as parameter to allow the embedder to
   * unpack the TracedReference into a Local and reuse that Local for different
   * purposes.
   */
  using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
      v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
      uint16_t class_id, void* data);

  /** Returns the number of snapshots taken. */
  int GetSnapshotCount();

  /** Returns a snapshot by index. */
  const HeapSnapshot* GetHeapSnapshot(int index);

  /**
   * Returns SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(Local<Value> value);

  /**
   * Returns SnapshotObjectId for a native object referenced by |value| if it
   * has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(NativeObject value);

  /**
   * Returns heap object with given SnapshotObjectId if the object is alive,
   * otherwise empty handle is returned.
   */
  Local<Value> FindObjectById(SnapshotObjectId id);

  /**
   * Clears internal map from SnapshotObjectId to heap object. The new objects
   * will not be added into it unless a heap snapshot is taken or heap object
   * tracking is kicked off.
   */
  void ClearObjectIds();

  /**
   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
   * it in case heap profiler cannot find id for the object passed as
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Callback interface for retrieving user friendly names of global objects.
   */
  class ObjectNameResolver {
   public:
    /**
     * Returns name to be used in the heap snapshot for given node. Returned
     * string must stay alive until snapshot collection is completed.
     */
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
      ActivityControl* control = nullptr,
      ObjectNameResolver* global_object_name_resolver = nullptr,
      bool treat_global_objects_as_roots = true,
      bool capture_numeric_value = false);

  /**
   * Starts tracking of heap objects population statistics. After calling
   * this method, all heap objects relocations done by the garbage collector
   * are being registered.
   *
   * |track_allocations| parameter controls whether stack trace of each
   * allocation in the heap will be recorded and reported as part of
   * HeapSnapshot.
   */
  void StartTrackingHeapObjects(bool track_allocations = false);

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap objects
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of the
   * HeapStatsUpdate structure instances.
   * If |timestamp_us| is supplied, timestamp of the new entry will be written
   * into it. The return value of the function is the last seen heap object Id.
   *
   * StartTrackingHeapObjects must be called before the first call to this
   * method.
   */
  SnapshotObjectId GetHeapStats(OutputStream* stream,
                                int64_t* timestamp_us = nullptr);

  /**
   * Stops tracking of heap objects population statistics, cleans up all
   * collected data. StartHeapObjectsTracking must be called again prior to
   * calling GetHeapStats next time.
   */
  void StopTrackingHeapObjects();

  /**
   * Starts gathering a sampling heap profile. A sampling heap profile is
   * similar to tcmalloc's heap profiler and Go's mprof. It samples object
   * allocations and builds an online 'sampling' heap profile. At any point in
   * time, this profile is expected to be a representative sample of objects
   * currently live in the system. Each sampled allocation includes the stack
   * trace at the time of allocation, which makes this really useful for memory
   * leak detection.
   *
   * This mechanism is intended to be cheap enough that it can be used in
   * production with minimal performance overhead.
   *
   * Allocations are sampled using a randomized Poisson process. On average, one
   * allocation will be sampled every |sample_interval| bytes allocated. The
   * |stack_depth| parameter controls the maximum number of stack frames to be
   * captured on each allocation.
   *
   * NOTE: This is a proof-of-concept at this point. Right now we only sample
   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
   * objects, large objects, code objects, etc.) and native allocations
   * doesn't exist yet, but is anticipated in the future.
   *
   * Objects allocated before the sampling is started will not be included in
   * the profile.
   *
   * Returns false if a sampling heap profiler is already running.
   */
  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
                                 int stack_depth = 16,
                                 SamplingFlags flags = kSamplingNoFlags);

  /**
   * Stops the sampling heap profile and discards the current profile.
   */
  void StopSamplingHeapProfiler();

  /**
   * Returns the sampled profile of allocations allocated (and still live) since
   * StartSamplingHeapProfiler was called. The ownership of the pointer is
   * transferred to the caller. Returns nullptr if sampling heap profiler is not
   * active.
   */
  AllocationProfile* GetAllocationProfile();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.
   */
  void DeleteAllHeapSnapshots();

  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                     void* data);
  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                        void* data);

  void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);

  /**
   * Default value of persistent handle class ID. Must not be used to
   * define a class. Can be used to reset a class of a persistent
   * handle.
   */
  static const uint16_t kPersistentHandleNoClassId = 0;

 private:
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};

/**
 * A struct for exporting HeapStats data from V8, using "push" model.
 * See HeapProfiler::GetHeapStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
      : index(index), count(count), size(size) { }
  uint32_t index;  // Index of the time interval that was changed.
  uint32_t count;  // New value of count field for the interval with this index.
  uint32_t size;   // New value of size field for the interval with this index.
};
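
// Editor's illustration (not part of v8-profiler.h): a minimal sampling heap
// profiler round trip. GetAllocationProfile() transfers ownership, hence the
// std::unique_ptr. SampleHeap is an invented name; SubtreeBytes refers to the
// helper sketched after AllocationProfile above.
#include <cstddef>
#include <memory>
#include "v8-profiler.h"
#include "v8.h"

size_t SampleHeap(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartSamplingHeapProfiler(/*sample_interval=*/128 * 1024,
                                      /*stack_depth=*/32);

  // ... let the application allocate for a while ...

  size_t live_sampled_bytes = 0;
  std::unique_ptr<v8::AllocationProfile> profile(
      profiler->GetAllocationProfile());
  if (profile) live_sampled_bytes = SubtreeBytes(profile->GetRootNode());

  profiler->StopSamplingHeapProfiler();
  return live_sampled_bytes;
}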

#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)                   \
  V(Relocation)

/**
 * Note that this enum may be extended in the future. Please include a default
 * case if this enum is used in a switch statement.
 */
enum CodeEventType {
  kUnknownType = 0
#define V(Name) , k##Name##Type
      CODE_EVENTS_LIST(V)
#undef V
};

/**
 * Representation of a code creation event
 */
class V8_EXPORT CodeEvent {
 public:
  uintptr_t GetCodeStartAddress();
  size_t GetCodeSize();
  Local<String> GetFunctionName();
  Local<String> GetScriptName();
  int GetScriptLine();
  int GetScriptColumn();
  /**
   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
   * existing code, and both the code type and the comment are not stored in the
   * heap, so we return those as const char*.
   */
  CodeEventType GetCodeType();
  const char* GetComment();

  static const char* GetCodeEventTypeName(CodeEventType code_event_type);

  uintptr_t GetPreviousCodeStartAddress();
};

/**
 * Interface to listen to code creation and code relocation events.
 */
class V8_EXPORT CodeEventHandler {
 public:
  /**
   * Creates a new listener for the |isolate|. The isolate must be initialized.
   * The listener object must be disposed after use by calling |Dispose| method.
   * Multiple listeners can be created for the same isolate.
   */
  explicit CodeEventHandler(Isolate* isolate);
  virtual ~CodeEventHandler();

  /**
   * Handle is called every time a code object is created or moved. Information
   * about each code event will be available through the `code_event`
   * parameter.
   *
   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
   */
  virtual void Handle(CodeEvent* code_event) = 0;

  /**
   * Call `Enable()` to starts listening to code creation and code relocation
   * events. These events will be handled by `Handle()`.
   */
  void Enable();

  /**
   * Call `Disable()` to stop listening to code creation and code relocation
   * events.
   */
  void Disable();

 private:
  CodeEventHandler();
  CodeEventHandler(const CodeEventHandler&);
  CodeEventHandler& operator=(const CodeEventHandler&);
  void* internal_listener_;
};

}  // namespace v8

#endif  // V8_V8_PROFILER_H_
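
As a closing usage sketch for the listener interface above: a CodeEventHandler subclass receives every code creation and relocation event between Enable() and Disable(). The names JitLogger and LogCodeEventsDuring below are illustrative and not part of V8.

// Sketch: log code creation/relocation events while a piece of work runs.
#include <cstdio>
#include <functional>
#include "v8-profiler.h"
#include "v8.h"

class JitLogger : public v8::CodeEventHandler {
 public:
  explicit JitLogger(v8::Isolate* isolate) : v8::CodeEventHandler(isolate) {}

  void Handle(v8::CodeEvent* event) override {
    std::printf("[%s] code at %p, %zu bytes\n",
                v8::CodeEvent::GetCodeEventTypeName(event->GetCodeType()),
                reinterpret_cast<void*>(event->GetCodeStartAddress()),
                event->GetCodeSize());
  }
};

void LogCodeEventsDuring(v8::Isolate* isolate,
                         const std::function<void()>& work) {
  JitLogger logger(isolate);
  logger.Enable();   // start receiving events via Handle()
  work();
  logger.Disable();  // stop listening; the logger is destroyed on return
}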