Remove scheduler sampling profiler shared array buffer (#20840)
No one has been using this data, so there's no reason to collect it. The event log has been kept, and the tests have been updated.
Brian Vaughn authored and gaearon committed Mar 22, 2021
1 parent b2bbee7 commit 12adaff
Showing 4 changed files with 15 additions and 123 deletions.
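
For context, here is a minimal sketch of the sampling pattern this commit removes. It is reconstructed from the deleted test helper and the deleted constants in SchedulerProfiling.js shown in the diff below; the polling function itself is illustrative, not code from the repository.

// Illustrative only: how the removed shared buffer could be sampled by
// wrapping it in an Int32Array. Field indices match the deleted constants.
const PRIORITY = 0;
const CURRENT_TASK_ID = 1;
const CURRENT_RUN_ID = 2;
const QUEUE_SIZE = 3;

const state = new Int32Array(Scheduler.unstable_Profiling.sharedProfilingBuffer);

function sampleSchedulerState() {
  if (state[QUEUE_SIZE] === 0) {
    return 'Empty Queue';
  }
  return (
    `Task: ${state[CURRENT_TASK_ID]}, ` +
    `Run: ${state[CURRENT_RUN_ID]}, ` +
    `Queue Size: ${state[QUEUE_SIZE]}`
  );
}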
2 changes: 0 additions & 2 deletions packages/scheduler/src/Scheduler.js
@@ -32,7 +32,6 @@ import {
IdlePriority,
} from './SchedulerPriorities';
import {
sharedProfilingBuffer,
markTaskRun,
markTaskYield,
markTaskCompleted,
@@ -424,6 +423,5 @@ export const unstable_Profiling = enableProfiling
? {
startLoggingProfilingEvents,
stopLoggingProfilingEvents,
sharedProfilingBuffer,
}
: null;
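
With sharedProfilingBuffer gone, the only profiling surface left on unstable_Profiling is the event log. A hedged usage sketch follows; that stopLoggingProfilingEvents returns the recorded log is an assumption, since its body is not shown in this diff.

// Sketch: collect the scheduler's event log when profiling is enabled.
const profiling = Scheduler.unstable_Profiling;
if (profiling !== null) { // null unless enableProfiling is set
  profiling.startLoggingProfilingEvents();

  Scheduler.unstable_scheduleCallback(Scheduler.unstable_NormalPriority, () => {
    // ...work to be profiled...
  });

  // ...after the scheduled work has flushed...
  const eventLog = profiling.stopLoggingProfilingEvents(); // assumed to return the log
}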
2 changes: 1 addition & 1 deletion packages/scheduler/src/SchedulerFeatureFlags.js
@@ -8,4 +8,4 @@

export const enableSchedulerDebugging = false;
export const enableIsInputPending = false;
export const enableProfiling = false;
export const enableProfiling = __VARIANT__;
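
Switching enableProfiling from false to __VARIANT__ presumably keeps the profiler out of default builds while still letting the variant test runs exercise it; every hook in SchedulerProfiling.js is gated on this flag, as the next file shows. Schematically (markSomething is a placeholder name, not a real hook):

// Gating pattern used throughout SchedulerProfiling.js (illustrative):
import {enableProfiling} from './SchedulerFeatureFlags';

export function markSomething(ms) {
  if (enableProfiling) {
    // append to the event log; skipped (and dead-code eliminated) when the flag is false
  }
}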
56 changes: 0 additions & 56 deletions packages/scheduler/src/SchedulerProfiling.js
@@ -10,45 +10,9 @@
import type {PriorityLevel} from './SchedulerPriorities';
import {enableProfiling} from './SchedulerFeatureFlags';

import {NoPriority} from './SchedulerPriorities';

let runIdCounter: number = 0;
let mainThreadIdCounter: number = 0;

const isEnabledSharedArrayBuffer =
// $FlowFixMe Flow doesn't know about SharedArrayBuffer
typeof SharedArrayBuffer === 'function' &&
// We only use SharedArrayBuffer when cross origin isolation is enabled.
typeof window !== 'undefined' &&
window.crossOriginIsolated === true;

const profilingStateSize = 4;
export const sharedProfilingBuffer = enableProfiling
? isEnabledSharedArrayBuffer
? new SharedArrayBuffer(profilingStateSize * Int32Array.BYTES_PER_ELEMENT)
: typeof ArrayBuffer === 'function'
? new ArrayBuffer(profilingStateSize * Int32Array.BYTES_PER_ELEMENT)
: null // Don't crash the init path on IE9
: null;

const profilingState =
enableProfiling && sharedProfilingBuffer !== null
? new Int32Array(sharedProfilingBuffer)
: []; // We can't read this but it helps save bytes for null checks

const PRIORITY = 0;
const CURRENT_TASK_ID = 1;
const CURRENT_RUN_ID = 2;
const QUEUE_SIZE = 3;

if (enableProfiling) {
profilingState[PRIORITY] = NoPriority;
// This is maintained with a counter, because the size of the priority queue
// array might include canceled tasks.
profilingState[QUEUE_SIZE] = 0;
profilingState[CURRENT_TASK_ID] = 0;
}

// Bytes per element is 4
const INITIAL_EVENT_LOG_SIZE = 131072;
const MAX_EVENT_LOG_SIZE = 524288; // Equivalent to 2 megabytes
@@ -116,8 +80,6 @@ export function markTaskStart(
ms: number,
) {
if (enableProfiling) {
profilingState[QUEUE_SIZE]++;

if (eventLog !== null) {
// performance.now returns a float, representing milliseconds. When the
// event is logged, it's coerced to an int. Convert to microseconds to
@@ -136,10 +98,6 @@ export function markTaskCompleted(
ms: number,
) {
if (enableProfiling) {
profilingState[PRIORITY] = NoPriority;
profilingState[CURRENT_TASK_ID] = 0;
profilingState[QUEUE_SIZE]--;

if (eventLog !== null) {
logEvent([TaskCompleteEvent, ms * 1000, task.id]);
}
@@ -155,8 +113,6 @@ export function markTaskCanceled(
ms: number,
) {
if (enableProfiling) {
profilingState[QUEUE_SIZE]--;

if (eventLog !== null) {
logEvent([TaskCancelEvent, ms * 1000, task.id]);
}
@@ -172,10 +128,6 @@ export function markTaskErrored(
ms: number,
) {
if (enableProfiling) {
profilingState[PRIORITY] = NoPriority;
profilingState[CURRENT_TASK_ID] = 0;
profilingState[QUEUE_SIZE]--;

if (eventLog !== null) {
logEvent([TaskErrorEvent, ms * 1000, task.id]);
}
@@ -193,10 +145,6 @@ export function markTaskRun(
if (enableProfiling) {
runIdCounter++;

profilingState[PRIORITY] = task.priorityLevel;
profilingState[CURRENT_TASK_ID] = task.id;
profilingState[CURRENT_RUN_ID] = runIdCounter;

if (eventLog !== null) {
logEvent([TaskRunEvent, ms * 1000, task.id, runIdCounter]);
}
@@ -205,10 +153,6 @@

export function markTaskYield(task: {id: number, ...}, ms: number) {
if (enableProfiling) {
profilingState[PRIORITY] = NoPriority;
profilingState[CURRENT_TASK_ID] = 0;
profilingState[CURRENT_RUN_ID] = 0;

if (eventLog !== null) {
logEvent([TaskYieldEvent, ms * 1000, task.id, runIdCounter]);
}
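
What remains after this commit is the append-only event log: a flat Int32Array of variable-length records whose layouts can be read off the logEvent calls above (timestamps are performance.now() values converted to microseconds and coerced to ints). Below is a hedged decoding sketch covering only the record types whose writers are visible in this diff; decodeEventLog and its zero-terminator convention are illustrative, not scheduler API.

// Record layouts inferred from the logEvent calls above:
//   TaskCompleteEvent / TaskCancelEvent / TaskErrorEvent -> [type, timeUs, taskId]
//   TaskRunEvent / TaskYieldEvent                        -> [type, timeUs, taskId, runId]
// Other event types may use different field counts.
function decodeEventLog(log, eventTypes) {
  // `eventTypes` maps names to the numeric constants defined in SchedulerProfiling.js.
  const events = [];
  let i = 0;
  // The log is preallocated, so treat a zero type as the end of recorded data
  // (an assumption: the event type constants shown in the test start at 1).
  while (i < log.length && log[i] !== 0) {
    const type = log[i];
    const timeUs = log[i + 1];
    const taskId = log[i + 2];
    if (type === eventTypes.TaskRunEvent || type === eventTypes.TaskYieldEvent) {
      events.push({type, timeUs, taskId, runId: log[i + 3]});
      i += 4;
    } else {
      events.push({type, timeUs, taskId});
      i += 3;
    }
  }
  return events;
}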
78 changes: 14 additions & 64 deletions packages/scheduler/src/__tests__/SchedulerProfiling-test.js
@@ -13,7 +13,6 @@
'use strict';

let Scheduler;
let sharedProfilingArray;
// let runWithPriority;
let ImmediatePriority;
let UserBlockingPriority;
@@ -59,10 +58,6 @@ describe('Scheduler', () => {
jest.mock('scheduler', () => require('scheduler/unstable_mock'));
Scheduler = require('scheduler');

sharedProfilingArray = new Int32Array(
Scheduler.unstable_Profiling.sharedProfilingBuffer,
);

// runWithPriority = Scheduler.unstable_runWithPriority;
ImmediatePriority = Scheduler.unstable_ImmediatePriority;
UserBlockingPriority = Scheduler.unstable_UserBlockingPriority;
@@ -76,20 +71,6 @@
// shouldYield = Scheduler.unstable_shouldYield;
});

const PRIORITY = 0;
const CURRENT_TASK_ID = 1;
const CURRENT_RUN_ID = 2;
const QUEUE_SIZE = 3;

afterEach(() => {
if (sharedProfilingArray[QUEUE_SIZE] !== 0) {
throw Error(
'Test exited, but the shared profiling buffer indicates that a task ' +
'is still running',
);
}
});

const TaskStartEvent = 1;
const TaskCompleteEvent = 2;
const TaskErrorEvent = 3;
@@ -272,23 +253,6 @@ describe('Scheduler', () => {
return '\n' + result;
}

function getProfilingInfo() {
const queueSize = sharedProfilingArray[QUEUE_SIZE];
if (queueSize === 0) {
return 'Empty Queue';
}
const priorityLevel = sharedProfilingArray[PRIORITY];
if (priorityLevel === 0) {
return 'Suspended, Queue Size: ' + queueSize;
}
return (
`Task: ${sharedProfilingArray[CURRENT_TASK_ID]}, ` +
`Run: ${sharedProfilingArray[CURRENT_RUN_ID]}, ` +
`Priority: ${priorityLevelToString(priorityLevel)}, ` +
`Queue Size: ${sharedProfilingArray[QUEUE_SIZE]}`
);
}

it('creates a basic flamegraph', () => {
Scheduler.unstable_Profiling.startLoggingProfilingEvents();

@@ -297,35 +261,27 @@
NormalPriority,
() => {
Scheduler.unstable_advanceTime(300);
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 1');
scheduleCallback(
UserBlockingPriority,
() => {
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 2');
Scheduler.unstable_advanceTime(300);
},
{label: 'Bar'},
);
Scheduler.unstable_advanceTime(100);
Scheduler.unstable_yieldValue('Yield');
Scheduler.unstable_yieldValue('Yield 3');
return () => {
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 4');
Scheduler.unstable_advanceTime(300);
};
},
{label: 'Foo'},
);
expect(Scheduler).toFlushAndYieldThrough([
'Task: 1, Run: 1, Priority: Normal, Queue Size: 1',
'Yield',
]);
expect(Scheduler).toFlushAndYieldThrough(['Yield 1', 'Yield 3']);
Scheduler.unstable_advanceTime(100);
expect(Scheduler).toFlushAndYield([
'Task: 2, Run: 2, Priority: User-blocking, Queue Size: 2',
'Task: 1, Run: 3, Priority: Normal, Queue Size: 1',
]);

expect(getProfilingInfo()).toEqual('Empty Queue');
expect(Scheduler).toFlushAndYield(['Yield 2', 'Yield 4']);

expect(stopProfilingAndPrintFlamegraph()).toEqual(
`
@@ -340,19 +296,16 @@ Task 1 [Normal] │ ████████░░░░░░░
Scheduler.unstable_Profiling.startLoggingProfilingEvents();

const task = scheduleCallback(NormalPriority, () => {
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 1');
Scheduler.unstable_advanceTime(300);
Scheduler.unstable_yieldValue('Yield');
Scheduler.unstable_yieldValue('Yield 2');
return () => {
Scheduler.unstable_yieldValue('Continuation');
Scheduler.unstable_advanceTime(200);
};
});

expect(Scheduler).toFlushAndYieldThrough([
'Task: 1, Run: 1, Priority: Normal, Queue Size: 1',
'Yield',
]);
expect(Scheduler).toFlushAndYieldThrough(['Yield 1', 'Yield 2']);
Scheduler.unstable_advanceTime(100);

cancelCallback(task);
@@ -392,28 +345,25 @@ Task 1 [Normal] │██████🡐 errored
Scheduler.unstable_Profiling.startLoggingProfilingEvents();

const task1 = scheduleCallback(NormalPriority, () => {
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 1');
Scheduler.unstable_advanceTime(300);
Scheduler.unstable_yieldValue('Yield');
Scheduler.unstable_yieldValue('Yield 2');
return () => {
Scheduler.unstable_yieldValue('Continuation');
Scheduler.unstable_advanceTime(200);
};
});
const task2 = scheduleCallback(NormalPriority, () => {
Scheduler.unstable_yieldValue(getProfilingInfo());
Scheduler.unstable_yieldValue('Yield 3');
Scheduler.unstable_advanceTime(300);
Scheduler.unstable_yieldValue('Yield');
Scheduler.unstable_yieldValue('Yield 4');
return () => {
Scheduler.unstable_yieldValue('Continuation');
Scheduler.unstable_advanceTime(200);
};
});

expect(Scheduler).toFlushAndYieldThrough([
'Task: 1, Run: 1, Priority: Normal, Queue Size: 2',
'Yield',
]);
expect(Scheduler).toFlushAndYieldThrough(['Yield 1', 'Yield 2']);
Scheduler.unstable_advanceTime(100);

cancelCallback(task1);
