/*
 * Copyright (C) 2004-2021 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Alp Toker
 * Copyright (C) 2010 Torch Mobile (Beijing) Co. Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "HTMLCanvasElement.h"

#include "Blob.h"
#include "BlobCallback.h"
#include "CanvasGradient.h"
#include "CanvasPattern.h"
#include "CanvasRenderingContext2D.h"
#include "CanvasRenderingContext2DSettings.h"
#include "DisplayListDrawingContext.h"
#include "Document.h"
#include "EventNames.h"
#include "Frame.h"
#include "FrameLoaderClient.h"
#include "GPUBasedCanvasRenderingContext.h"
#include "GeometryUtilities.h"
#include "GraphicsContext.h"
#include "HTMLNames.h"
#include "HTMLParserIdioms.h"
#include "ImageBitmapRenderingContext.h"
#include "ImageBitmapRenderingContextSettings.h"
#include "ImageBuffer.h"
#include "ImageData.h"
#include "InMemoryDisplayList.h"
#include "InspectorInstrumentation.h"
#include "JSDOMConvertDictionary.h"
#include "MIMETypeRegistry.h"
#include "OffscreenCanvas.h"
#include "PlaceholderRenderingContext.h"
#include "RenderElement.h"
#include "RenderHTMLCanvas.h"
#include "ResourceLoadObserver.h"
#include "RuntimeEnabledFeatures.h"
#include "ScriptController.h"
#include "Settings.h"
#include "StringAdaptors.h"
#include <JavaScriptCore/InspectorProtocolObjects.h>
#include <JavaScriptCore/JSCInlines.h>
#include <math.h>
#include <wtf/IsoMallocInlines.h>
#include <wtf/RAMSize.h>

#if ENABLE(MEDIA_STREAM)
#include "CanvasCaptureMediaStreamTrack.h"
#include "MediaStream.h"
#endif

#if ENABLE(WEBGL)
#include "WebGLContextAttributes.h"
#include "WebGLRenderingContext.h"
#endif

#if ENABLE(WEBGL2)
#include "WebGL2RenderingContext.h"
#endif

#if ENABLE(WEBXR)
#include "DOMWindow.h"
#include "Navigator.h"
#include "NavigatorWebXR.h"
#include "WebXRSystem.h"
#endif

#if USE(CG)
#include "ImageBufferUtilitiesCG.h"
#endif

#if USE(GSTREAMER)
#include "MediaSampleGStreamer.h"
#endif

#if PLATFORM(COCOA)
#include "MediaSampleAVFObjC.h"
#include <pal/cf/CoreMediaSoftLink.h>
#endif

namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(HTMLCanvasElement);

using namespace HTMLNames;

// These values come from the WhatWG/W3C HTML spec.
const int defaultWidth = 300;
const int defaultHeight = 150;

static std::optional<size_t> maxCanvasAreaForTesting;
static std::optional<size_t> maxActivePixelMemoryForTesting;

HTMLCanvasElement::HTMLCanvasElement(const QualifiedName& tagName, Document& document)
    : HTMLElement(tagName, document)
    , CanvasBase(IntSize(defaultWidth, defaultHeight))
    , ActiveDOMObject(document)
{
    ASSERT(hasTagName(canvasTag));
}

Ref<HTMLCanvasElement> HTMLCanvasElement::create(Document& document)
{
    auto canvas = adoptRef(*new HTMLCanvasElement(canvasTag, document));
    canvas->suspendIfNeeded();
    return canvas;
}

Ref<HTMLCanvasElement> HTMLCanvasElement::create(const QualifiedName& tagName, Document& document)
{
    auto canvas = adoptRef(*new HTMLCanvasElement(tagName, document));
    canvas->suspendIfNeeded();
    return canvas;
}

HTMLCanvasElement::~HTMLCanvasElement()
{
    // FIXME: This has to be called here because CSSCanvasValue::CanvasObserverProxy::canvasDestroyed()
    // downcasts the CanvasBase object to HTMLCanvasElement. That invokes virtual methods, which should be
    // avoided in destructors, but works as long as it's done before HTMLCanvasElement destructs completely.
    notifyObserversCanvasDestroyed();

    document().clearCanvasPreparation(*this);

    m_context = nullptr; // Ensure this goes away before the ImageBuffer.
    setImageBuffer(nullptr);
}

void HTMLCanvasElement::parseAttribute(const QualifiedName& name, const AtomString& value)
{
    if (name == widthAttr || name == heightAttr)
        reset();
    HTMLElement::parseAttribute(name, value);
}

RenderPtr<RenderElement> HTMLCanvasElement::createElementRenderer(RenderStyle&& style, const RenderTreePosition& insertionPosition)
{
    RefPtr<Frame> frame = document().frame();
    if (frame && frame->script().canExecuteScripts(NotAboutToExecuteScript))
        return createRenderer<RenderHTMLCanvas>(*this, WTFMove(style));
    return HTMLElement::createElementRenderer(WTFMove(style), insertionPosition);
}

bool HTMLCanvasElement::canContainRangeEndPoint() const
{
    return false;
}

bool HTMLCanvasElement::canStartSelection() const
{
    return false;
}

ExceptionOr<void> HTMLCanvasElement::setHeight(unsigned value)
{
    if (isControlledByOffscreen())
        return Exception { InvalidStateError };
    setAttributeWithoutSynchronization(heightAttr, AtomString::number(limitToOnlyHTMLNonNegative(value, defaultHeight)));
    return { };
}

ExceptionOr<void> HTMLCanvasElement::setWidth(unsigned value)
{
    if (isControlledByOffscreen())
        return Exception { InvalidStateError };
    setAttributeWithoutSynchronization(widthAttr, AtomString::number(limitToOnlyHTMLNonNegative(value, defaultWidth)));
    return { };
}

void HTMLCanvasElement::setSize(const IntSize& newSize)
{
    if (newSize == size())
        return;

    m_ignoreReset = true;
    setWidth(newSize.width());
    setHeight(newSize.height());
    m_ignoreReset = false;
    reset();
}

static inline size_t maxActivePixelMemory()
{
    if (maxActivePixelMemoryForTesting)
        return *maxActivePixelMemoryForTesting;

    static size_t maxPixelMemory;
    static std::once_flag onceFlag;
    std::call_once(onceFlag, [] {
#if PLATFORM(IOS_FAMILY)
        maxPixelMemory = ramSize() / 4;
#else
        maxPixelMemory = std::max(ramSize() / 4, 2151 * MB);
#endif
    });

    return maxPixelMemory;
}

void HTMLCanvasElement::setMaxPixelMemoryForTesting(std::optional<size_t> size)
{
    maxActivePixelMemoryForTesting = size;
}

static inline size_t maxCanvasArea()
{
    if (maxCanvasAreaForTesting)
        return *maxCanvasAreaForTesting;

    // Firefox limits width/height to 32767 pixels, but slows down dramatically before it
    // reaches that limit. We limit by area instead, giving us larger maximum dimensions,
    // in exchange for a smaller maximum canvas size. The maximum canvas size is in device pixels.
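    // As a point of reference, the 16384 * 16384 desktop limit below is roughly 268 million
    // device pixels, or about 1 GiB for a single 4-bytes-per-pixel backing store; the
    // 4096 * 4096 iOS-family limit is about 64 MB.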
#if PLATFORM(IOS_FAMILY)
    return 4096 * 4096;
#else
    return 16384 * 16384;
#endif
}

void HTMLCanvasElement::setMaxCanvasAreaForTesting(std::optional<size_t> size)
{
    maxCanvasAreaForTesting = size;
}

ExceptionOr<std::optional<RenderingContext>> HTMLCanvasElement::getContext(JSC::JSGlobalObject& state, const String& contextId, Vector<JSC::Strong<JSC::Unknown>>&& arguments)
{
    if (m_context) {
        if (m_context->isPlaceholder())
            return Exception { InvalidStateError };

        if (m_context->is2d()) {
            if (!is2dType(contextId))
                return std::optional<RenderingContext> { std::nullopt };
            return std::optional<RenderingContext> { RefPtr<CanvasRenderingContext2D> { &downcast<CanvasRenderingContext2D>(*m_context) } };
        }

        if (m_context->isBitmapRenderer()) {
            if (!isBitmapRendererType(contextId))
                return std::optional<RenderingContext> { std::nullopt };
            return std::optional<RenderingContext> { RefPtr<ImageBitmapRenderingContext> { &downcast<ImageBitmapRenderingContext>(*m_context) } };
        }

#if ENABLE(WEBGL)
        if (m_context->isWebGL()) {
            if (!isWebGLType(contextId))
                return std::optional<RenderingContext> { std::nullopt };
            auto version = toWebGLVersion(contextId);
            if ((version == WebGLVersion::WebGL1) != m_context->isWebGL1())
                return std::optional<RenderingContext> { std::nullopt };
            if (is<WebGLRenderingContext>(*m_context))
                return std::optional<RenderingContext> { RefPtr<WebGLRenderingContext> { &downcast<WebGLRenderingContext>(*m_context) } };
#if ENABLE(WEBGL2)
            ASSERT(is<WebGL2RenderingContext>(*m_context));
            return std::optional<RenderingContext> { RefPtr<WebGL2RenderingContext> { &downcast<WebGL2RenderingContext>(*m_context) } };
#endif
        }
#endif

        ASSERT_NOT_REACHED();
        return std::optional<RenderingContext> { std::nullopt };
    }

    if (is2dType(contextId)) {
        auto scope = DECLARE_THROW_SCOPE(state.vm());
        auto settings = convert<IDLDictionary<CanvasRenderingContext2DSettings>>(state, arguments.isEmpty() ? JSC::jsUndefined() : (arguments[0].isObject() ? arguments[0].get() : JSC::jsNull()));
        RETURN_IF_EXCEPTION(scope, Exception { ExistingExceptionError });
        auto context = createContext2d(contextId, WTFMove(settings));
        if (!context)
            return std::optional<RenderingContext> { std::nullopt };
        return std::optional<RenderingContext> { RefPtr<CanvasRenderingContext2D> { context } };
    }

    if (isBitmapRendererType(contextId)) {
        auto scope = DECLARE_THROW_SCOPE(state.vm());
        auto settings = convert<IDLDictionary<ImageBitmapRenderingContextSettings>>(state, arguments.isEmpty() ? JSC::jsUndefined() : (arguments[0].isObject() ? arguments[0].get() : JSC::jsNull()));
        RETURN_IF_EXCEPTION(scope, Exception { ExistingExceptionError });
        auto context = createContextBitmapRenderer(contextId, WTFMove(settings));
        if (!context)
            return std::optional<RenderingContext> { std::nullopt };
        return std::optional<RenderingContext> { RefPtr<ImageBitmapRenderingContext> { context } };
    }

#if ENABLE(WEBGL)
    if (isWebGLType(contextId)) {
        auto scope = DECLARE_THROW_SCOPE(state.vm());
        auto attributes = convert<IDLDictionary<WebGLContextAttributes>>(state, arguments.isEmpty() ? JSC::jsUndefined() : (arguments[0].isObject() ? arguments[0].get() : JSC::jsNull()));
        RETURN_IF_EXCEPTION(scope, Exception { ExistingExceptionError });
        auto context = createContextWebGL(toWebGLVersion(contextId), WTFMove(attributes));
        if (!context)
            return std::optional<RenderingContext> { std::nullopt };

        if (is<WebGLRenderingContext>(*context))
            return std::optional<RenderingContext> { RefPtr<WebGLRenderingContext> { &downcast<WebGLRenderingContext>(*context) } };
#if ENABLE(WEBGL2)
        ASSERT(is<WebGL2RenderingContext>(*context));
        return std::optional<RenderingContext> { RefPtr<WebGL2RenderingContext> { &downcast<WebGL2RenderingContext>(*context) } };
#endif
    }
#endif

    return std::optional<RenderingContext> { std::nullopt };
}

CanvasRenderingContext* HTMLCanvasElement::getContext(const String& type)
{
    if (HTMLCanvasElement::is2dType(type))
        return getContext2d(type, { });

    if (HTMLCanvasElement::isBitmapRendererType(type))
        return getContextBitmapRenderer(type, { });

#if ENABLE(WEBGL)
    if (HTMLCanvasElement::isWebGLType(type))
        return getContextWebGL(HTMLCanvasElement::toWebGLVersion(type));
#endif

    return nullptr;
}

bool HTMLCanvasElement::is2dType(const String& type)
{
    return type == "2d";
}

CanvasRenderingContext2D* HTMLCanvasElement::createContext2d(const String& type, CanvasRenderingContext2DSettings&& settings)
{
    ASSERT_UNUSED(type, HTMLCanvasElement::is2dType(type));
    ASSERT(!m_context);

    // Make sure we don't use more pixel memory than the system can support.
    size_t requestedPixelMemory = 4 * width() * height();
    if (activePixelMemory() + requestedPixelMemory > maxActivePixelMemory()) {
        auto message = makeString("Total canvas memory use exceeds the maximum limit (", maxActivePixelMemory() / 1024 / 1024, " MB).");
        document().addConsoleMessage(MessageSource::JS, MessageLevel::Warning, message);
        return nullptr;
    }

    m_context = CanvasRenderingContext2D::create(*this, WTFMove(settings), document().inQuirksMode());

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    // Need to make sure a RenderLayer and compositing layer get created for the Canvas.
    invalidateStyleAndLayerComposition();
#endif

    return static_cast<CanvasRenderingContext2D*>(m_context.get());
}

CanvasRenderingContext2D* HTMLCanvasElement::getContext2d(const String& type, CanvasRenderingContext2DSettings&& settings)
{
    ASSERT_UNUSED(type, HTMLCanvasElement::is2dType(type));

    if (m_context && !m_context->is2d())
        return nullptr;

    if (!m_context)
        return createContext2d(type, WTFMove(settings));
    return static_cast<CanvasRenderingContext2D*>(m_context.get());
}

#if ENABLE(WEBGL)

static bool requiresAcceleratedCompositingForWebGL()
{
#if PLATFORM(GTK) || PLATFORM(WIN_CAIRO)
    return false;
#else
    return true;
#endif
}

static bool shouldEnableWebGL(const Settings& settings)
{
    if (!settings.webGLEnabled())
        return false;

    if (!requiresAcceleratedCompositingForWebGL())
        return true;

    return settings.acceleratedCompositingEnabled();
}

bool HTMLCanvasElement::isWebGLType(const String& type)
{
    // Retain support for the legacy "webkit-3d" name.
    return type == "webgl" || type == "experimental-webgl"
#if ENABLE(WEBGL2)
        || type == "webgl2"
#endif
        || type == "webkit-3d";
}

GraphicsContextGLWebGLVersion HTMLCanvasElement::toWebGLVersion(const String& type)
{
    ASSERT(isWebGLType(type));
#if ENABLE(WEBGL2)
    if (type == "webgl2")
        return WebGLVersion::WebGL2;
#else
    UNUSED_PARAM(type);
#endif
    return WebGLVersion::WebGL1;
}

WebGLRenderingContextBase* HTMLCanvasElement::createContextWebGL(WebGLVersion type, WebGLContextAttributes&& attrs)
{
    ASSERT(!m_context);

    if (!shouldEnableWebGL(document().settings()))
        return nullptr;

#if ENABLE(WEBXR)
    // https://immersive-web.github.io/webxr/#xr-compatible
    if (attrs.xrCompatible) {
        if (auto* window = document().domWindow())
            // FIXME: how to make this sync without blocking the main thread?
            // For reference: https://immersive-web.github.io/webxr/#ref-for-dom-webglcontextattributes-xrcompatible
            NavigatorWebXR::xr(window->navigator()).ensureImmersiveXRDeviceIsSelected([]() { });
    }
#endif

    // TODO(WEBXR): ensure the context is created in a compatible graphics
    // adapter when there is an active immersive device.
    m_context = WebGLRenderingContextBase::create(*this, attrs, type);
    if (m_context) {
        // This new context needs to be observed by the Document, in order
        // for it to be correctly preparedForRendering before it is composited.
        addObserver(document());

        // Need to make sure a RenderLayer and compositing layer get created for the Canvas.
        invalidateStyleAndLayerComposition();
#if ENABLE(WEBXR)
        ASSERT(!attrs.xrCompatible || downcast<WebGLRenderingContextBase>(m_context.get())->isXRCompatible());
#endif
    }

    return downcast<WebGLRenderingContextBase>(m_context.get());
}

WebGLRenderingContextBase* HTMLCanvasElement::getContextWebGL(WebGLVersion type, WebGLContextAttributes&& attrs)
{
    if (!shouldEnableWebGL(document().settings()))
        return nullptr;

    if (m_context) {
        if (!m_context->isWebGL())
            return nullptr;

        if ((type == WebGLVersion::WebGL1) != m_context->isWebGL1())
            return nullptr;
    }

    if (!m_context)
        return createContextWebGL(type, WTFMove(attrs));
    return &downcast<WebGLRenderingContextBase>(*m_context);
}

#endif // ENABLE(WEBGL)

bool HTMLCanvasElement::isBitmapRendererType(const String& type)
{
    return type == "bitmaprenderer";
}

ImageBitmapRenderingContext* HTMLCanvasElement::createContextBitmapRenderer(const String& type, ImageBitmapRenderingContextSettings&& settings)
{
    ASSERT_UNUSED(type, HTMLCanvasElement::isBitmapRendererType(type));
    ASSERT(!m_context);

    m_context = ImageBitmapRenderingContext::create(*this, WTFMove(settings));

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    // Need to make sure a RenderLayer and compositing layer get created for the Canvas.
    invalidateStyleAndLayerComposition();
#endif

    return static_cast<ImageBitmapRenderingContext*>(m_context.get());
}

ImageBitmapRenderingContext* HTMLCanvasElement::getContextBitmapRenderer(const String& type, ImageBitmapRenderingContextSettings&& settings)
{
    ASSERT_UNUSED(type, HTMLCanvasElement::isBitmapRendererType(type));
    if (!m_context)
        return createContextBitmapRenderer(type, WTFMove(settings));
    return static_cast<ImageBitmapRenderingContext*>(m_context.get());
}

void HTMLCanvasElement::didDraw(const std::optional<FloatRect>& rect)
{
    clearCopiedImage();

    if (!rect) {
        notifyObserversCanvasChanged(std::nullopt);
        return;
    }

    auto dirtyRect = rect.value();
    if (auto* renderer = renderBox()) {
        FloatRect destRect;
        if (is<RenderReplaced>(renderer))
            destRect = downcast<RenderReplaced>(renderer)->replacedContentRect();
        else
            destRect = renderer->contentBoxRect();

        // Inflate dirty rect to cover antialiasing on image buffers.
        if (drawingContext() && drawingContext()->shouldAntialias())
            dirtyRect.inflate(1);

        FloatRect r = mapRect(dirtyRect, FloatRect(0, 0, size().width(), size().height()), destRect);
        r.intersect(destRect);

        if (!r.isEmpty())
            renderer->repaintRectangle(enclosingIntRect(r));
    }
    notifyObserversCanvasChanged(dirtyRect);
}

void HTMLCanvasElement::reset()
{
    if (m_ignoreReset || isControlledByOffscreen())
        return;

    bool hadImageBuffer = hasCreatedImageBuffer();

    int w = limitToOnlyHTMLNonNegative(attributeWithoutSynchronization(widthAttr), defaultWidth);
    int h = limitToOnlyHTMLNonNegative(attributeWithoutSynchronization(heightAttr), defaultHeight);

    resetGraphicsContextState();
    if (is<CanvasRenderingContext2D>(m_context.get()))
        downcast<CanvasRenderingContext2D>(*m_context).reset();

    IntSize oldSize = size();
    IntSize newSize(w, h);
    // If the size of an existing buffer matches, we can just clear it instead of reallocating.
    // This optimization is only done for 2D canvases for now.
    if (m_hasCreatedImageBuffer && oldSize == newSize && m_context && m_context->is2d() && buffer()
        && m_context->colorSpace() == buffer()->colorSpace() && m_context->pixelFormat() == buffer()->pixelFormat()) {
        if (!m_didClearImageBuffer)
            clearImageBuffer();
        return;
    }

    setSurfaceSize(newSize);

    if (isGPUBased() && oldSize != size())
        downcast<GPUBasedCanvasRenderingContext>(*m_context).reshape(width(), height());

    auto renderer = this->renderer();
    if (is<RenderHTMLCanvas>(renderer)) {
        auto& canvasRenderer = downcast<RenderHTMLCanvas>(*renderer);
        if (oldSize != size()) {
            canvasRenderer.canvasSizeChanged();
            if (canvasRenderer.hasAcceleratedCompositing())
                canvasRenderer.contentChanged(CanvasChanged);
        }
        if (hadImageBuffer)
            canvasRenderer.repaint();
    }

    notifyObserversCanvasResized();
}

bool HTMLCanvasElement::paintsIntoCanvasBuffer() const
{
    ASSERT(m_context);
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    if (m_context->is2d() || m_context->isBitmapRenderer())
        return true;
#endif

    if (!m_context->isAccelerated())
        return true;

    if (renderBox() && renderBox()->hasAcceleratedCompositing())
        return false;

    return true;
}

void HTMLCanvasElement::paint(GraphicsContext& context, const LayoutRect& r)
{
    if (m_context)
        m_context->clearAccumulatedDirtyRect();

    if (!context.paintingDisabled()) {
        bool shouldPaint = true;

        if (m_context) {
            shouldPaint = paintsIntoCanvasBuffer() || document().printing() || m_isSnapshotting;
            if (shouldPaint) {
                m_context->prepareForDisplayWithPaint();
                m_context->paintRenderingResultsToCanvas();
            }
        }

        if (shouldPaint) {
            if (hasCreatedImageBuffer()) {
                if (ImageBuffer* imageBuffer = buffer())
                    context.drawImageBuffer(*imageBuffer, snappedIntRect(r));
            }
        }
    }

    if (UNLIKELY(m_context && m_context->hasActiveInspectorCanvasCallTracer()))
        InspectorInstrumentation::didFinishRecordingCanvasFrame(*m_context);
}

bool HTMLCanvasElement::isGPUBased() const
{
    return m_context && m_context->isGPUBased();
}

void HTMLCanvasElement::setSurfaceSize(const IntSize& size)
{
    CanvasBase::setSize(size);
    m_hasCreatedImageBuffer = false;
    setImageBuffer(nullptr);
    clearCopiedImage();
}

static String toEncodingMimeType(const String& mimeType)
{
    if (!MIMETypeRegistry::isSupportedImageMIMETypeForEncoding(mimeType))
        return "image/png"_s;
    return mimeType.convertToASCIILowercase();
}

// https://html.spec.whatwg.org/multipage/canvas.html#a-serialisation-of-the-bitmap-as-a-file
static std::optional<double> qualityFromJSValue(JSC::JSValue qualityValue)
{
    if (!qualityValue.isNumber())
        return std::nullopt;

    double qualityNumber = qualityValue.asNumber();
    if (qualityNumber < 0 || qualityNumber > 1)
        return std::nullopt;

    return qualityNumber;
}
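
// The two helpers above decide how script arguments map onto the encoder. A minimal
// script-side sketch of the behavior they implement (illustrative only, not part of this file):
//
//     canvas.toDataURL();                   // unsupported or omitted type falls back to "image/png"
//     canvas.toDataURL("image/jpeg", 0.8);  // a numeric quality in [0, 1] is forwarded to the encoder
//     canvas.toDataURL("image/jpeg", 7);    // out-of-range or non-numeric quality uses the encoder default
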
ExceptionOr<UncachedString> HTMLCanvasElement::toDataURL(const String& mimeType, JSC::JSValue qualityValue)
{
    if (!originClean())
        return Exception { SecurityError };

    if (size().isEmpty() || !buffer())
        return UncachedString { "data:,"_s };

    if (RuntimeEnabledFeatures::sharedFeatures().webAPIStatisticsEnabled())
        ResourceLoadObserver::shared().logCanvasRead(document());

    auto encodingMIMEType = toEncodingMimeType(mimeType);
    auto quality = qualityFromJSValue(qualityValue);

#if USE(CG)
    // Try to get ImageData first, as that may avoid lossy conversions.
    if (auto imageData = getImageData())
        return UncachedString { dataURL(imageData->pixelBuffer(), encodingMIMEType, quality) };
#endif

    makeRenderingResultsAvailable();

    return UncachedString { buffer()->toDataURL(encodingMIMEType, quality) };
}

ExceptionOr<UncachedString> HTMLCanvasElement::toDataURL(const String& mimeType)
{
    return toDataURL(mimeType, { });
}

ExceptionOr<void> HTMLCanvasElement::toBlob(ScriptExecutionContext& context, Ref<BlobCallback>&& callback, const String& mimeType, JSC::JSValue qualityValue)
{
    if (!originClean())
        return Exception { SecurityError };

    if (size().isEmpty() || !buffer()) {
        callback->scheduleCallback(context, nullptr);
        return { };
    }

    if (RuntimeEnabledFeatures::sharedFeatures().webAPIStatisticsEnabled())
        ResourceLoadObserver::shared().logCanvasRead(document());

    auto encodingMIMEType = toEncodingMimeType(mimeType);
    auto quality = qualityFromJSValue(qualityValue);

#if USE(CG)
    if (auto imageData = getImageData()) {
        RefPtr<Blob> blob;
        Vector<uint8_t> blobData = data(imageData->pixelBuffer(), encodingMIMEType, quality);
        if (!blobData.isEmpty())
            blob = Blob::create(&document(), WTFMove(blobData), encodingMIMEType);
        callback->scheduleCallback(context, WTFMove(blob));
        return { };
    }
#endif

    makeRenderingResultsAvailable();

    RefPtr<Blob> blob;
    Vector<uint8_t> blobData = buffer()->toData(encodingMIMEType, quality);
    if (!blobData.isEmpty())
        blob = Blob::create(&document(), WTFMove(blobData), encodingMIMEType);
    callback->scheduleCallback(context, WTFMove(blob));
    return { };
}

#if ENABLE(OFFSCREEN_CANVAS)
ExceptionOr<Ref<OffscreenCanvas>> HTMLCanvasElement::transferControlToOffscreen(ScriptExecutionContext& context)
{
    if (m_context)
        return Exception { InvalidStateError };

    m_context = makeUnique<PlaceholderRenderingContext>(*this);
    if (m_context->isAccelerated())
        invalidateStyleAndLayerComposition();

    return OffscreenCanvas::create(context, *this);
}
#endif

RefPtr<ImageData> HTMLCanvasElement::getImageData()
{
#if ENABLE(WEBGL)
    if (is<WebGLRenderingContextBase>(m_context.get())) {
        if (RuntimeEnabledFeatures::sharedFeatures().webAPIStatisticsEnabled())
            ResourceLoadObserver::shared().logCanvasRead(document());
        return ImageData::create(downcast<WebGLRenderingContextBase>(*m_context).paintRenderingResultsToPixelBuffer());
    }
#endif
    return nullptr;
}

#if ENABLE(MEDIA_STREAM)

RefPtr<MediaSample> HTMLCanvasElement::toMediaSample()
{
#if PLATFORM(COCOA) || USE(GSTREAMER)
    auto* imageBuffer = buffer();
    if (!imageBuffer)
        return nullptr;

    if (RuntimeEnabledFeatures::sharedFeatures().webAPIStatisticsEnabled())
        ResourceLoadObserver::shared().logCanvasRead(document());

    makeRenderingResultsAvailable();

    // FIXME: This can likely be optimized quite a bit, especially in the cases where
    // the ImageBuffer is backed by GPU memory already and/or is in the GPU process by
    // specializing toMediaSample() in ImageBufferBackend to not use getPixelBuffer().
    auto pixelBuffer = imageBuffer->getPixelBuffer({ AlphaPremultiplication::Unpremultiplied, PixelFormat::BGRA8, DestinationColorSpace::SRGB() }, { { }, imageBuffer->logicalSize() });
    if (!pixelBuffer)
        return nullptr;

#if PLATFORM(COCOA)
    return MediaSampleAVFObjC::createImageSample(WTFMove(*pixelBuffer));
#elif USE(GSTREAMER)
    return MediaSampleGStreamer::createImageSample(WTFMove(*pixelBuffer));
#endif
#else
    return nullptr;
#endif
}

ExceptionOr<Ref<MediaStream>> HTMLCanvasElement::captureStream(Document& document, std::optional<double>&& frameRequestRate)
{
    if (!originClean())
        return Exception(SecurityError, "Canvas is tainted"_s);

    if (RuntimeEnabledFeatures::sharedFeatures().webAPIStatisticsEnabled())
        ResourceLoadObserver::shared().logCanvasRead(this->document());

    if (frameRequestRate && frameRequestRate.value() < 0)
        return Exception(NotSupportedError, "frameRequestRate is negative"_s);

    auto track = CanvasCaptureMediaStreamTrack::create(document, *this, WTFMove(frameRequestRate));
    auto stream = MediaStream::create(document);
    stream->addTrack(track);
    return stream;
}

#endif

SecurityOrigin* HTMLCanvasElement::securityOrigin() const
{
    return &document().securityOrigin();
}

bool HTMLCanvasElement::shouldAccelerate(const IntSize& size) const
{
    auto checkedArea = size.area();
    if (checkedArea.hasOverflowed())
        return false;

    return shouldAccelerate(checkedArea.value());
}

bool HTMLCanvasElement::shouldAccelerate(unsigned area) const
{
    auto& settings = document().settings();
    if (area > settings.maximumAccelerated2dCanvasSize())
        return false;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    return settings.canvasUsesAcceleratedDrawing();
#else
    return false;
#endif
}

void HTMLCanvasElement::setUsesDisplayListDrawing(bool usesDisplayListDrawing)
{
    m_usesDisplayListDrawing = usesDisplayListDrawing;
}

void HTMLCanvasElement::setTracksDisplayListReplay(bool tracksDisplayListReplay)
{
    m_tracksDisplayListReplay = tracksDisplayListReplay;

    if (!buffer())
        return;

    auto& buffer = *this->buffer();
    if (buffer.drawingContext())
        buffer.drawingContext()->setTracksDisplayListReplay(m_tracksDisplayListReplay);
}

String HTMLCanvasElement::displayListAsText(DisplayList::AsTextFlags flags) const
{
    if (!buffer())
        return String();

    auto& buffer = *this->buffer();
    if (buffer.drawingContext())
        return buffer.drawingContext()->displayList().asText(flags);
    return String();
}

String HTMLCanvasElement::replayDisplayListAsText(DisplayList::AsTextFlags flags) const
{
    if (!buffer())
        return String();

    auto& buffer = *this->buffer();
    if (buffer.drawingContext() && buffer.drawingContext()->replayedDisplayList())
        return buffer.drawingContext()->replayedDisplayList()->asText(flags);
    return String();
}

void HTMLCanvasElement::createImageBuffer() const
{
    ASSERT(!hasCreatedImageBuffer());

    m_hasCreatedImageBuffer = true;
    m_didClearImageBuffer = true;

    auto checkedArea = size().area();

    if (checkedArea.hasOverflowed() || checkedArea > maxCanvasArea()) {
        auto message = makeString("Canvas area exceeds the maximum limit (width * height > ", maxCanvasArea(), ").");
        document().addConsoleMessage(MessageSource::JS, MessageLevel::Warning, message);
        return;
    }

    // Make sure we don't use more pixel memory than the system can support.
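    // The check below charges 4 bytes per pixel against activePixelMemory(); for example, an
    // 8192 x 8192 canvas accounts for 256 MB of the budget computed by maxActivePixelMemory().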
    auto checkedRequestedPixelMemory = (4 * checkedArea) + activePixelMemory();

    if (checkedRequestedPixelMemory.hasOverflowed() || checkedRequestedPixelMemory > maxActivePixelMemory()) {
        auto message = makeString("Total canvas memory use exceeds the maximum limit (", maxActivePixelMemory() / 1024 / 1024, " MB).");
        document().addConsoleMessage(MessageSource::JS, MessageLevel::Warning, message);
        return;
    }

    unsigned area = checkedArea.value();
    if (!area)
        return;

    auto hostWindow = (document().view() && document().view()->root()) ? document().view()->root()->hostWindow() : nullptr;

    auto renderingMode = shouldAccelerate(area) ? RenderingMode::Accelerated : RenderingMode::Unaccelerated;

    // FIXME: Add a new setting for DisplayList drawing on canvas.
    auto useDisplayList = m_usesDisplayListDrawing.value_or(document().settings().displayListDrawingEnabled()) ? ShouldUseDisplayList::Yes : ShouldUseDisplayList::No;

    auto [colorSpace, pixelFormat] = [&] {
        if (m_context)
            return std::pair { m_context->colorSpace(), m_context->pixelFormat() };
        return std::pair { DestinationColorSpace::SRGB(), PixelFormat::BGRA8 };
    }();

    setImageBuffer(ImageBuffer::create(size(), renderingMode, useDisplayList, RenderingPurpose::Canvas, 1, colorSpace, pixelFormat, hostWindow));

    if (buffer() && buffer()->drawingContext())
        buffer()->drawingContext()->setTracksDisplayListReplay(m_tracksDisplayListReplay);

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    if (m_context && m_context->is2d()) {
        // Recalculate compositing requirements if acceleration state changed.
        const_cast<HTMLCanvasElement*>(this)->invalidateStyleAndLayerComposition();
    }
#endif

    if (m_context && buffer() && buffer()->prefersPreparationForDisplay())
        const_cast<HTMLCanvasElement*>(this)->addObserver(document());
}

void HTMLCanvasElement::setImageBufferAndMarkDirty(RefPtr<ImageBuffer>&& buffer)
{
    IntSize oldSize = size();
    m_hasCreatedImageBuffer = true;
    setImageBuffer(WTFMove(buffer));

    if (isControlledByOffscreen() && oldSize != size()) {
        setAttributeWithoutSynchronization(widthAttr, AtomString::number(width()));
        setAttributeWithoutSynchronization(heightAttr, AtomString::number(height()));

        auto renderer = this->renderer();
        if (is<RenderHTMLCanvas>(renderer)) {
            auto& canvasRenderer = downcast<RenderHTMLCanvas>(*renderer);
            canvasRenderer.canvasSizeChanged();
            canvasRenderer.contentChanged(CanvasChanged);
        }

        notifyObserversCanvasResized();
    }

    didDraw(FloatRect(FloatPoint(), size()));
}

Image* HTMLCanvasElement::copiedImage() const
{
    if (!m_copiedImage && buffer()) {
        if (m_context)
            m_context->paintRenderingResultsToCanvas();
        m_copiedImage = buffer()->copyImage(CopyBackingStore, PreserveResolution::Yes);
    }
    return m_copiedImage.get();
}

void HTMLCanvasElement::clearImageBuffer() const
{
    ASSERT(m_hasCreatedImageBuffer);
    ASSERT(!m_didClearImageBuffer);
    ASSERT(m_context);

    m_didClearImageBuffer = true;

    if (is<CanvasRenderingContext2D>(*m_context)) {
        // No need to undo transforms/clip/etc. because we are called right after the context is reset.
        downcast<CanvasRenderingContext2D>(*m_context).clearRect(0, 0, width(), height());
    }
}

void HTMLCanvasElement::clearCopiedImage() const
{
    m_copiedImage = nullptr;
    m_didClearImageBuffer = false;
}

const char* HTMLCanvasElement::activeDOMObjectName() const
{
    return "HTMLCanvasElement";
}

bool HTMLCanvasElement::virtualHasPendingActivity() const
{
#if ENABLE(WEBGL)
    if (is<WebGLRenderingContextBase>(m_context.get())) {
        // WebGL rendering context may fire contextlost / contextchange / contextrestored events at any point.
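        // For instance, script that calls
        //     canvas.addEventListener("webglcontextlost", handler)
        // needs this element kept alive for dispatch, which the check below tracks via
        // m_hasRelevantWebGLEventListener.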
        return m_hasRelevantWebGLEventListener && !downcast<WebGLRenderingContextBase>(*m_context).isContextUnrecoverablyLost();
    }
#endif
    return false;
}

void HTMLCanvasElement::eventListenersDidChange()
{
#if ENABLE(WEBGL)
    m_hasRelevantWebGLEventListener = hasEventListeners(eventNames().webglcontextchangedEvent)
        || hasEventListeners(eventNames().webglcontextlostEvent)
        || hasEventListeners(eventNames().webglcontextrestoredEvent);
#endif
}

void HTMLCanvasElement::didMoveToNewDocument(Document& oldDocument, Document& newDocument)
{
    if (needsPreparationForDisplay()) {
        oldDocument.clearCanvasPreparation(*this);
        removeObserver(oldDocument);
        addObserver(newDocument);
    }

    HTMLElement::didMoveToNewDocument(oldDocument, newDocument);
}

Node::InsertedIntoAncestorResult HTMLCanvasElement::insertedIntoAncestor(InsertionType insertionType, ContainerNode& parentOfInsertedTree)
{
    if (needsPreparationForDisplay() && insertionType.connectedToDocument) {
        auto& document = parentOfInsertedTree.document();
        addObserver(document);

        // Drawing commands may have been issued to the canvas before now, so we need to
        // tell the document if we need preparation.
        if (m_context && m_context->compositingResultsNeedUpdating())
            document.canvasChanged(*this, FloatRect { });
    }

    return HTMLElement::insertedIntoAncestor(insertionType, parentOfInsertedTree);
}

void HTMLCanvasElement::removedFromAncestor(RemovalType removalType, ContainerNode& oldParentOfRemovedTree)
{
    if (needsPreparationForDisplay() && removalType.disconnectedFromDocument) {
        oldParentOfRemovedTree.document().clearCanvasPreparation(*this);
        removeObserver(oldParentOfRemovedTree.document());
    }

    HTMLElement::removedFromAncestor(removalType, oldParentOfRemovedTree);
}

bool HTMLCanvasElement::needsPreparationForDisplay()
{
    return m_context && m_context->needsPreparationForDisplay();
}

void HTMLCanvasElement::prepareForDisplay()
{
    ASSERT(needsPreparationForDisplay());

    if (m_context)
        m_context->prepareForDisplay();
}

bool HTMLCanvasElement::isControlledByOffscreen() const
{
    return m_context && m_context->isPlaceholder();
}

}