[CP] Fix pink images when returning from background on iOS (#170846)

Includes:
  * https://github.com/flutter/flutter/pull/169596
  * https://github.com/flutter/flutter/pull/169378
  
Fix image decode errors on iOS that could occur when a push notification triggers image decoding while the app is backgrounded.
Jonah Williams 2025-06-23 10:38:02 -07:00 committed by GitHub
parent 6fba2447e9
commit 82f2933347
GPG Key ID: B5690EEEBB952194
25 changed files with 167 additions and 30 deletions
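
The crux of the fix is visible in the ContextMTL and ImageDecoderImpeller hunks below: tasks stored on the context while the GPU is unavailable are re-queued rather than executed against a disabled device, and the decoder now blocks until Metal has scheduled its command buffer. A rough sketch of the resulting contract, using names from this diff; the lambda bodies and the is_gpu_disabled_sync_switch handle (assumed to be the same fml::SyncSwitch the context was created with) are illustrative stand-ins:

// Illustrative sketch only; not part of the change.
// `context` is assumed to be the app's std::shared_ptr<Context> on the Metal backend.
auto& context_mtl = ContextMTL::Cast(*context);

// Defer a texture upload until the GPU is usable.
context_mtl.StoreTaskForGPU(
    [] { /* upload the decoded bitmap into a private texture */ },
    [] { FML_LOG(ERROR) << "Upload abandoned; context destroyed."; });

// If the app is backgrounded again before the flush runs, the task now
// survives instead of running against a disabled GPU.
is_gpu_disabled_sync_switch->SetSwitch(true);
context_mtl.FlushTasksAwaitingGPU();  // Task is re-queued; nothing executes.

// Re-enabling the GPU flushes the queue (the context observes the switch),
// and the deferred upload finally runs.
is_gpu_disabled_sync_switch->SetSwitch(false);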

View File

@@ -190,6 +190,7 @@
../../../flutter/impeller/renderer/backend/gles/test
../../../flutter/impeller/renderer/backend/gles/unique_handle_gles_unittests.cc
../../../flutter/impeller/renderer/backend/metal/allocator_mtl_unittests.mm
../../../flutter/impeller/renderer/backend/metal/context_mtl_unittests.mm
../../../flutter/impeller/renderer/backend/metal/swapchain_transients_mtl_unittests.mm
../../../flutter/impeller/renderer/backend/metal/texture_mtl_unittests.mm
../../../flutter/impeller/renderer/backend/vulkan/allocator_vk_unittests.cc

View File

@@ -38,7 +38,7 @@ class PlaygroundImplMTL final : public PlaygroundImpl {
std::shared_ptr<ContextMTL> context_;
std::shared_ptr<fml::ConcurrentMessageLoop> concurrent_loop_;
std::shared_ptr<SwapchainTransientsMTL> swapchain_transients_;
std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch_;
std::shared_ptr<fml::SyncSwitch> is_gpu_disabled_sync_switch_;
// |PlaygroundImpl|
std::shared_ptr<Context> GetContext() const override;
@@ -50,6 +50,9 @@ class PlaygroundImplMTL final : public PlaygroundImpl {
std::unique_ptr<Surface> AcquireSurfaceFrame(
std::shared_ptr<Context> context) override;
// |PlaygroundImpl|
void SetGPUDisabled(bool disabled) const override;
PlaygroundImplMTL(const PlaygroundImplMTL&) = delete;
PlaygroundImplMTL& operator=(const PlaygroundImplMTL&) = delete;

View File

@@ -138,4 +138,8 @@ fml::Status PlaygroundImplMTL::SetCapabilities(
return fml::Status();
}
void PlaygroundImplMTL::SetGPUDisabled(bool disabled) const {
is_gpu_disabled_sync_switch_->SetSwitch(disabled);
}
} // namespace impeller

View File

@@ -526,4 +526,8 @@ Playground::VKProcAddressResolver Playground::CreateVKProcAddressResolver()
return impl_->CreateVKProcAddressResolver();
}
void Playground::SetGPUDisabled(bool value) const {
impl_->SetGPUDisabled(value);
}
} // namespace impeller

View File

@@ -122,6 +122,11 @@ class Playground {
std::function<void*(void* instance, const char* proc_name)>;
VKProcAddressResolver CreateVKProcAddressResolver() const;
/// @brief Mark the GPU as unavailable.
///
/// Only supported on the Metal backend.
void SetGPUDisabled(bool disabled) const;
protected:
const PlaygroundSwitches switches_;
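
A brief usage sketch of this helper, assuming a Metal playground fixture such as the ContextMTLTest suite added later in this diff (the test name and body are hypothetical):

TEST_P(ContextMTLTest, IllustrativeGPULoss) {
  SetGPUDisabled(/*disabled=*/true);   // Simulate the app losing GPU access.
  // ... exercise code that must tolerate a disabled GPU ...
  SetGPUDisabled(/*disabled=*/false);  // Restore access before the test ends.
}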

View File

@@ -40,6 +40,8 @@ class PlaygroundImpl {
virtual Playground::VKProcAddressResolver CreateVKProcAddressResolver() const;
virtual void SetGPUDisabled(bool disabled) const {}
protected:
const PlaygroundSwitches switches_;

View File

@@ -29,7 +29,8 @@ bool CommandBufferGLES::IsValid() const {
}
// |CommandBuffer|
bool CommandBufferGLES::OnSubmitCommands(CompletionCallback callback) {
bool CommandBufferGLES::OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) {
const auto result = reactor_->React();
if (callback) {
callback(result ? CommandBuffer::Status::kCompleted

View File

@@ -32,7 +32,8 @@ class CommandBufferGLES final : public CommandBuffer {
bool IsValid() const override;
// |CommandBuffer|
bool OnSubmitCommands(CompletionCallback callback) override;
bool OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) override;
// |CommandBuffer|
void OnWaitUntilCompleted() override;

View File

@@ -72,6 +72,7 @@ impeller_component("metal_unittests") {
sources = [
"allocator_mtl_unittests.mm",
"context_mtl_unittests.mm",
"swapchain_transients_mtl_unittests.mm",
"texture_mtl_unittests.mm",
]

View File

@@ -34,7 +34,8 @@ class CommandBufferMTL final : public CommandBuffer {
bool IsValid() const override;
// |CommandBuffer|
bool OnSubmitCommands(CompletionCallback callback) override;
bool OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) override;
// |CommandBuffer|
void OnWaitUntilCompleted() override;

View File

@@ -162,7 +162,8 @@ static CommandBuffer::Status ToCommitResult(MTLCommandBufferStatus status) {
return CommandBufferMTL::Status::kError;
}
bool CommandBufferMTL::OnSubmitCommands(CompletionCallback callback) {
bool CommandBufferMTL::OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) {
auto context = context_.lock();
if (!context) {
return false;
@@ -182,6 +183,9 @@ bool CommandBufferMTL::OnSubmitCommands(CompletionCallback callback) {
}
[buffer_ commit];
if (block_on_schedule) {
[buffer_ waitUntilScheduled];
}
buffer_ = nil;
return true;

View File

@@ -146,6 +146,9 @@ class ContextMTL final : public Context,
void StoreTaskForGPU(const fml::closure& task,
const fml::closure& failure) override;
// visible for testing.
void FlushTasksAwaitingGPU();
private:
class SyncSwitchObserver : public fml::SyncSwitch::Observer {
public:
@@ -191,8 +194,6 @@ class ContextMTL final : public Context,
std::shared_ptr<CommandBuffer> CreateCommandBufferInQueue(
id<MTLCommandQueue> queue) const;
void FlushTasksAwaitingGPU();
ContextMTL(const ContextMTL&) = delete;
ContextMTL& operator=(const ContextMTL&) = delete;

View File

@@ -428,8 +428,26 @@ void ContextMTL::FlushTasksAwaitingGPU() {
Lock lock(tasks_awaiting_gpu_mutex_);
std::swap(tasks_awaiting_gpu, tasks_awaiting_gpu_);
}
std::vector<PendingTasks> tasks_to_queue;
for (const auto& task : tasks_awaiting_gpu) {
task.task();
is_gpu_disabled_sync_switch_->Execute(fml::SyncSwitch::Handlers()
.SetIfFalse([&] { task.task(); })
.SetIfTrue([&] {
// Lost access to the GPU
// immediately after it was
// activated. This may happen if
// the app was quickly
// foregrounded/backgrounded
// from a push notification.
// Store the tasks on the
// context again.
tasks_to_queue.push_back(task);
}));
}
if (!tasks_to_queue.empty()) {
Lock lock(tasks_awaiting_gpu_mutex_);
tasks_awaiting_gpu_.insert(tasks_awaiting_gpu_.end(),
tasks_to_queue.begin(), tasks_to_queue.end());
}
}
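
For readers unfamiliar with fml::SyncSwitch, the Execute/Handlers pattern above runs exactly one of the two closures based on the switch's current value, while holding the switch's internal lock so SetSwitch cannot flip the value mid-decision. A minimal standalone sketch (the switch instance and closures are illustrative):

#include "flutter/fml/synchronization/sync_switch.h"

fml::SyncSwitch gpu_disabled(/*value=*/false);
gpu_disabled.Execute(fml::SyncSwitch::Handlers()
                         .SetIfFalse([] { /* safe to touch the GPU */ })
                         .SetIfTrue([] { /* defer or re-queue the work */ }));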

View File

@@ -0,0 +1,64 @@
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "flutter/testing/testing.h"
#include "impeller/core/device_buffer_descriptor.h"
#include "impeller/core/formats.h"
#include "impeller/core/texture_descriptor.h"
#include "impeller/playground/playground_test.h"
#include "impeller/renderer/backend/metal/allocator_mtl.h"
#include "impeller/renderer/backend/metal/context_mtl.h"
#include "impeller/renderer/backend/metal/formats_mtl.h"
#include "impeller/renderer/backend/metal/texture_mtl.h"
#include "impeller/renderer/capabilities.h"
#include <QuartzCore/CAMetalLayer.h>
#include <memory>
#include <thread>
#include "gtest/gtest.h"
namespace impeller {
namespace testing {
using ContextMTLTest = PlaygroundTest;
INSTANTIATE_METAL_PLAYGROUND_SUITE(ContextMTLTest);
TEST_P(ContextMTLTest, FlushTask) {
auto& context_mtl = ContextMTL::Cast(*GetContext());
int executed = 0;
int failed = 0;
context_mtl.StoreTaskForGPU([&]() { executed++; }, [&]() { failed++; });
context_mtl.FlushTasksAwaitingGPU();
EXPECT_EQ(executed, 1);
EXPECT_EQ(failed, 0);
}
TEST_P(ContextMTLTest, FlushTaskWithGPULoss) {
auto& context_mtl = ContextMTL::Cast(*GetContext());
int executed = 0;
int failed = 0;
context_mtl.StoreTaskForGPU([&]() { executed++; }, [&]() { failed++; });
// If tasks are flushed while the GPU is disabled, then
// they should not be executed.
SetGPUDisabled(/*disabled=*/true);
context_mtl.FlushTasksAwaitingGPU();
EXPECT_EQ(executed, 0);
EXPECT_EQ(failed, 0);
// Toggling availability should flush tasks.
SetGPUDisabled(/*disabled=*/false);
EXPECT_EQ(executed, 1);
EXPECT_EQ(failed, 0);
}
} // namespace testing
} // namespace impeller

View File

@@ -43,7 +43,8 @@ bool CommandBufferVK::IsValid() const {
return true;
}
bool CommandBufferVK::OnSubmitCommands(CompletionCallback callback) {
bool CommandBufferVK::OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) {
FML_UNREACHABLE()
}

View File

@@ -98,7 +98,8 @@ class CommandBufferVK final
bool IsValid() const override;
// |CommandBuffer|
bool OnSubmitCommands(CompletionCallback callback) override;
bool OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) override;
// |CommandBuffer|
void OnWaitUntilCompleted() override;

View File

@@ -22,7 +22,8 @@ CommandQueueVK::~CommandQueueVK() = default;
fml::Status CommandQueueVK::Submit(
const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& completion_callback) {
const CompletionCallback& completion_callback,
bool block_on_schedule) {
if (buffers.empty()) {
return fml::Status(fml::StatusCode::kInvalidArgument,
"No command buffers provided.");

View File

@@ -17,9 +17,9 @@ class CommandQueueVK : public CommandQueue {
~CommandQueueVK() override;
fml::Status Submit(
const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& completion_callback = {}) override;
fml::Status Submit(const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& completion_callback = {},
bool block_on_schedule = false) override;
private:
std::weak_ptr<ContextVK> context_;

View File

@@ -15,7 +15,8 @@ CommandBuffer::CommandBuffer(std::weak_ptr<const Context> context)
CommandBuffer::~CommandBuffer() = default;
bool CommandBuffer::SubmitCommands(const CompletionCallback& callback) {
bool CommandBuffer::SubmitCommands(bool block_on_schedule,
const CompletionCallback& callback) {
if (!IsValid()) {
// Already committed or was never valid. Either way, this is caller error.
if (callback) {
@@ -23,11 +24,11 @@ bool CommandBuffer::SubmitCommands(const CompletionCallback& callback) {
}
return false;
}
return OnSubmitCommands(callback);
return OnSubmitCommands(block_on_schedule, callback);
}
bool CommandBuffer::SubmitCommands() {
return SubmitCommands(nullptr);
bool CommandBuffer::SubmitCommands(bool block_on_schedule) {
return SubmitCommands(block_on_schedule, nullptr);
}
void CommandBuffer::WaitUntilCompleted() {

View File

@@ -107,7 +107,11 @@ class CommandBuffer {
virtual std::shared_ptr<BlitPass> OnCreateBlitPass() = 0;
[[nodiscard]] virtual bool OnSubmitCommands(CompletionCallback callback) = 0;
/// @brief Submit the command buffer to the GPU for execution.
///
/// See also: [SubmitCommands].
[[nodiscard]] virtual bool OnSubmitCommands(bool block_on_schedule,
CompletionCallback callback) = 0;
virtual void OnWaitUntilCompleted() = 0;
@@ -124,12 +128,15 @@ class CommandBuffer {
/// performed immediately on the calling thread.
///
/// A command buffer may only be committed once.
///
/// @param[in] block_on_schedule If true, this function will not return
/// until the command buffer has been scheduled. This only impacts
/// the Metal backend.
/// @param[in] callback The completion callback.
///
[[nodiscard]] bool SubmitCommands(const CompletionCallback& callback);
[[nodiscard]] bool SubmitCommands(bool block_on_schedule,
const CompletionCallback& callback);
[[nodiscard]] bool SubmitCommands();
[[nodiscard]] bool SubmitCommands(bool block_on_schedule);
CommandBuffer(const CommandBuffer&) = delete;
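
A short usage sketch of the updated SubmitCommands, mirroring the pattern the image decoder adopts later in this diff; assumes code inside namespace impeller with a valid context, and the pass encoding is elided:

// Submit and wait only until the backend has scheduled the work (Metal only).
std::shared_ptr<CommandBuffer> buffer = context->CreateCommandBuffer();
// ... encode blit or render passes into `buffer` ...
if (!buffer->SubmitCommands(
        /*block_on_schedule=*/true,
        [](CommandBuffer::Status status) {
          if (status == CommandBuffer::Status::kError) {
            FML_LOG(ERROR) << "Command buffer execution failed.";
          }
        })) {
  // Submission failed synchronously, e.g. the buffer was already committed.
}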

View File

@@ -13,7 +13,8 @@ CommandQueue::~CommandQueue() = default;
fml::Status CommandQueue::Submit(
const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& completion_callback) {
const CompletionCallback& completion_callback,
bool block_on_schedule) {
if (buffers.empty()) {
if (completion_callback) {
completion_callback(CommandBuffer::Status::kError);
@@ -22,7 +23,7 @@ fml::Status CommandQueue::Submit(
"No command buffers provided.");
}
for (const std::shared_ptr<CommandBuffer>& buffer : buffers) {
if (!buffer->SubmitCommands(completion_callback)) {
if (!buffer->SubmitCommands(block_on_schedule, completion_callback)) {
return fml::Status(fml::StatusCode::kCancelled,
"Failed to submit command buffer.");
}

View File

@@ -35,9 +35,14 @@ class CommandQueue {
/// Only the Metal and Vulkan backends can give a status beyond
/// successful encoding. This callback may be called more than once and
/// potentially on a different thread.
///
/// If [block_on_schedule] is true, this function will not return until
/// the command buffer has been scheduled. This only impacts the Metal
/// backend.
virtual fml::Status Submit(
const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& completion_callback = {});
const CompletionCallback& completion_callback = {},
bool block_on_schedule = false);
private:
CommandQueue(const CommandQueue&) = delete;
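
The queue-level counterpart, which is what ImageDecoderImpeller switches to in the hunk below (assumes an impeller context and an already-encoded command_buffer; error handling trimmed):

fml::Status submit_status = context->GetCommandQueue()->Submit(
    {command_buffer},
    [](CommandBuffer::Status status) {
      if (status == CommandBuffer::Status::kError) {
        FML_LOG(ERROR) << "Queue submission reported a GPU error.";
      }
    },
    /*block_on_schedule=*/true);  // Only impacts the Metal backend.
if (!submit_status.ok()) {
  // The buffers never reached the GPU; surface the error to the caller.
}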

View File

@@ -124,7 +124,7 @@ class MockCommandBuffer : public CommandBuffer {
MOCK_METHOD(std::shared_ptr<BlitPass>, OnCreateBlitPass, (), (override));
MOCK_METHOD(bool,
OnSubmitCommands,
(CompletionCallback callback),
(bool block_on_schedule, CompletionCallback callback),
(override));
MOCK_METHOD(void, OnWaitUntilCompleted, (), (override));
MOCK_METHOD(void, OnWaitUntilScheduled, (), (override));
@@ -238,7 +238,8 @@ class MockCommandQueue : public CommandQueue {
MOCK_METHOD(fml::Status,
Submit,
(const std::vector<std::shared_ptr<CommandBuffer>>& buffers,
const CompletionCallback& cb),
const CompletionCallback& cb,
bool block_on_schedule),
(override));
};

View File

@@ -390,8 +390,17 @@ ImageDecoderImpeller::UnsafeUploadTextureToPrivate(
result_texture = std::move(resize_texture);
}
blit_pass->EncodeCommands();
if (!context->GetCommandQueue()->Submit({command_buffer}).ok()) {
if (!context->GetCommandQueue()
->Submit(
{command_buffer},
[](impeller::CommandBuffer::Status status) {
if (status == impeller::CommandBuffer::Status::kError) {
FML_LOG(ERROR)
<< "GPU Error submitting image decoding command buffer.";
}
},
/*block_on_schedule=*/true)
.ok()) {
std::string decode_error("Failed to submit image decoding command buffer.");
FML_DLOG(ERROR) << decode_error;
return std::make_pair(nullptr, decode_error);

View File

@@ -224,7 +224,7 @@ std::shared_ptr<impeller::Context> MakeConvertDlImageToSkImageContext(
EXPECT_CALL(*context, GetResourceAllocator).WillRepeatedly(Return(allocator));
EXPECT_CALL(*context, CreateCommandBuffer).WillOnce(Return(command_buffer));
EXPECT_CALL(*device_buffer, OnGetContents).WillOnce(Return(buffer.data()));
EXPECT_CALL(*command_queue, Submit(_, _))
EXPECT_CALL(*command_queue, Submit(_, _, _))
.WillRepeatedly(
DoAll(InvokeArgument<1>(impeller::CommandBuffer::Status::kCompleted),
Return(fml::Status())));