Merge pull request #819 from fhuberts/fix-Wsuggest-attribute=format

Fix compiler warnings (-Wsuggest-attribute=format)
Leonid Stryzhevskyi authored on 2023-08-17 01:53:28 +03:00 (committed by GitHub)
commit ddd05b6ef5
25 changed files with 96 additions and 52 deletions
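For context: -Wsuggest-attribute=format is GCC's hint that a function forwards its arguments to a printf-family function and could therefore carry __attribute__((format(printf, string-index, first-to-check))), so that -Wformat can validate the format strings at every call site. A minimal sketch of the idea (illustrative only, not oatpp code):

#include <cstdarg>
#include <cstdio>

// arg 1 is the format string, variadic checking starts at arg 2
__attribute__((format(printf, 1, 2)))
void log_message(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::vfprintf(stderr, fmt, args);
  va_end(args);
}

int main() {
  log_message("value=%d\n", 42);        // OK
  // log_message("value=%d\n", "str");  // with the attribute, GCC flags this under -Wformat
}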

@ -153,7 +153,7 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
add_compiler_flags(4.7 "-Wunused-local-typedefs")
add_compiler_flags(4.8 "-Wformat=2")
#add_compiler_flags(4.8 "-Wsuggest-attribute=format")
add_compiler_flags(4.8 "-Wsuggest-attribute=format")
add_compiler_flags(5.1 "-Wformat-signedness")
#add_compiler_flags(5.1 "-Wsuggest-final-methods")
@ -271,8 +271,6 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
add_compiler_flags(4.6 "-Wno-sign-compare")
add_compiler_flags(4.6 "-Wno-unused-parameter")
add_compiler_flags(4.8 "-Wno-suggest-attribute=format")
add_compiler_flags(9.1 "-Wno-pessimizing-move")
add_compiler_flags(13.0 "-Wno-dangling-reference")

@ -33,7 +33,7 @@ PerformanceChecker::PerformanceChecker(const char* tag)
PerformanceChecker::~PerformanceChecker(){
v_int64 elapsedTicks = oatpp::base::Environment::getMicroTickCount() - m_ticks;
OATPP_LOGD(m_tag, "%d(micro)", elapsedTicks);
OATPP_LOGD(m_tag, "%ld(micro)", elapsedTicks);
}
v_int64 PerformanceChecker::getElapsedTicks(){
@ -54,9 +54,9 @@ ThreadLocalObjectsChecker::~ThreadLocalObjectsChecker(){
v_counter objectsCreatedPerTest = base::Environment::getThreadLocalObjectsCreated() - m_objectsCreated;
if(leakingObjects == 0){
OATPP_LOGE(m_tag, "OK:\n created(obj): %d", objectsCreatedPerTest);
OATPP_LOGE(m_tag, "OK:\n created(obj): %ld", objectsCreatedPerTest);
}else{
OATPP_LOGE(m_tag, "FAILED, leakingObjects = %d", leakingObjects);
OATPP_LOGE(m_tag, "FAILED, leakingObjects = %ld", leakingObjects);
OATPP_ASSERT(false);
}
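The specifier changes above (%d to %ld for 64-bit counters) assume an LP64 platform where long is 64 bits; on LLP64 targets such as 64-bit Windows, %ld would still mismatch. A fully portable alternative, shown only as an illustration of the trade-off and not as what the commit does, is the <cinttypes> macros or an explicit cast:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  int64_t elapsedTicks = 123456;  // stands in for oatpp's v_int64
  // PRId64 expands to the correct conversion for int64_t on every platform
  std::printf("%" PRId64 "(micro)\n", elapsedTicks);
  // or cast to a type whose specifier is fixed
  std::printf("%lld(micro)\n", static_cast<long long>(elapsedTicks));
}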

@ -52,10 +52,10 @@ void UnitTest::run(v_int32 times) {
if(leakingObjects == 0){
OATPP_LOGI(TAG, "\033[1mFINISHED\033[0m - \033[1;32msuccess!\033[0m");
OATPP_LOGI(TAG, "\033[33m%d(micro), %d(objs)\033[0m\n", millis, objectsCreatedPerTest);
OATPP_LOGI(TAG, "\033[33m%ld(micro), %ld(objs)\033[0m\n", millis, objectsCreatedPerTest);
}else{
OATPP_LOGE(TAG, "\033[1mFINISHED\033[0m - \033[1;31mfailed\033[0m, leakingObjects = %d", leakingObjects);
OATPP_LOGE(TAG, "\033[1mFINISHED\033[0m - \033[1;31mfailed\033[0m, leakingObjects = %ld", leakingObjects);
exit(EXIT_FAILURE);

@ -92,7 +92,7 @@ public:
bool runConditionForLambda = true;
m_server = std::make_shared<oatpp::network::Server>(m_connectionProvider, m_connectionHandler);
OATPP_LOGD("\033[1;34mClientServerTestRunner\033[0m", "\033[1;34mRunning server on port %s. Timeout %lld(micro)\033[0m",
OATPP_LOGD("\033[1;34mClientServerTestRunner\033[0m", "\033[1;34mRunning server on port %s. Timeout %ld(micro)\033[0m",
m_connectionProvider->getProperty("port").toString()->c_str(),
timeout.count());
@ -129,7 +129,7 @@ public:
clientThread.join();
auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - startTime);
OATPP_LOGD("\033[1;34mClientServerTestRunner\033[0m", "\033[1;34mFinished with time %lld(micro). Stopping server...\033[0m", elapsed.count());
OATPP_LOGD("\033[1;34mClientServerTestRunner\033[0m", "\033[1;34mFinished with time %ld(micro). Stopping server...\033[0m", elapsed.count());
running = false;
timeoutCondition.notify_one();

@ -54,7 +54,7 @@ void IOEventWorker::initEventQueue() {
if(!m_outEvents) {
OATPP_LOGE("[oatpp::async::worker::IOEventWorker::initEventQueue()]",
"Error. Unable to allocate %d bytes for events.", MAX_EVENTS * sizeof(epoll_event));
"Error. Unable to allocate %lu bytes for events.", MAX_EVENTS * sizeof(epoll_event));
throw std::runtime_error("[oatpp::async::worker::IOEventWorker::initEventQueue()]: Error. Unable to allocate memory for events.");
}
@ -162,10 +162,10 @@ void IOEventWorker::waitEvents() {
OATPP_LOGE("[oatpp::async::worker::IOEventWorker::waitEvents()]", "Error:\n"
"errno=%d\n"
"in-events=%d\n"
"foreman=%d\n"
"this=%d\n"
"foreman=%lx\n"
"this=%lx\n"
"specialization=%d",
errno, m_inEventsCount, m_foreman, this, m_specialization);
errno, m_inEventsCount, reinterpret_cast<v_buff_usize>(m_foreman), reinterpret_cast<v_buff_usize>(this), m_specialization);
throw std::runtime_error("[oatpp::async::worker::IOEventWorker::waitEvents()]: Error. Event loop failed.");
}
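Here the two pointers are logged through %lx after a reinterpret_cast to an unsigned integer type. A common alternative for pointer/format mismatches, shown as a generic sketch rather than a suggested change, is %p with a void* cast:

#include <cstdio>

struct Foreman {};  // hypothetical stand-in for the worker's foreman type

int main() {
  Foreman f;
  Foreman* foreman = &f;
  // %p expects exactly void*, so the cast keeps -Wformat satisfied
  std::printf("foreman=%p\n", static_cast<void*>(foreman));
}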

@ -0,0 +1,34 @@
/***************************************************************************
*
* Project _____ __ ____ _ _
* ( _ ) /__\ (_ _)_| |_ _| |_
* )(_)( /(__)\ )( (_ _)(_ _)
* (_____)(__)(__)(__) |_| |_|
*
*
* Copyright 2018-present, Leonid Stryzhevskyi <lganzzzo@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************************/
#ifndef oatpp_base_Compiler_hpp
#define oatpp_base_Compiler_hpp
#ifdef __GNUC__
#define GPP_ATTRIBUTE(x) __attribute__((x))
#else
#define GPP_ATTRIBUTE(x)
#endif
#endif /* oatpp_base_Compiler_hpp */
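The new GPP_ATTRIBUTE macro expands to __attribute__((x)) on GCC-compatible compilers and to nothing elsewhere, so format annotations can be added without breaking other toolchains. A small sketch of how it is applied, mirroring the Environment.hpp declarations below (a free function is used here purely for illustration):

#ifdef __GNUC__
  #define GPP_ATTRIBUTE(x) __attribute__((x))
#else
  #define GPP_ATTRIBUTE(x)
#endif

// parameter 3 is the format string; variadic arguments begin at parameter 4
void logFormatted(int priority, const char* tag, const char* message, ...)
  GPP_ATTRIBUTE(format (printf, 3, 4));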

@ -26,6 +26,7 @@
#ifndef oatpp_base_Environment_hpp
#define oatpp_base_Environment_hpp
#include "./Compiler.hpp"
#include "./Config.hpp"
#include <cstdio>
@ -356,7 +357,7 @@ public:
private:
static void registerComponent(const std::string& typeName, const std::string& componentName, void* component);
static void unregisterComponent(const std::string& typeName, const std::string& componentName);
static void vlogFormatted(v_uint32 priority, const std::string& tag, const char* message, va_list args);
static void vlogFormatted(v_uint32 priority, const std::string& tag, const char* message, va_list args) GPP_ATTRIBUTE(format (printf, 3, 0));
public:
/**
@ -448,7 +449,7 @@ public:
* @param message - message.
* @param ... - format arguments.
*/
static void logFormatted(v_uint32 priority, const std::string& tag, const char* message, ...);
static void logFormatted(v_uint32 priority, const std::string& tag, const char* message, ...) GPP_ATTRIBUTE(format (printf, 3, 4));
/**
* Format message and call `Logger::log()`<br>
@ -458,7 +459,7 @@ public:
* @param message - message.
* @param ... - format arguments.
*/
static void logFormatted(v_uint32 priority, const LogCategory& category, const char* message, ...);
static void logFormatted(v_uint32 priority, const LogCategory& category, const char* message, ...) GPP_ATTRIBUTE(format (printf, 3, 4));
/**
* Get component object by typeName.
@ -483,6 +484,17 @@ public:
};
/**
* Default oatpp assert method.
* @param FMT - the format string used for the expression
* @param EXP - expression that must be `true`.
*/
#define OATPP_ASSERT_FMT(FMT, EXP) \
if(!(EXP)) { \
OATPP_LOGE("\033[1mASSERT\033[0m[\033[1;31mFAILED\033[0m]", FMT, #EXP); \
exit(EXIT_FAILURE); \
}
/**
* Default oatpp assert method.
* @param EXP - expression that must be `true`.
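In these annotations, format (printf, 3, 4) marks parameter 3 as the format string with argument checking starting at parameter 4, while format (printf, 3, 0) is the va_list form: the format string itself is still validated, but the forwarded arguments cannot be. A self-contained sketch with the same indices (free functions used for illustration; oatpp's versions are static members of Environment):

#include <cstdarg>
#include <cstdio>

__attribute__((format(printf, 3, 0)))
void vlogFormatted(int priority, const char* tag, const char* message, va_list args) {
  std::printf("[%d][%s] ", priority, tag);
  std::vprintf(message, args);
}

__attribute__((format(printf, 3, 4)))
void logFormatted(int priority, const char* tag, const char* message, ...) {
  va_list args;
  va_start(args, message);
  vlogFormatted(priority, tag, message, args);
  va_end(args);
}

int main() { logFormatted(1, "TAG", "value=%d\n", 42); }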

@ -73,12 +73,12 @@ void StreamPartReader::onPartData(const std::shared_ptr<Part>& part, const char*
if(size > 0) {
if(m_maxDataSize > 0 && tagObject->size + size > m_maxDataSize) {
OATPP_LOGE("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]", "Error. Part size exceeds specified maxDataSize=%d", m_maxDataSize);
OATPP_LOGE("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]", "Error. Part size exceeds specified maxDataSize=%ld", m_maxDataSize);
throw std::runtime_error("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]: Error. Part size exceeds specified maxDataSize");
}
auto res = tagObject->outputStream->writeExactSizeDataSimple(data, size);
if(res != size) {
OATPP_LOGE("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]", "Error. Failed to stream all data. Streamed %d/%d", res, size);
OATPP_LOGE("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]", "Error. Failed to stream all data. Streamed %ld/%ld", res, size);
throw std::runtime_error("[oatpp::web::mime::multipart::StreamPartReader::onPartData()]: Error. Failed to stream all data.");
}
tagObject->size += res;
@ -163,7 +163,7 @@ async::CoroutineStarter AsyncStreamPartReader::onPartDataAsync(const std::shared
if(size > 0) {
if(m_maxDataSize > 0 && tagObject->size + size > m_maxDataSize) {
OATPP_LOGE("[oatpp::web::mime::multipart::AsyncStreamPartReader::onPartDataAsync()]", "Error. Part size exceeds specified maxDataSize=%d", m_maxDataSize);
OATPP_LOGE("[oatpp::web::mime::multipart::AsyncStreamPartReader::onPartDataAsync()]", "Error. Part size exceeds specified maxDataSize=%ld", m_maxDataSize);
throw std::runtime_error("[oatpp::web::mime::multipart::AsyncStreamPartReader::onPartDataAsync()]: Error. Part size exceeds specified maxDataSize");
}
return tagObject->outputStream->writeExactSizeDataAsync(data, size);

@ -107,7 +107,7 @@ v_io_size MultipartBody::read(void *buffer, v_buff_size count, async::Action& ac
}
} else if(action.isNone()) {
OATPP_LOGE("[oatpp::web::protocol::http::outgoing::MultipartBody::MultipartReadCallback::read()]", "Error. Invalid read result %d. State=%d", res, m_state);
OATPP_LOGE("[oatpp::web::protocol::http::outgoing::MultipartBody::MultipartReadCallback::read()]", "Error. Invalid read result %ld. State=%d", res, m_state);
return 0;
}

@ -75,9 +75,9 @@ void runTests() {
oatpp::base::Environment::printCompilationConfig();
OATPP_LOGD("Tests", "coroutine handle size=%d", sizeof(oatpp::async::CoroutineHandle));
OATPP_LOGD("Tests", "coroutine size=%d", sizeof(oatpp::async::AbstractCoroutine));
OATPP_LOGD("Tests", "action size=%d", sizeof(oatpp::async::Action));
OATPP_LOGD("Tests", "coroutine handle size=%lu", sizeof(oatpp::async::CoroutineHandle));
OATPP_LOGD("Tests", "coroutine size=%lu", sizeof(oatpp::async::AbstractCoroutine));
OATPP_LOGD("Tests", "action size=%lu", sizeof(oatpp::async::Action));
OATPP_LOGD("Tests", "class count=%d", oatpp::data::mapping::type::ClassId::getClassCount());
auto names = oatpp::data::mapping::type::ClassId::getRegisteredClassNames();
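sizeof yields a size_t, whose standard conversion is %zu; %lu matches only where unsigned long and size_t have the same width (true on LP64, which is the assumption the commit relies on). A tiny illustrative sketch with a hypothetical struct:

#include <cstdio>

struct Action { void* ptr; long value; };  // hypothetical, not oatpp::async::Action

int main() {
  std::printf("action size=%zu\n", sizeof(Action));
}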

@ -141,7 +141,7 @@ bool checkSymbol(char symbol, const char* data, v_buff_size size) {
for (v_buff_size j = 0; j < NUM_SYMBOLS; j++) {
if (data[i + j] != symbol) {
OATPP_LOGD("aaa", "j pos=%d", j);
OATPP_LOGD("aaa", "j pos=%ld", j);
return false;
}

@ -33,7 +33,7 @@ namespace {
template<class T>
void checkHash(const T& val) {
auto h = std::hash<T>{}(val);
OATPP_LOGI("HASH", "type='%s', hash=%llu", val.getValueType()->classId.name, h);
OATPP_LOGI("HASH", "type='%s', hash=%lu", val.getValueType()->classId.name, h);
}
}
@ -254,4 +254,4 @@ void PrimitiveTest::onRun() {
}
}}}}}}
}}}}}}

@ -189,11 +189,11 @@ void PoolTest::onRun() {
std::this_thread::sleep_for(std::chrono::milliseconds (200));
OATPP_LOGD(TAG, "1) pool->getCounter() == %d", pool->getCounter());
OATPP_LOGD(TAG, "1) pool->getCounter() == %ld", pool->getCounter());
OATPP_ASSERT(pool->getCounter() == 10);
OATPP_LOGD(TAG, "Waiting...");
std::this_thread::sleep_for(std::chrono::seconds(10));
OATPP_LOGD(TAG, "Pool counter=%d", pool->getCounter());
OATPP_LOGD(TAG, "Pool counter=%ld", pool->getCounter());
OATPP_ASSERT(pool->getCounter() == 0);
OATPP_LOGD(TAG, "Run 2");
@ -204,11 +204,11 @@ void PoolTest::onRun() {
std::this_thread::sleep_for(std::chrono::milliseconds (200));
OATPP_LOGD(TAG, "2) pool->getCounter() == %d", pool->getCounter());
OATPP_LOGD(TAG, "2) pool->getCounter() == %ld", pool->getCounter());
OATPP_ASSERT(pool->getCounter() == 10);
OATPP_LOGD(TAG, "Waiting...");
std::this_thread::sleep_for(std::chrono::seconds(10));
OATPP_LOGD(TAG, "Pool counter=%d", pool->getCounter());
OATPP_LOGD(TAG, "Pool counter=%ld", pool->getCounter());
OATPP_ASSERT(pool->getCounter() == 0);
for(std::thread& thread : threads) {
@ -217,7 +217,7 @@ void PoolTest::onRun() {
executor.waitTasksFinished();
OATPP_LOGD(TAG, "counter=%d", provider->getIdCounter());
OATPP_LOGD(TAG, "counter=%ld", provider->getIdCounter());
OATPP_ASSERT(provider->getIdCounter() == 20);
pool->stop();

@ -61,7 +61,7 @@ void UrlTest::onRun(){
oatpp::encoding::Url::Config config;
config.spaceToPlus = false;
auto encoded = oatpp::encoding::Url::encode(" ", config);
OATPP_ASSERT(encoded == "%20");
OATPP_ASSERT_FMT("%s", encoded == "%20");
}
{
@ -75,14 +75,14 @@ void UrlTest::onRun(){
oatpp::encoding::Url::Config config;
config.spaceToPlus = false;
auto encoded = oatpp::encoding::Url::encode("Смачна Овсяночка!", config);
OATPP_ASSERT(encoded == "%D0%A1%D0%BC%D0%B0%D1%87%D0%BD%D0%B0%20%D0%9E%D0%B2%D1%81%D1%8F%D0%BD%D0%BE%D1%87%D0%BA%D0%B0%21");
OATPP_ASSERT_FMT("%s", encoded == "%D0%A1%D0%BC%D0%B0%D1%87%D0%BD%D0%B0%20%D0%9E%D0%B2%D1%81%D1%8F%D0%BD%D0%BE%D1%87%D0%BA%D0%B0%21");
}
{
oatpp::encoding::Url::Config config;
config.spaceToPlus = true;
auto encoded = oatpp::encoding::Url::encode("Смачна Овсяночка!", config);
OATPP_ASSERT(encoded == "%D0%A1%D0%BC%D0%B0%D1%87%D0%BD%D0%B0+%D0%9E%D0%B2%D1%81%D1%8F%D0%BD%D0%BE%D1%87%D0%BA%D0%B0%21");
OATPP_ASSERT_FMT("%s", encoded == "%D0%A1%D0%BC%D0%B0%D1%87%D0%BD%D0%B0+%D0%9E%D0%B2%D1%81%D1%8F%D0%BD%D0%BE%D1%87%D0%BA%D0%B0%21");
}
}
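The switch to OATPP_ASSERT_FMT here is presumably needed because OATPP_ASSERT stringifies the whole expression into the log call, and once the logger is printf-format-checked, expression text containing a literal '%' (such as these URL-encoded strings) would be parsed as conversion specifiers. Routing the stringified expression through an explicit "%s" avoids that. A minimal sketch of the difference (plain printf macros, not the oatpp ones):

#include <cstdio>
#include <string>

#define ASSERT_PLAIN(EXP)    if(!(EXP)) { std::printf(#EXP "\n"); }       // #EXP becomes the format string
#define ASSERT_FMT(FMT, EXP) if(!(EXP)) { std::printf(FMT "\n", #EXP); }  // #EXP is only a %s argument

int main() {
  std::string encoded = "not encoded";
  // ASSERT_PLAIN(encoded == "%20");   // the "%2" inside the stringified text reads as a (bad) conversion
  ASSERT_FMT("%s", encoded == "%20");  // prints: encoded == "%20"
}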

@ -202,7 +202,7 @@ void ConnectionPoolTest::onRun() {
executor.waitTasksFinished();
OATPP_LOGD(TAG, "connections_counter=%d", connectionProvider->counter.load());
OATPP_LOGD(TAG, "connections_counter=%ld", connectionProvider->counter.load());
OATPP_ASSERT(connectionProvider->counter <= 10);
pool->stop();

@ -150,7 +150,7 @@ void runClient() {
auto data = response->readBodyToString();
OATPP_ASSERT(data)
OATPP_LOGD("TEST", "data->size() == %d", data->size())
OATPP_LOGD("TEST", "data->size() == %lu", data->size())
OATPP_ASSERT(data->size() < 110) // it should be less than 100. But we put 110 for redundancy
}
@ -190,7 +190,7 @@ void runAsyncClient() {
Action onBody(const oatpp::String& data) {
OATPP_ASSERT(data)
OATPP_LOGD("TEST", "data->size() == %d", data->size())
OATPP_LOGD("TEST", "data->size() == %lu", data->size())
OATPP_ASSERT(data->size() < 60) // it should be less than 50. But we put 60 for redundancy
m_monitor->stop();
return finish();

@ -66,7 +66,7 @@ namespace {
}
}
}
OATPP_LOGV("WriterTask", "sent %d bytes", m_transferedBytes);
OATPP_LOGV("WriterTask", "sent %ld bytes", m_transferedBytes);
}
};
@ -94,7 +94,7 @@ namespace {
m_buffer->writeSimple(readBuffer, res);
}
}
OATPP_LOGV("ReaderTask", "sent %d bytes", m_buffer->getCurrentPosition());
OATPP_LOGV("ReaderTask", "sent %ld bytes", m_buffer->getCurrentPosition());
}
};

@ -170,7 +170,7 @@ void ClientRetryTest::onRun() {
auto response = client->getRoot();
auto ticks = checker.getElapsedTicks();
OATPP_LOGD(TAG, "ticks=%d", ticks);
OATPP_LOGD(TAG, "ticks=%ld", ticks);
if(m_port == 0) {
@ -255,7 +255,7 @@ void ClientRetryTest::onRun() {
counter ++;
if(counter % 1000 == 0) {
OATPP_LOGD("client", "requests=%d", counter);
OATPP_LOGD("client", "requests=%ld", counter);
}
}

@ -155,7 +155,7 @@ public:
Action handleError(Error* error) override {
if(error->is<oatpp::AsyncIOError>()) {
auto e = static_cast<oatpp::AsyncIOError*>(error);
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_getRootAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_getRootAsync::handleError()]", "AsyncIOError. %s, %ld", e->what(), e->getCode());
} else {
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_getRootAsync::handleError()]", "Error. %s", error->what());
}
@ -197,7 +197,7 @@ public:
Action handleError(Error* error) override {
if(error->is<oatpp::AsyncIOError>()) {
auto e = static_cast<oatpp::AsyncIOError*>(error);
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_postBodyAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_postBodyAsync::handleError()]", "AsyncIOError. %s, %ld", e->what(), e->getCode());
} else {
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_postBodyAsync::handleError()]", "Error. %s", error->what());
}
@ -244,7 +244,7 @@ public:
if(error) {
if(error->is<oatpp::AsyncIOError>()) {
auto e = static_cast<oatpp::AsyncIOError*>(error);
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_echoBodyAsync::handleError()]", "AsyncIOError. %s, %d", e->what(), e->getCode());
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_echoBodyAsync::handleError()]", "AsyncIOError. %s, %ld", e->what(), e->getCode());
} else {
OATPP_LOGE("[FullAsyncClientTest::ClientCoroutine_echoBodyAsync::handleError()]", "Error. %s", error->what());
}

@ -281,7 +281,7 @@ void FullAsyncTest::onRun() {
if((i + 1) % iterationsStep == 0) {
auto ticks = oatpp::base::Environment::getMicroTickCount() - lastTick;
lastTick = oatpp::base::Environment::getMicroTickCount();
OATPP_LOGV("i", "%d, tick=%d", i + 1, ticks);
OATPP_LOGV("i", "%d, tick=%ld", i + 1, ticks);
}
}

@ -505,7 +505,7 @@ void FullTest::onRun() {
if((i + 1) % iterationsStep == 0) {
auto ticks = oatpp::base::Environment::getMicroTickCount() - lastTick;
lastTick = oatpp::base::Environment::getMicroTickCount();
OATPP_LOGV("i", "%d, tick=%d", i + 1, ticks);
OATPP_LOGV("i", "%d, tick=%ld", i + 1, ticks);
}
{ // test bundle

@ -145,7 +145,7 @@ void PipelineTest::onRun() {
}
auto dataToSend = pipelineStream.toString();
OATPP_LOGD(TAG, "Sending %d bytes", dataToSend->size());
OATPP_LOGD(TAG, "Sending %lu bytes", dataToSend->size());
oatpp::data::stream::BufferInputStream inputStream(dataToSend);
@ -163,7 +163,7 @@ void PipelineTest::onRun() {
v_io_size transferSize = sample->size() * m_pipelineSize;
OATPP_LOGD(TAG, "want to Receive %d bytes", transferSize);
OATPP_LOGD(TAG, "want to Receive %ld bytes", transferSize);
oatpp::data::stream::transfer(connection.object.get(), &receiveStream, transferSize, ioBuffer.getData(), ioBuffer.getSize());
auto result = receiveStream.toString();

@ -273,7 +273,7 @@ public:
request->transferBody(&multipartReader);
/* Print number of uploaded parts */
OATPP_LOGD("Multipart", "parts_count=%d", multipart->count());
OATPP_LOGD("Multipart", "parts_count=%ld", multipart->count());
/* Print value of "part1" */
auto part1 = multipart->getNamedPart("part1");

@ -259,7 +259,7 @@ public:
Action onUploaded() {
/* Print number of uploaded parts */
OATPP_LOGD("Multipart", "parts_count=%d", m_multipart->count());
OATPP_LOGD("Multipart", "parts_count=%ld", m_multipart->count());
/* Get multipart by name */
auto part1 = m_multipart->getNamedPart("part1");

@ -166,7 +166,7 @@ void runClient(const std::shared_ptr<oatpp::network::ClientConnectionProvider>&
auto data = response->readBodyToString();
OATPP_ASSERT(data)
OATPP_LOGD("TEST", "data->size() == %d", data->size())
OATPP_LOGD("TEST", "data->size() == %lu", data->size())
}